author    Linus Torvalds <torvalds@linux-foundation.org>  2009-06-12 12:50:42 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-06-12 12:50:42 -0400
commit    c9b8af00ff71f86ff3d092cc60ca673e1d0eae5b (patch)
tree      25cc016481cc693552bebb4040041817280c2ccf
parent    c59a264c9e932c828d533497e286b89e43c8d1be (diff)
parent    82681a318f9f028ea64e61f24bbd9ac535531921 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (154 commits)
  [SCSI] osd: Remove out-of-tree left overs
  [SCSI] libosd: Use REQ_QUIET requests.
  [SCSI] osduld: use filp_open() when looking up an osd-device
  [SCSI] libosd: Define an osd_dev wrapper to retrieve the request_queue
  [SCSI] libosd: osd_req_{read,write} takes a length parameter
  [SCSI] libosd: Let _osd_req_finalize_data_integrity receive number of out_bytes
  [SCSI] libosd: osd_req_{read,write}_kern new API
  [SCSI] libosd: Better printout of OSD target system information
  [SCSI] libosd: OSD2r05: Attribute definitions
  [SCSI] libosd: OSD2r05: Additional command enums
  [SCSI] mpt fusion: fix up doc book comments
  [SCSI] mpt fusion: Added support for Broadcast primitives Event handling
  [SCSI] mpt fusion: Queue full event handling
  [SCSI] mpt fusion: RAID device handling and Dual port Raid support is added
  [SCSI] mpt fusion: Put IOC into ready state if it not already in ready state
  [SCSI] mpt fusion: Code Cleanup patch
  [SCSI] mpt fusion: Rescan SAS topology added
  [SCSI] mpt fusion: SAS topology scan changes, expander events
  [SCSI] mpt fusion: Firmware event implementation using seperate WorkQueue
  [SCSI] mpt fusion: rewrite of ioctl_cmds internal generated function
  ...
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c10
-rw-r--r--drivers/message/fusion/mptbase.c1571
-rw-r--r--drivers/message/fusion/mptbase.h180
-rw-r--r--drivers/message/fusion/mptctl.c692
-rw-r--r--drivers/message/fusion/mptdebug.h3
-rw-r--r--drivers/message/fusion/mptfc.c15
-rw-r--r--drivers/message/fusion/mptsas.c3114
-rw-r--r--drivers/message/fusion/mptsas.h41
-rw-r--r--drivers/message/fusion/mptscsih.c1329
-rw-r--r--drivers/message/fusion/mptscsih.h7
-rw-r--r--drivers/message/fusion/mptspi.c71
-rw-r--r--drivers/net/Kconfig11
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bnx2.c193
-rw-r--r--drivers/net/bnx2.h18
-rw-r--r--drivers/net/cnic.c2711
-rw-r--r--drivers/net/cnic.h299
-rw-r--r--drivers/net/cnic_defs.h580
-rw-r--r--drivers/net/cnic_if.h299
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c30
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c10
-rw-r--r--drivers/s390/scsi/zfcp_def.h7
-rw-r--r--drivers/s390/scsi/zfcp_erp.c8
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fc.c7
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c29
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c13
-rw-r--r--drivers/scsi/Kconfig31
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/NCR_D700.c2
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h155
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h1509
-rw-r--r--drivers/scsi/bnx2i/Kconfig7
-rw-r--r--drivers/scsi/bnx2i/Makefile3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h771
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2405
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c438
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2064
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c142
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i.h1
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c26
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c23
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.h3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c6
-rw-r--r--drivers/scsi/fcoe/fcoe.c95
-rw-r--r--drivers/scsi/fcoe/fcoe.h1
-rw-r--r--drivers/scsi/fcoe/libfcoe.c21
-rw-r--r--drivers/scsi/fnic/fnic_main.c1
-rw-r--r--drivers/scsi/gdth_proc.c5
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c434
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h40
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c463
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h4
-rw-r--r--drivers/scsi/ibmvscsi/viosrp.h68
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/libfc/fc_exch.c4
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c6
-rw-r--r--drivers/scsi/libiscsi.c468
-rw-r--r--drivers/scsi/libiscsi_tcp.c18
-rw-r--r--drivers/scsi/lpfc/lpfc.h123
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c250
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h63
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c275
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c1365
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h142
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2141
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5626
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h54
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c674
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c206
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c51
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c930
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c6683
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h467
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c62
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h5
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c32
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c363
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c36
-rw-r--r--drivers/scsi/mvsas.c3222
-rw-r--r--drivers/scsi/mvsas/Kconfig42
-rw-r--r--drivers/scsi/mvsas/Makefile32
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c793
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h151
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c672
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h222
-rw-r--r--drivers/scsi/mvsas/mv_chips.h280
-rw-r--r--drivers/scsi/mvsas/mv_defs.h502
-rw-r--r--drivers/scsi/mvsas/mv_init.c703
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2154
-rw-r--r--drivers/scsi/mvsas/mv_sas.h406
-rw-r--r--drivers/scsi/osd/Kbuild25
-rwxr-xr-xdrivers/scsi/osd/Makefile37
-rw-r--r--drivers/scsi/osd/osd_initiator.c83
-rw-r--r--drivers/scsi/osd/osd_uld.c66
-rw-r--r--drivers/scsi/qla1280.c387
-rw-r--r--drivers/scsi/qla1280.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c227
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h45
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h43
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c206
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c55
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c240
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c244
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c118
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c294
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c47
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_error.c21
-rw-r--r--drivers/scsi/scsi_lib.c14
-rw-r--r--drivers/scsi/scsi_scan.c4
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c173
-rw-r--r--drivers/scsi/sd.c45
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c66
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c49
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h2
-rw-r--r--fs/exofs/common.h6
-rw-r--r--fs/exofs/inode.c8
-rw-r--r--fs/exofs/osd.c26
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/scsi/fc/fc_fip.h7
-rw-r--r--include/scsi/iscsi_if.h49
-rw-r--r--include/scsi/libfc.h1
-rw-r--r--include/scsi/libiscsi.h8
-rw-r--r--include/scsi/osd_attributes.h74
-rw-r--r--include/scsi/osd_initiator.h14
-rw-r--r--include/scsi/osd_protocol.h8
-rw-r--r--include/scsi/scsi_transport_iscsi.h8
141 files changed, 42941 insertions, 9068 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 75223f50de58..0ba6ec876296 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -257,11 +257,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
 
-	/*
-	 * mgmt tasks do not need special cleanup and we do not
-	 * allocate anything in the init task callout
-	 */
-	if (!task->sc || task->state == ISCSI_TASK_PENDING)
+	/* mgmt tasks do not need special cleanup */
+	if (!task->sc)
 		return;
 
 	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -517,7 +514,8 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 }
 
 static struct iscsi_endpoint *
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
+iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+		      int non_blocking)
 {
 	int err;
 	struct iser_conn *ib_conn;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5d496a99e034..44b931504457 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -146,7 +146,6 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 static MPT_RESETHANDLER		 MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 static struct mpt_pci_driver 	*MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 
-static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq);
 
 /*
  *  Driver Callback Index's
@@ -159,7 +158,8 @@ static u8 last_drv_idx;
  *  Forward protos...
  */
 static irqreturn_t mpt_interrupt(int irq, void *bus_id);
-static int	mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
+static int	mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+		MPT_FRAME_HDR *reply);
 static int	mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
 			u32 *req, int replyBytes, u16 *u16reply, int maxwait,
 			int sleepFlag);
@@ -190,9 +190,9 @@ static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
 static int	mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
 static void	mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
 static void	mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
-static void	mpt_timer_expired(unsigned long data);
 static void	mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
-static int	SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
+static int	SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
+	int sleepFlag);
 static int	SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
 static int	mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
 static int	mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
@@ -207,8 +207,8 @@ static int procmpt_iocinfo_read(char *buf, char **start, off_t offset,
 #endif
 static void	mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
 
-//int		mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
-static int	ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers);
+static int	ProcessEventNotification(MPT_ADAPTER *ioc,
+		EventNotificationReply_t *evReply, int *evHandlers);
 static void	mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
 static void	mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
 static void	mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
@@ -277,6 +277,56 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
 }
 
 /**
+ *	mpt_is_discovery_complete - determine if discovery has completed
+ *	@ioc: per adatper instance
+ *
+ *	Returns 1 when discovery completed, else zero.
+ */
+static int
+mpt_is_discovery_complete(MPT_ADAPTER *ioc)
+{
+	ConfigExtendedPageHeader_t hdr;
+	CONFIGPARMS cfg;
+	SasIOUnitPage0_t *buffer;
+	dma_addr_t dma_handle;
+	int rc = 0;
+
+	memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
+	memset(&cfg, 0, sizeof(CONFIGPARMS));
+	hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
+	hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+	hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+	cfg.cfghdr.ehdr = &hdr;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+	if ((mpt_config(ioc, &cfg)))
+		goto out;
+	if (!hdr.ExtPageLength)
+		goto out;
+
+	buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+	    &dma_handle);
+	if (!buffer)
+		goto out;
+
+	cfg.physAddr = dma_handle;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+	if ((mpt_config(ioc, &cfg)))
+		goto out_free_consistent;
+
+	if (!(buffer->PhyData[0].PortFlags &
+	    MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
+		rc = 1;
+
+ out_free_consistent:
+	pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+	    buffer, dma_handle);
+ out:
+	return rc;
+}
+
+/**
  *	mpt_fault_reset_work - work performed on workq after ioc fault
  *	@work: input argument, used to derive ioc
  *
@@ -290,7 +340,7 @@ mpt_fault_reset_work(struct work_struct *work)
 	int		 rc;
 	unsigned long	 flags;
 
-	if (ioc->diagPending || !ioc->active)
+	if (ioc->ioc_reset_in_progress || !ioc->active)
 		goto out;
 
 	ioc_raw_state = mpt_GetIocState(ioc, 0);
@@ -307,6 +357,12 @@ mpt_fault_reset_work(struct work_struct *work)
 			printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
 			    "reset (%04xh)\n", ioc->name, ioc_raw_state &
 			    MPI_DOORBELL_DATA_MASK);
+	} else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
+		if ((mpt_is_discovery_complete(ioc))) {
+			devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
+			    "discovery_quiesce_io flag\n", ioc->name));
+			ioc->sas_discovery_quiesce_io = 0;
+		}
 	}
 
  out:
@@ -317,11 +373,11 @@ mpt_fault_reset_work(struct work_struct *work)
 		ioc = ioc->alt_ioc;
 
 	/* rearm the timer */
-	spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
 	if (ioc->reset_work_q)
 		queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
 			msecs_to_jiffies(MPT_POLLING_INTERVAL));
-	spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 }
 
 
@@ -501,9 +557,9 @@ mpt_interrupt(int irq, void *bus_id)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mpt_base_reply - MPT base driver's callback routine
+ *	mptbase_reply - MPT base driver's callback routine
  *	@ioc: Pointer to MPT_ADAPTER structure
- *	@mf: Pointer to original MPT request frame
+ *	@req: Pointer to original MPT request frame
  *	@reply: Pointer to MPT reply frame (NULL if TurboReply)
  *
  *	MPT base driver's callback routine; all base driver
@@ -514,122 +570,49 @@ mpt_interrupt(int irq, void *bus_id)
  *	should be freed, or 0 if it shouldn't.
  */
 static int
-mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
+mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
 {
+	EventNotificationReply_t *pEventReply;
+	u8 event;
+	int evHandlers;
 	int freereq = 1;
-	u8 func;
 
-	dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply() called\n", ioc->name));
-#ifdef CONFIG_FUSION_LOGGING
-	if ((ioc->debug_level & MPT_DEBUG_MSG_FRAME) &&
-				!(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) {
-		dmfprintk(ioc, printk(MYIOC_s_INFO_FMT ": Original request frame (@%p) header\n",
-		    ioc->name, mf));
-		DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)mf);
-	}
-#endif
-
-	func = reply->u.hdr.Function;
-	dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, Function=%02Xh\n",
-			ioc->name, func));
-
-	if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
-		EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply;
-		int evHandlers = 0;
-		int results;
-
-		results = ProcessEventNotification(ioc, pEvReply, &evHandlers);
-		if (results != evHandlers) {
-			/* CHECKME! Any special handling needed here? */
-			devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n",
-					ioc->name, evHandlers, results));
-		}
-
-		/*
-		 *	Hmmm... It seems that EventNotificationReply is an exception
-		 *	to the rule of one reply per request.
-		 */
-		if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
+	switch (reply->u.hdr.Function) {
+	case MPI_FUNCTION_EVENT_NOTIFICATION:
+		pEventReply = (EventNotificationReply_t *)reply;
+		evHandlers = 0;
+		ProcessEventNotification(ioc, pEventReply, &evHandlers);
+		event = le32_to_cpu(pEventReply->Event) & 0xFF;
+		if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
 			freereq = 0;
-		} else {
-			devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
-				ioc->name, pEvReply));
-		}
-
-#ifdef CONFIG_PROC_FS
-//		LogEvent(ioc, pEvReply);
-#endif
-
-	} else if (func == MPI_FUNCTION_EVENT_ACK) {
-		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, EventAck reply received\n",
-				ioc->name));
-	} else if (func == MPI_FUNCTION_CONFIG) {
-		CONFIGPARMS *pCfg;
-		unsigned long flags;
-
-		dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "config_complete (mf=%p,mr=%p)\n",
-				ioc->name, mf, reply));
-
-		pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *)));
-
-		if (pCfg) {
-			/* disable timer and remove from linked list */
-			del_timer(&pCfg->timer);
-
-			spin_lock_irqsave(&ioc->FreeQlock, flags);
-			list_del(&pCfg->linkage);
-			spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
-			/*
-			 *	If IOC Status is SUCCESS, save the header
-			 *	and set the status code to GOOD.
-			 */
-			pCfg->status = MPT_CONFIG_ERROR;
-			if (reply) {
-				ConfigReply_t	*pReply = (ConfigReply_t *)reply;
-				u16		 status;
-
-				status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
-				dcprintk(ioc, printk(MYIOC_s_NOTE_FMT "  IOCStatus=%04xh, IOCLogInfo=%08xh\n",
-				     ioc->name, status, le32_to_cpu(pReply->IOCLogInfo)));
-
-				pCfg->status = status;
-				if (status == MPI_IOCSTATUS_SUCCESS) {
-					if ((pReply->Header.PageType &
-					    MPI_CONFIG_PAGETYPE_MASK) ==
-					    MPI_CONFIG_PAGETYPE_EXTENDED) {
-						pCfg->cfghdr.ehdr->ExtPageLength =
-						    le16_to_cpu(pReply->ExtPageLength);
-						pCfg->cfghdr.ehdr->ExtPageType =
-						    pReply->ExtPageType;
-					}
-					pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
-
-					/* If this is a regular header, save PageLength. */
-					/* LMP Do this better so not using a reserved field! */
-					pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
-					pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
-					pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
-				}
-			}
-
-			/*
-			 *	Wake up the original calling thread
-			 */
-			pCfg->wait_done = 1;
-			wake_up(&mpt_waitq);
+		if (event != MPI_EVENT_EVENT_CHANGE)
+			break;
+	case MPI_FUNCTION_CONFIG:
+	case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
+		ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+		if (reply) {
+			ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+			memcpy(ioc->mptbase_cmds.reply, reply,
+			    min(MPT_DEFAULT_FRAME_SIZE,
+				4 * reply->u.reply.MsgLength));
 		}
-	} else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) {
-		/* we should be always getting a reply frame */
-		memcpy(ioc->persist_reply_frame, reply,
-		    min(MPT_DEFAULT_FRAME_SIZE,
-		    4*reply->u.reply.MsgLength));
-		del_timer(&ioc->persist_timer);
-		ioc->persist_wait_done = 1;
-		wake_up(&mpt_waitq);
-	} else {
-		printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
-		    ioc->name, func);
+		if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
+			ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+			complete(&ioc->mptbase_cmds.done);
+		} else
+			freereq = 0;
+		if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
+			freereq = 1;
+		break;
+	case MPI_FUNCTION_EVENT_ACK:
+		devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "EventAck reply received\n", ioc->name));
+		break;
+	default:
+		printk(MYIOC_s_ERR_FMT
+		    "Unexpected msg function (=%02Xh) reply received!\n",
+		    ioc->name, reply->u.hdr.Function);
+		break;
 	}
 
 	/*
@@ -988,17 +971,21 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
 
 	/* Put Request back on FreeQ! */
 	spin_lock_irqsave(&ioc->FreeQlock, flags);
-	mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */
+	if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
+		goto out;
+	/* signature to know if this mf is freed */
+	mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
 	list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
 #ifdef MFCNT
 	ioc->mfcnt--;
 #endif
+ out:
 	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mpt_add_sge - Place a simple SGE at address pAddr.
+ *	mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
  *	@pAddr: virtual address for SGE
  *	@flagslength: SGE flags and data transfer length
  *	@dma_addr: Physical address
@@ -1006,23 +993,117 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
  *	This routine places a MPT request frame back on the MPT adapter's
  *	FreeQ.
  */
-void
-mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
+static void
+mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
 {
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-		SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+	SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
+	pSge->FlagsLength = cpu_to_le32(flagslength);
+	pSge->Address = cpu_to_le32(dma_addr);
+}
+
+/**
+ *	mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
+ *	@pAddr: virtual address for SGE
+ *	@flagslength: SGE flags and data transfer length
+ *	@dma_addr: Physical address
+ *
+ *	This routine places a MPT request frame back on the MPT adapter's
+ *	FreeQ.
+ **/
+static void
+mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+	SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+	pSge->Address.Low = cpu_to_le32
+			(lower_32_bits((unsigned long)(dma_addr)));
+	pSge->Address.High = cpu_to_le32
+			(upper_32_bits((unsigned long)dma_addr));
+	pSge->FlagsLength = cpu_to_le32
+			((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
+}
+
+/**
+ *	mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr
+ *	(1078 workaround).
+ *	@pAddr: virtual address for SGE
+ *	@flagslength: SGE flags and data transfer length
+ *	@dma_addr: Physical address
+ *
+ *	This routine places a MPT request frame back on the MPT adapter's
+ *	FreeQ.
+ **/
+static void
+mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+	SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+	u32 tmp;
+
+	pSge->Address.Low = cpu_to_le32
+			(lower_32_bits((unsigned long)(dma_addr)));
+	tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+
+	/*
+	 * 1078 errata workaround for the 36GB limitation
+	 */
+	if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32)  == 9) {
+		flagslength |=
+		    MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
+		tmp |= (1<<31);
+		if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
+			printk(KERN_DEBUG "1078 P0M2 addressing for "
+			    "addr = 0x%llx len = %d\n",
+			    (unsigned long long)dma_addr,
+			    MPI_SGE_LENGTH(flagslength));
+	}
+
+	pSge->Address.High = cpu_to_le32(tmp);
+	pSge->FlagsLength = cpu_to_le32(
+		(flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
+ *	@pAddr: virtual address for SGE
+ *	@next: nextChainOffset value (u32's)
+ *	@length: length of next SGL segment
+ *	@dma_addr: Physical address
+ *
+ */
+static void
+mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
+{
+	SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+	pChain->Length = cpu_to_le16(length);
+	pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
+	pChain->NextChainOffset = next;
+	pChain->Address = cpu_to_le32(dma_addr);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
+ *	@pAddr: virtual address for SGE
+ *	@next: nextChainOffset value (u32's)
+ *	@length: length of next SGL segment
+ *	@dma_addr: Physical address
+ *
+ */
+static void
+mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
+{
+	SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
-		u32 tmp = dma_addr & 0xFFFFFFFF;
+	u32 tmp = dma_addr & 0xFFFFFFFF;
 
-		pSge->FlagsLength = cpu_to_le32(flagslength);
-		pSge->Address.Low = cpu_to_le32(tmp);
-		tmp = (u32) ((u64)dma_addr >> 32);
-		pSge->Address.High = cpu_to_le32(tmp);
+	pChain->Length = cpu_to_le16(length);
+	pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+			 MPI_SGE_FLAGS_64_BIT_ADDRESSING);
 
-	} else {
-		SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
-		pSge->FlagsLength = cpu_to_le32(flagslength);
-		pSge->Address = cpu_to_le32(dma_addr);
-	}
+	pChain->NextChainOffset = next;
+
+	pChain->Address.Low = cpu_to_le32(tmp);
+	tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+	pChain->Address.High = cpu_to_le32(tmp);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1225,7 +1306,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
 	}
 	flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
 	flags_length |= ioc->HostPageBuffer_sz;
-	mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
+	ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
 	ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
 
 return 0;
@@ -1534,21 +1615,42 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 
 	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
-			": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
-			ioc->name));
-	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-		dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
-			": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
-			ioc->name));
+	if (sizeof(dma_addr_t) > 4) {
+		const uint64_t required_mask = dma_get_required_mask
+		    (&pdev->dev);
+		if (required_mask > DMA_BIT_MASK(32)
+		    && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+		    && !pci_set_consistent_dma_mask(pdev,
+					 DMA_BIT_MASK(64))) {
+			ioc->dma_mask = DMA_BIT_MASK(64);
+			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+				": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+				ioc->name));
+		} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+		    && !pci_set_consistent_dma_mask(pdev,
+					 DMA_BIT_MASK(32))) {
+			ioc->dma_mask = DMA_BIT_MASK(32);
+			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+				": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+				ioc->name));
+		} else {
+			printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
+			    ioc->name, pci_name(pdev));
+			return r;
+		}
 	} else {
-		printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
-		    ioc->name, pci_name(pdev));
-		pci_release_selected_regions(pdev, ioc->bars);
-		return r;
+		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+		    && !pci_set_consistent_dma_mask(pdev,
+					 DMA_BIT_MASK(32))) {
+			ioc->dma_mask = DMA_BIT_MASK(32);
+			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+				": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+				ioc->name));
+		} else {
+			printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
+			    ioc->name, pci_name(pdev));
+			return r;
+		}
 	}
 
 	mem_phys = msize = 0;
@@ -1632,6 +1734,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	ioc->id = mpt_ids++;
 	sprintf(ioc->name, "ioc%d", ioc->id);
+	dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
 
 	/*
 	 * set initial debug level
@@ -1650,14 +1753,36 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 		return r;
 	}
 
+	/*
+	 * Setting up proper handlers for scatter gather handling
+	 */
+	if (ioc->dma_mask == DMA_BIT_MASK(64)) {
+		if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
+			ioc->add_sge = &mpt_add_sge_64bit_1078;
+		else
+			ioc->add_sge = &mpt_add_sge_64bit;
+		ioc->add_chain = &mpt_add_chain_64bit;
+		ioc->sg_addr_size = 8;
+	} else {
+		ioc->add_sge = &mpt_add_sge;
+		ioc->add_chain = &mpt_add_chain;
+		ioc->sg_addr_size = 4;
+	}
+	ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
+
 	ioc->alloc_total = sizeof(MPT_ADAPTER);
 	ioc->req_sz = MPT_DEFAULT_FRAME_SIZE;		/* avoid div by zero! */
 	ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
 
 	ioc->pcidev = pdev;
-	ioc->diagPending = 0;
-	spin_lock_init(&ioc->diagLock);
-	spin_lock_init(&ioc->initializing_hba_lock);
+
+	spin_lock_init(&ioc->taskmgmt_lock);
+	mutex_init(&ioc->internal_cmds.mutex);
+	init_completion(&ioc->internal_cmds.done);
+	mutex_init(&ioc->mptbase_cmds.mutex);
+	init_completion(&ioc->mptbase_cmds.done);
+	mutex_init(&ioc->taskmgmt_cmds.mutex);
+	init_completion(&ioc->taskmgmt_cmds.done);
 
 	/* Initialize the event logging.
 	 */
@@ -1670,16 +1795,13 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 	ioc->mfcnt = 0;
 #endif
 
+	ioc->sh = NULL;
 	ioc->cached_fw = NULL;
 
 	/* Initilize SCSI Config Data structure
 	 */
 	memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
 
-	/* Initialize the running configQ head.
-	 */
-	INIT_LIST_HEAD(&ioc->configQ);
-
 	/* Initialize the fc rport list head.
 	 */
 	INIT_LIST_HEAD(&ioc->fc_rports);
@@ -1690,9 +1812,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Initialize workqueue */
 	INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
-	spin_lock_init(&ioc->fault_reset_work_lock);
 
-	snprintf(ioc->reset_work_q_name, sizeof(ioc->reset_work_q_name),
+	snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
 		 "mpt_poll_%d", ioc->id);
 	ioc->reset_work_q =
 		create_singlethread_workqueue(ioc->reset_work_q_name);
@@ -1767,11 +1888,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 	case MPI_MANUFACTPAGE_DEVID_SAS1064:
 	case MPI_MANUFACTPAGE_DEVID_SAS1068:
 		ioc->errata_flag_1064 = 1;
+		ioc->bus_type = SAS;
+		break;
 
 	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
 	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
 	case MPI_MANUFACTPAGE_DEVID_SAS1078:
 		ioc->bus_type = SAS;
+		break;
 	}
 
 
@@ -1813,6 +1937,11 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 	 */
 	mpt_detect_bound_ports(ioc, pdev);
 
+	INIT_LIST_HEAD(&ioc->fw_event_list);
+	spin_lock_init(&ioc->fw_event_lock);
+	snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
+	ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+
 	if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
 	    CAN_SLEEP)) != 0){
 		printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
@@ -1885,13 +2014,18 @@ mpt_detach(struct pci_dev *pdev)
 	/*
 	 * Stop polling ioc for fault condition
 	 */
-	spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
 	wq = ioc->reset_work_q;
 	ioc->reset_work_q = NULL;
-	spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 	cancel_delayed_work(&ioc->fault_reset_work);
 	destroy_workqueue(wq);
 
+	spin_lock_irqsave(&ioc->fw_event_lock, flags);
+	wq = ioc->fw_event_q;
+	ioc->fw_event_q = NULL;
+	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+	destroy_workqueue(wq);
 
 	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
 	remove_proc_entry(pname, NULL);
@@ -1994,6 +2128,21 @@ mpt_resume(struct pci_dev *pdev)
 	if (err)
 		return err;
 
+	if (ioc->dma_mask == DMA_BIT_MASK(64)) {
+		if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
+			ioc->add_sge = &mpt_add_sge_64bit_1078;
+		else
+			ioc->add_sge = &mpt_add_sge_64bit;
+		ioc->add_chain = &mpt_add_chain_64bit;
+		ioc->sg_addr_size = 8;
+	} else {
+
+		ioc->add_sge = &mpt_add_sge;
+		ioc->add_chain = &mpt_add_chain;
+		ioc->sg_addr_size = 4;
+	}
+	ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
+
 	printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
 	    ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
 	    CHIPREG_READ32(&ioc->chip->Doorbell));
@@ -2091,12 +2240,16 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 	ioc->active = 0;
 
 	if (ioc->alt_ioc) {
-		if (ioc->alt_ioc->active)
+		if (ioc->alt_ioc->active ||
+		    reason == MPT_HOSTEVENT_IOC_RECOVER) {
 			reset_alt_ioc_active = 1;
-
-		/* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... */
-		CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF);
-		ioc->alt_ioc->active = 0;
+			/* Disable alt-IOC's reply interrupts
+			 *  (and FreeQ) for a bit
+			 **/
+			CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
+				0xFFFFFFFF);
+			ioc->alt_ioc->active = 0;
+		}
 	}
 
 	hard = 1;
@@ -2117,9 +2270,11 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 		}
 
 	} else {
-		printk(MYIOC_s_WARN_FMT "NOT READY!\n", ioc->name);
+		printk(MYIOC_s_WARN_FMT
+		    "NOT READY WARNING!\n", ioc->name);
 	}
-	return -1;
+	ret = -1;
+	goto out;
 	}
 
 	/* hard_reset_done = 0 if a soft reset was performed
@@ -2129,7 +2284,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 		if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
 			alt_ioc_ready = 1;
 		else
-			printk(MYIOC_s_WARN_FMT "alt_ioc not ready!\n", ioc->alt_ioc->name);
+			printk(MYIOC_s_WARN_FMT
+			    ": alt-ioc Not ready WARNING!\n",
+			    ioc->alt_ioc->name);
 	}
 
 	for (ii=0; ii<5; ii++) {
@@ -2150,7 +2307,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 	if (alt_ioc_ready) {
 		if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
 			dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-			    "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc));
+			    "Initial Alt IocFacts failed rc=%x\n",
+			    ioc->name, rc));
 			/* Retry - alt IOC was initialized once
 			 */
 			rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
@@ -2194,16 +2352,20 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 			IRQF_SHARED, ioc->name, ioc);
 		if (rc < 0) {
 			printk(MYIOC_s_ERR_FMT "Unable to allocate "
-			    "interrupt %d!\n", ioc->name, ioc->pcidev->irq);
+			    "interrupt %d!\n",
+			    ioc->name, ioc->pcidev->irq);
 			if (ioc->msi_enable)
 				pci_disable_msi(ioc->pcidev);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto out;
 		}
 		irq_allocated = 1;
 		ioc->pci_irq = ioc->pcidev->irq;
 		pci_set_master(ioc->pcidev);		/* ?? */
-		dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt "
-		    "%d\n", ioc->name, ioc->pcidev->irq));
+		pci_set_drvdata(ioc->pcidev, ioc);
+		dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+		    "installed at interrupt %d\n", ioc->name,
+		    ioc->pcidev->irq));
 	}
 	}
 
@@ -2212,17 +2374,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 	 * init as upper addresses are needed for init.
 	 * If fails, continue with alt-ioc processing
 	 */
+	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
+	    ioc->name));
 	if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
 		ret = -3;
 
 	/* May need to check/upload firmware & data here!
 	 * If fails, continue with alt-ioc processing
 	 */
+	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
+	    ioc->name));
 	if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
 		ret = -4;
 // NEW!
 	if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
-		printk(MYIOC_s_WARN_FMT ": alt_ioc (%d) FIFO mgmt alloc!\n",
+		printk(MYIOC_s_WARN_FMT
+		    ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
 			ioc->alt_ioc->name, rc);
 		alt_ioc_ready = 0;
 		reset_alt_ioc_active = 0;
@@ -2232,8 +2399,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 		if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
 			alt_ioc_ready = 0;
 			reset_alt_ioc_active = 0;
-			printk(MYIOC_s_WARN_FMT "alt_ioc (%d) init failure!\n",
-				ioc->alt_ioc->name, rc);
+			printk(MYIOC_s_WARN_FMT
+			    ": alt-ioc: (%d) init failure WARNING!\n",
+			    ioc->alt_ioc->name, rc);
 		}
 	}
 
@@ -2269,28 +2437,36 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 		}
 	}
 
+	/*  Enable MPT base driver management of EventNotification
+	 *  and EventAck handling.
+	 */
+	if ((ret == 0) && (!ioc->facts.EventState)) {
+		dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+			"SendEventNotification\n",
+		    ioc->name));
+		ret = SendEventNotification(ioc, 1, sleepFlag);	/* 1=Enable */
+	}
+
+	if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
+		rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
+
 	if (ret == 0) {
 		/* Enable! (reply interrupt) */
 		CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
 		ioc->active = 1;
 	}
-
-	if (reset_alt_ioc_active && ioc->alt_ioc) {
-		/* (re)Enable alt-IOC! (reply interrupt) */
-		dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "alt_ioc reply irq re-enabled\n",
-		   ioc->alt_ioc->name));
-		CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM);
-		ioc->alt_ioc->active = 1;
+	if (rc == 0) {	/* alt ioc */
+		if (reset_alt_ioc_active && ioc->alt_ioc) {
+			/* (re)Enable alt-IOC! (reply interrupt) */
+			dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc"
+				"reply irq re-enabled\n",
+				ioc->alt_ioc->name));
+			CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
+				MPI_HIM_DIM);
+			ioc->alt_ioc->active = 1;
+		}
 	}
 
-	/* Enable MPT base driver management of EventNotification
-	 * and EventAck handling.
-	 */
-	if ((ret == 0) && (!ioc->facts.EventState))
-		(void) SendEventNotification(ioc, 1);	/* 1=Enable EventNotification */
-
-	if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
-		(void) SendEventNotification(ioc->alt_ioc, 1);	/* 1=Enable EventNotification */
 
 	/* Add additional "reason" check before call to GetLanConfigPages
 	 * (combined with GetIoUnitPage2 call).  This prevents a somewhat
@@ -2306,8 +2482,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 	mutex_init(&ioc->raid_data.inactive_list_mutex);
 	INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
 
-	if (ioc->bus_type == SAS) {
+	switch (ioc->bus_type) {
 
+	case SAS:
 		/* clear persistency table */
 		if(ioc->facts.IOCExceptions &
 		    MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
@@ -2321,8 +2498,15 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 		 */
 		mpt_findImVolumes(ioc);
 
-	} else if (ioc->bus_type == FC) {
-		if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) &&
+		/* Check, and possibly reset, the coalescing value
+		 */
+		mpt_read_ioc_pg_1(ioc);
+
+		break;
+
+	case FC:
+		if ((ioc->pfacts[0].ProtocolFlags &
+		    MPI_PORTFACTS_PROTOCOL_LAN) &&
 		    (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
 			/*
 			 * Pre-fetch the ports LAN MAC address!
@@ -2331,11 +2515,14 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 			(void) GetLanConfigPages(ioc);
 			a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
 			dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-				"LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
-				ioc->name, a[5], a[4], a[3], a[2], a[1], a[0]));
-
+				"LanAddr = %02X:%02X:%02X"
+				":%02X:%02X:%02X\n",
+				ioc->name, a[5], a[4],
+				a[3], a[2], a[1], a[0]));
 		}
-	} else {
+		break;
+
+	case SPI:
 		/* Get NVRAM and adapter maximums from SPP 0 and 2
 		 */
 		mpt_GetScsiPortSettings(ioc, 0);
@@ -2354,6 +2541,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 		mpt_read_ioc_pg_1(ioc);
 
 		mpt_read_ioc_pg_4(ioc);
+
+		break;
 	}
 
 	GetIoUnitPage2(ioc);
@@ -2435,16 +2624,20 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
 		if (_pcidev == peer) {
 			/* Paranoia checks */
 			if (ioc->alt_ioc != NULL) {
-				printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n",
-					ioc->name, ioc->alt_ioc->name);
+				printk(MYIOC_s_WARN_FMT
+				    "Oops, already bound (%s <==> %s)!\n",
+				    ioc->name, ioc->name, ioc->alt_ioc->name);
 				break;
 			} else if (ioc_srch->alt_ioc != NULL) {
-				printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n",
-					ioc_srch->name, ioc_srch->alt_ioc->name);
+				printk(MYIOC_s_WARN_FMT
+				    "Oops, already bound (%s <==> %s)!\n",
+				    ioc_srch->name, ioc_srch->name,
+				    ioc_srch->alt_ioc->name);
 				break;
 			}
-			dprintk(ioc, printk(MYIOC_s_INFO_FMT "FOUND! binding to %s\n",
-				ioc->name, ioc_srch->name));
+			dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+			    "FOUND! binding %s <==> %s\n",
+			    ioc->name, ioc->name, ioc_srch->name));
 			ioc_srch->alt_ioc = ioc;
 			ioc->alt_ioc = ioc_srch;
 		}
@@ -2464,8 +2657,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
 	int ret;
 
 	if (ioc->cached_fw != NULL) {
-		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
-		    "adapter\n", __func__, ioc->name));
+		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+			"%s: Pushing FW onto adapter\n", __func__, ioc->name));
 		if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
 		    ioc->cached_fw, CAN_SLEEP)) < 0) {
 			printk(MYIOC_s_WARN_FMT
@@ -2474,11 +2667,30 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
 		}
 	}
 
+	/*
+	 * Put the controller into ready state (if its not already)
+	 */
+	if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
+		if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
+		    CAN_SLEEP)) {
+			if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
+				printk(MYIOC_s_ERR_FMT "%s:  IOC msg unit "
+				    "reset failed to put ioc in ready state!\n",
+				    ioc->name, __func__);
+		} else
+			printk(MYIOC_s_ERR_FMT "%s:  IOC msg unit reset "
+			    "failed!\n", ioc->name, __func__);
+	}
+
+
 	/* Disable adapter interrupts! */
+	synchronize_irq(ioc->pcidev->irq);
 	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
 	ioc->active = 0;
+
 	/* Clear any lingering interrupt */
 	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+	CHIPREG_READ32(&ioc->chip->IntStatus);
 
 	if (ioc->alloc != NULL) {
 		sz = ioc->alloc_sz;
@@ -2538,19 +2750,22 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
 		if((ret = mpt_host_page_access_control(ioc,
 		    MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
 			printk(MYIOC_s_ERR_FMT
-			   "host page buffers free failed (%d)!\n",
-			    ioc->name, ret);
+			    ": %s: host page buffers free failed (%d)!\n",
+			    ioc->name, __func__, ret);
 		}
-		dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "HostPageBuffer free  @ %p, sz=%d bytes\n",
-		 	ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz));
+		dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+			"HostPageBuffer free  @ %p, sz=%d bytes\n",
+			ioc->name, ioc->HostPageBuffer,
+			ioc->HostPageBuffer_sz));
 		pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
 			ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
 		ioc->HostPageBuffer = NULL;
 		ioc->HostPageBuffer_sz = 0;
 		ioc->alloc_total -= ioc->HostPageBuffer_sz;
 	}
-}
 
+	pci_set_drvdata(ioc->pcidev, NULL);
+}
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_adapter_dispose - Free all resources associated with an MPT adapter
@@ -2690,8 +2905,12 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
 	}
 
 	/* Is it already READY? */
-	if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)
+	if (!statefault &&
+	    ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
+		dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+		    "IOC is in READY state\n", ioc->name));
 		return 0;
+	}
 
 	/*
 	 *	Check to see if IOC is in FAULT state.
@@ -2764,8 +2983,9 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
 
 		ii++; cntdn--;
 		if (!cntdn) {
-			printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n",
-					ioc->name, (int)((ii+5)/HZ));
+			printk(MYIOC_s_ERR_FMT
+			    "Wait IOC_READY state (0x%x) timeout(%d)!\n",
+			    ioc->name, ioc_state, (int)((ii+5)/HZ));
 			return -ETIME;
 		}
 
@@ -2778,9 +2998,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
 	}
 
 	if (statefault < 3) {
-		printk(MYIOC_s_INFO_FMT "Recovered from %s\n",
-				ioc->name,
-				statefault==1 ? "stuck handshake" : "IOC FAULT");
+		printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
+			statefault == 1 ? "stuck handshake" : "IOC FAULT");
 	}
 
 	return hard_reset_done;
@@ -2833,8 +3052,9 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
 
 	/* IOC *must* NOT be in RESET state! */
 	if (ioc->last_state == MPI_IOC_STATE_RESET) {
-		printk(MYIOC_s_ERR_FMT "Can't get IOCFacts NOT READY! (%08x)\n",
-		    ioc->name, ioc->last_state );
+		printk(KERN_ERR MYNAM
+		    ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
+		    ioc->name, ioc->last_state);
 		return -44;
 	}
 
@@ -2896,7 +3116,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
 	 *	Old: u16{Major(4),Minor(4),SubMinor(8)}
 	 *	New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
 	 */
-	if (facts->MsgVersion < 0x0102) {
+	if (facts->MsgVersion < MPI_VERSION_01_02) {
 		/*
 		 *	Handle old FC f/w style, convert to new...
 		 */
@@ -2908,9 +3128,11 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
 		facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
 
 	facts->ProductID = le16_to_cpu(facts->ProductID);
+
 	if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
 	    > MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
 		ioc->ir_firmware = 1;
+
 	facts->CurrentHostMfaHighAddr =
 		le32_to_cpu(facts->CurrentHostMfaHighAddr);
 	facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
@@ -2926,7 +3148,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2926 * to 14 in MPI-1.01.0x. 3148 * to 14 in MPI-1.01.0x.
2927 */ 3149 */
2928 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 && 3150 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
2929 facts->MsgVersion > 0x0100) { 3151 facts->MsgVersion > MPI_VERSION_01_00) {
2930 facts->FWImageSize = le32_to_cpu(facts->FWImageSize); 3152 facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
2931 } 3153 }
2932 3154
@@ -3108,6 +3330,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3108 3330
3109 ioc_init.MaxDevices = (U8)ioc->devices_per_bus; 3331 ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
3110 ioc_init.MaxBuses = (U8)ioc->number_of_buses; 3332 ioc_init.MaxBuses = (U8)ioc->number_of_buses;
3333
3111 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n", 3334 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
3112 ioc->name, ioc->facts.MsgVersion)); 3335 ioc->name, ioc->facts.MsgVersion));
3113 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) { 3336 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
@@ -3122,7 +3345,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3122 } 3345 }
3123 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ 3346 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
3124 3347
3125 if (sizeof(dma_addr_t) == sizeof(u64)) { 3348 if (ioc->sg_addr_size == sizeof(u64)) {
3126 /* Save the upper 32-bits of the request 3349 /* Save the upper 32-bits of the request
3127 * (reply) and sense buffers. 3350 * (reply) and sense buffers.
3128 */ 3351 */
@@ -3325,11 +3548,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3325 FWUpload_t *prequest; 3548 FWUpload_t *prequest;
3326 FWUploadReply_t *preply; 3549 FWUploadReply_t *preply;
3327 FWUploadTCSGE_t *ptcsge; 3550 FWUploadTCSGE_t *ptcsge;
3328 int sgeoffset;
3329 u32 flagsLength; 3551 u32 flagsLength;
3330 int ii, sz, reply_sz; 3552 int ii, sz, reply_sz;
3331 int cmdStatus; 3553 int cmdStatus;
3332 3554 int request_size;
3333 /* If the image size is 0, we are done. 3555 /* If the image size is 0, we are done.
3334 */ 3556 */
3335 if ((sz = ioc->facts.FWImageSize) == 0) 3557 if ((sz = ioc->facts.FWImageSize) == 0)
@@ -3364,42 +3586,41 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3364 ptcsge->ImageSize = cpu_to_le32(sz); 3586 ptcsge->ImageSize = cpu_to_le32(sz);
3365 ptcsge++; 3587 ptcsge++;
3366 3588
3367 sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t);
3368
3369 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz; 3589 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
3370 mpt_add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma); 3590 ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
3371 3591 request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
3372 sgeoffset += sizeof(u32) + sizeof(dma_addr_t); 3592 ioc->SGE_size;
3373 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": Sending FW Upload (req @ %p) sgeoffset=%d \n", 3593 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
3374 ioc->name, prequest, sgeoffset)); 3594 " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
3595 ioc->facts.FWImageSize, request_size));
3375 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest); 3596 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
3376 3597
3377 ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest, 3598 ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
3378 reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag); 3599 reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
3379 3600
3380 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Upload completed rc=%x \n", ioc->name, ii)); 3601 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
3602 "rc=%x \n", ioc->name, ii));
3381 3603
3382 cmdStatus = -EFAULT; 3604 cmdStatus = -EFAULT;
3383 if (ii == 0) { 3605 if (ii == 0) {
3384 /* Handshake transfer was complete and successful. 3606 /* Handshake transfer was complete and successful.
3385 * Check the Reply Frame. 3607 * Check the Reply Frame.
3386 */ 3608 */
3387 int status, transfer_sz; 3609 int status;
3388 status = le16_to_cpu(preply->IOCStatus); 3610 status = le16_to_cpu(preply->IOCStatus) &
3389 if (status == MPI_IOCSTATUS_SUCCESS) { 3611 MPI_IOCSTATUS_MASK;
3390 transfer_sz = le32_to_cpu(preply->ActualImageSize); 3612 if (status == MPI_IOCSTATUS_SUCCESS &&
3391 if (transfer_sz == sz) 3613 ioc->facts.FWImageSize ==
3614 le32_to_cpu(preply->ActualImageSize))
3392 cmdStatus = 0; 3615 cmdStatus = 0;
3393 }
3394 } 3616 }
3395 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n", 3617 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
3396 ioc->name, cmdStatus)); 3618 ioc->name, cmdStatus));
3397 3619
3398 3620
3399 if (cmdStatus) { 3621 if (cmdStatus) {
3400 3622 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
3401 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": fw upload failed, freeing image \n", 3623 "freeing image \n", ioc->name));
3402 ioc->name));
3403 mpt_free_fw_memory(ioc); 3624 mpt_free_fw_memory(ioc);
3404 } 3625 }
3405 kfree(prequest); 3626 kfree(prequest);
@@ -3723,6 +3944,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3723 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 3944 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3724 3945
3725 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) { 3946 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
3947
3948 if (!ignore)
3949 return 0;
3950
3726 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset " 3951 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
3727 "address=%p\n", ioc->name, __func__, 3952 "address=%p\n", ioc->name, __func__,
3728 &ioc->chip->Doorbell, &ioc->chip->Reset_1078)); 3953 &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
@@ -3740,6 +3965,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3740 "looking for READY STATE: doorbell=%x" 3965 "looking for READY STATE: doorbell=%x"
3741 " count=%d\n", 3966 " count=%d\n",
3742 ioc->name, doorbell, count)); 3967 ioc->name, doorbell, count));
3968
3743 if (doorbell == MPI_IOC_STATE_READY) { 3969 if (doorbell == MPI_IOC_STATE_READY) {
3744 return 1; 3970 return 1;
3745 } 3971 }
@@ -3890,6 +4116,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3890 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); 4116 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
3891 doorbell &= MPI_IOC_STATE_MASK; 4117 doorbell &= MPI_IOC_STATE_MASK;
3892 4118
4119 drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4120 "looking for READY STATE: doorbell=%x"
4121 " count=%d\n", ioc->name, doorbell, count));
4122
3893 if (doorbell == MPI_IOC_STATE_READY) { 4123 if (doorbell == MPI_IOC_STATE_READY) {
3894 break; 4124 break;
3895 } 4125 }
@@ -3901,6 +4131,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3901 mdelay (1000); 4131 mdelay (1000);
3902 } 4132 }
3903 } 4133 }
4134
4135 if (doorbell != MPI_IOC_STATE_READY)
4136 printk(MYIOC_s_ERR_FMT "Failed to come READY "
4137 "after reset! IocState=%x", ioc->name,
4138 doorbell);
3904 } 4139 }
3905 } 4140 }
3906 4141
@@ -4019,8 +4254,9 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
4019 if (sleepFlag != CAN_SLEEP) 4254 if (sleepFlag != CAN_SLEEP)
4020 count *= 10; 4255 count *= 10;
4021 4256
4022 printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", 4257 printk(MYIOC_s_ERR_FMT
4023 ioc->name, (int)((count+5)/HZ)); 4258 "Wait IOC_READY state (0x%x) timeout(%d)!\n",
4259 ioc->name, state, (int)((count+5)/HZ));
4024 return -ETIME; 4260 return -ETIME;
4025 } 4261 }
4026 4262
@@ -4090,24 +4326,29 @@ initChainBuffers(MPT_ADAPTER *ioc)
4090 * num_sge = num sge in request frame + last chain buffer 4326 * num_sge = num sge in request frame + last chain buffer
4091 * scale = num sge per chain buffer if no chain element 4327 * scale = num sge per chain buffer if no chain element
4092 */ 4328 */
4093 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 4329 scale = ioc->req_sz / ioc->SGE_size;
4094 if (sizeof(dma_addr_t) == sizeof(u64)) 4330 if (ioc->sg_addr_size == sizeof(u64))
4095 num_sge = scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); 4331 num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size;
4096 else 4332 else
4097 num_sge = 1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); 4333 num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
4098 4334
4099 if (sizeof(dma_addr_t) == sizeof(u64)) { 4335 if (ioc->sg_addr_size == sizeof(u64)) {
4100 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + 4336 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
4101 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); 4337 (ioc->req_sz - 60) / ioc->SGE_size;
4102 } else { 4338 } else {
4103 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + 4339 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
4104 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); 4340 scale + (ioc->req_sz - 64) / ioc->SGE_size;
4105 } 4341 }
4106 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n", 4342 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
4107 ioc->name, num_sge, numSGE)); 4343 ioc->name, num_sge, numSGE));
4108 4344
4109 if ( numSGE > MPT_SCSI_SG_DEPTH ) 4345 if (ioc->bus_type == FC) {
4110 numSGE = MPT_SCSI_SG_DEPTH; 4346 if (numSGE > MPT_SCSI_FC_SG_DEPTH)
4347 numSGE = MPT_SCSI_FC_SG_DEPTH;
4348 } else {
4349 if (numSGE > MPT_SCSI_SG_DEPTH)
4350 numSGE = MPT_SCSI_SG_DEPTH;
4351 }
4111 4352
4112 num_chain = 1; 4353 num_chain = 1;
4113 while (numSGE - num_sge > 0) { 4354 while (numSGE - num_sge > 0) {
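The sizing above now comes entirely from the per-adapter ioc->SGE_size and ioc->sg_addr_size fields instead of sizeof(dma_addr_t). A minimal stand-alone model of the same arithmetic, using assumed example values for the request frame size, SGE size and MaxChainDepth (none of these are real hardware numbers):

/* Stand-alone model of the chain-buffer sizing in initChainBuffers().
 * req_sz, sge_size and max_chain_depth are assumed example values.
 */
#include <stdio.h>

int main(void)
{
        int req_sz = 128;         /* bytes per request frame (assumed) */
        int sge_size = 12;        /* 64-bit SGE: 4-byte header + 8-byte address */
        int sg_addr_size = 8;     /* sizeof(u64) addressing */
        int max_chain_depth = 8;  /* ioc->facts.MaxChainDepth (assumed) */
        int scale, num_sge, numSGE;

        scale = req_sz / sge_size;
        if (sg_addr_size == 8)
                num_sge = scale + (req_sz - 60) / sge_size;
        else
                num_sge = 1 + scale + (req_sz - 64) / sge_size;

        if (sg_addr_size == 8)
                numSGE = (scale - 1) * (max_chain_depth - 1) + scale +
                         (req_sz - 60) / sge_size;
        else
                numSGE = 1 + (scale - 1) * (max_chain_depth - 1) + scale +
                         (req_sz - 64) / sge_size;

        printf("scale=%d num_sge=%d numSGE=%d\n", scale, num_sge, numSGE);
        return 0;
}

With these inputs scale is 10, num_sge is 15 and numSGE is 78; the hunk above then clamps numSGE to MPT_SCSI_FC_SG_DEPTH or MPT_SCSI_SG_DEPTH depending on bus type before computing num_chain.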
@@ -4161,12 +4402,42 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4161 dma_addr_t alloc_dma; 4402 dma_addr_t alloc_dma;
4162 u8 *mem; 4403 u8 *mem;
4163 int i, reply_sz, sz, total_size, num_chain; 4404 int i, reply_sz, sz, total_size, num_chain;
4405 u64 dma_mask;
4406
4407 dma_mask = 0;
4164 4408
4165 /* Prime reply FIFO... */ 4409 /* Prime reply FIFO... */
4166 4410
4167 if (ioc->reply_frames == NULL) { 4411 if (ioc->reply_frames == NULL) {
4168 if ( (num_chain = initChainBuffers(ioc)) < 0) 4412 if ( (num_chain = initChainBuffers(ioc)) < 0)
4169 return -1; 4413 return -1;
4414 /*
4415 * 1078 errata workaround for the 36GB limitation
4416 */
4417 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
4418 ioc->dma_mask > DMA_35BIT_MASK) {
4419 if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
4420 && !pci_set_consistent_dma_mask(ioc->pcidev,
4421 DMA_BIT_MASK(32))) {
4422 dma_mask = DMA_35BIT_MASK;
4423 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4424 "setting 35 bit addressing for "
4425 "Request/Reply/Chain and Sense Buffers\n",
4426 ioc->name));
4427 } else {
 4428 /* Resetting DMA mask to 64 bit */
4429 pci_set_dma_mask(ioc->pcidev,
4430 DMA_BIT_MASK(64));
4431 pci_set_consistent_dma_mask(ioc->pcidev,
4432 DMA_BIT_MASK(64));
4433
4434 printk(MYIOC_s_ERR_FMT
4435 "failed setting 35 bit addressing for "
4436 "Request/Reply/Chain and Sense Buffers\n",
4437 ioc->name);
4438 return -1;
4439 }
4440 }
4170 4441
4171 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth); 4442 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
4172 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n", 4443 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
@@ -4305,9 +4576,16 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4305 alloc_dma += ioc->reply_sz; 4576 alloc_dma += ioc->reply_sz;
4306 } 4577 }
4307 4578
4579 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
4580 ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
4581 ioc->dma_mask))
4582 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4583 "restoring 64 bit addressing\n", ioc->name));
4584
4308 return 0; 4585 return 0;
4309 4586
4310out_fail: 4587out_fail:
4588
4311 if (ioc->alloc != NULL) { 4589 if (ioc->alloc != NULL) {
4312 sz = ioc->alloc_sz; 4590 sz = ioc->alloc_sz;
4313 pci_free_consistent(ioc->pcidev, 4591 pci_free_consistent(ioc->pcidev,
@@ -4324,6 +4602,13 @@ out_fail:
4324 ioc->sense_buf_pool, ioc->sense_buf_pool_dma); 4602 ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
4325 ioc->sense_buf_pool = NULL; 4603 ioc->sense_buf_pool = NULL;
4326 } 4604 }
4605
4606 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
4607 DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
4608 DMA_BIT_MASK(64)))
4609 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4610 "restoring 64 bit addressing\n", ioc->name));
4611
4327 return -1; 4612 return -1;
4328} 4613}
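The 1078 path above temporarily narrows the DMA mask so the request/reply/chain and sense buffers land below the erratum boundary, then restores 64-bit addressing both on success and on the out_fail path. A hedged sketch of that narrow-allocate-restore pattern against the PCI DMA API of this era; the helper name and error handling are illustrative, not the driver's:

/* Sketch only: lower the DMA mask before allocating coherent buffers
 * that must sit below a device erratum boundary, then restore full
 * addressing (the real code narrows to DMA_35BIT_MASK).
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int alloc_low_coherent(struct pci_dev *pdev, size_t sz,
                              void **virt, dma_addr_t *dma)
{
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
            pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
                return -EIO;

        *virt = pci_alloc_consistent(pdev, sz, dma);

        /* restore 64-bit addressing for everything allocated later */
        pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));

        return *virt ? 0 : -ENOMEM;
}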
4329 4614
@@ -4759,7 +5044,14 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4759 SasIoUnitControlReply_t *sasIoUnitCntrReply; 5044 SasIoUnitControlReply_t *sasIoUnitCntrReply;
4760 MPT_FRAME_HDR *mf = NULL; 5045 MPT_FRAME_HDR *mf = NULL;
4761 MPIHeader_t *mpi_hdr; 5046 MPIHeader_t *mpi_hdr;
5047 int ret = 0;
5048 unsigned long timeleft;
5049
5050 mutex_lock(&ioc->mptbase_cmds.mutex);
4762 5051
5052 /* init the internal cmd struct */
5053 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
5054 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
4763 5055
4764 /* ensure garbage is not sent to fw */ 5056
4765 switch(persist_opcode) { 5057 switch(persist_opcode) {
@@ -4769,17 +5061,19 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4769 break; 5061 break;
4770 5062
4771 default: 5063 default:
4772 return -1; 5064 ret = -1;
4773 break; 5065 goto out;
4774 } 5066 }
4775 5067
4776 printk("%s: persist_opcode=%x\n",__func__, persist_opcode); 5068 printk(KERN_DEBUG "%s: persist_opcode=%x\n",
5069 __func__, persist_opcode);
4777 5070
4778 /* Get a MF for this command. 5071 /* Get a MF for this command.
4779 */ 5072 */
4780 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5073 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4781 printk("%s: no msg frames!\n",__func__); 5074 printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
4782 return -1; 5075 ret = -1;
5076 goto out;
4783 } 5077 }
4784 5078
4785 mpi_hdr = (MPIHeader_t *) mf; 5079 mpi_hdr = (MPIHeader_t *) mf;
@@ -4789,27 +5083,42 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4789 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext; 5083 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
4790 sasIoUnitCntrReq->Operation = persist_opcode; 5084 sasIoUnitCntrReq->Operation = persist_opcode;
4791 5085
4792 init_timer(&ioc->persist_timer);
4793 ioc->persist_timer.data = (unsigned long) ioc;
4794 ioc->persist_timer.function = mpt_timer_expired;
4795 ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
4796 ioc->persist_wait_done=0;
4797 add_timer(&ioc->persist_timer);
4798 mpt_put_msg_frame(mpt_base_index, ioc, mf); 5086 mpt_put_msg_frame(mpt_base_index, ioc, mf);
4799 wait_event(mpt_waitq, ioc->persist_wait_done); 5087 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
5088 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
5089 ret = -ETIME;
5090 printk(KERN_DEBUG "%s: failed\n", __func__);
5091 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
5092 goto out;
5093 if (!timeleft) {
5094 printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
5095 ioc->name, __func__);
5096 mpt_HardResetHandler(ioc, CAN_SLEEP);
5097 mpt_free_msg_frame(ioc, mf);
5098 }
5099 goto out;
5100 }
5101
5102 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
5103 ret = -1;
5104 goto out;
5105 }
4800 5106
4801 sasIoUnitCntrReply = 5107 sasIoUnitCntrReply =
4802 (SasIoUnitControlReply_t *)ioc->persist_reply_frame; 5108 (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
4803 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) { 5109 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
4804 printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", 5110 printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
4805 __func__, 5111 __func__, sasIoUnitCntrReply->IOCStatus,
4806 sasIoUnitCntrReply->IOCStatus,
4807 sasIoUnitCntrReply->IOCLogInfo); 5112 sasIoUnitCntrReply->IOCLogInfo);
4808 return -1; 5113 printk(KERN_DEBUG "%s: failed\n", __func__);
4809 } 5114 ret = -1;
5115 } else
5116 printk(KERN_DEBUG "%s: success\n", __func__);
5117 out:
4810 5118
4811 printk("%s: success\n",__func__); 5119 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
4812 return 0; 5120 mutex_unlock(&ioc->mptbase_cmds.mutex);
5121 return ret;
4813} 5122}
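The rework above drops the driver-private persist_timer/wait_event() pairing in favor of the generic completion API, serialized by the mptbase_cmds mutex and tracked through the MPT_MGMT_STATUS_* bits. A minimal sketch of that internal-command pattern; struct internal_cmd, issue_internal_cmd() and the CMD_* bits are stand-ins for the driver's own symbols:

/* One mutex per command slot, a status word, and a completion that is
 * signalled by either the reply handler or the IOC-reset handler.
 */
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/errno.h>

#define CMD_PENDING     0x01    /* command outstanding */
#define CMD_GOOD        0x02    /* reply handler saw a good status */
#define CMD_RESET       0x04    /* an IOC reset completed the command */

struct internal_cmd {
        struct mutex            mutex;
        struct completion       done;
        unsigned int            status;
};

static int issue_internal_cmd(struct internal_cmd *cmd,
                              void (*fire)(void *), void *arg,
                              unsigned long timeout)
{
        unsigned long timeleft;
        int ret = 0;

        mutex_lock(&cmd->mutex);
        cmd->status = CMD_PENDING;
        init_completion(&cmd->done);

        fire(arg);              /* e.g. mpt_put_msg_frame() */
        timeleft = wait_for_completion_timeout(&cmd->done, timeout);

        if (!(cmd->status & CMD_GOOD)) {
                ret = -ETIME;
                if (!timeleft && !(cmd->status & CMD_RESET))
                        ret = -EIO;     /* IOC never answered; callers
                                           escalate to a hard reset here */
        }

        cmd->status = 0;
        mutex_unlock(&cmd->mutex);
        return ret;
}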
4814 5123
4815/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5124/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5394,17 +5703,20 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
5394 * -ENOMEM if pci_alloc failed 5703 * -ENOMEM if pci_alloc failed
5395 **/ 5704 **/
5396int 5705int
5397mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk) 5706mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
5707 RaidPhysDiskPage0_t *phys_disk)
5398{ 5708{
5399 CONFIGPARMS cfg; 5709 CONFIGPARMS cfg;
5400 ConfigPageHeader_t hdr; 5710 ConfigPageHeader_t hdr;
5401 dma_addr_t dma_handle; 5711 dma_addr_t dma_handle;
5402 pRaidPhysDiskPage0_t buffer = NULL; 5712 pRaidPhysDiskPage0_t buffer = NULL;
5403 int rc; 5713 int rc;
5404 5714
5405 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 5715 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5406 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 5716 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5717 memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));
5407 5718
5719 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
5408 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; 5720 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5409 cfg.cfghdr.hdr = &hdr; 5721 cfg.cfghdr.hdr = &hdr;
5410 cfg.physAddr = -1; 5722 cfg.physAddr = -1;
@@ -5451,6 +5763,161 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t
5451} 5763}
5452 5764
5453/** 5765/**
 5766 * mpt_raid_phys_disk_get_num_paths - returns the number of paths associated with this phys_disk_num
 5767 * @ioc: Pointer to an Adapter Structure
 5768 * @phys_disk_num: io unit unique phys disk num generated by the ioc
 5769 *
 5770 * Return:
 5771 * returns the number of paths
5772 **/
5773int
5774mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
5775{
5776 CONFIGPARMS cfg;
5777 ConfigPageHeader_t hdr;
5778 dma_addr_t dma_handle;
5779 pRaidPhysDiskPage1_t buffer = NULL;
5780 int rc;
5781
5782 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5783 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5784
5785 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5786 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5787 hdr.PageNumber = 1;
5788 cfg.cfghdr.hdr = &hdr;
5789 cfg.physAddr = -1;
5790 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5791
5792 if (mpt_config(ioc, &cfg) != 0) {
5793 rc = 0;
5794 goto out;
5795 }
5796
5797 if (!hdr.PageLength) {
5798 rc = 0;
5799 goto out;
5800 }
5801
5802 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5803 &dma_handle);
5804
5805 if (!buffer) {
5806 rc = 0;
5807 goto out;
5808 }
5809
5810 cfg.physAddr = dma_handle;
5811 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5812 cfg.pageAddr = phys_disk_num;
5813
5814 if (mpt_config(ioc, &cfg) != 0) {
5815 rc = 0;
5816 goto out;
5817 }
5818
5819 rc = buffer->NumPhysDiskPaths;
5820 out:
5821
5822 if (buffer)
5823 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5824 dma_handle);
5825
5826 return rc;
5827}
5828EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths);
5829
5830/**
5831 * mpt_raid_phys_disk_pg1 - returns phys disk page 1
 5832 * @ioc: Pointer to an Adapter Structure
5833 * @phys_disk_num: io unit unique phys disk num generated by the ioc
5834 * @phys_disk: requested payload data returned
5835 *
5836 * Return:
5837 * 0 on success
5838 * -EFAULT if read of config page header fails or data pointer not NULL
5839 * -ENOMEM if pci_alloc failed
5840 **/
5841int
5842mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
5843 RaidPhysDiskPage1_t *phys_disk)
5844{
5845 CONFIGPARMS cfg;
5846 ConfigPageHeader_t hdr;
5847 dma_addr_t dma_handle;
5848 pRaidPhysDiskPage1_t buffer = NULL;
5849 int rc;
5850 int i;
5851 __le64 sas_address;
5852
5853 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5854 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5855 rc = 0;
5856
5857 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5858 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5859 hdr.PageNumber = 1;
5860 cfg.cfghdr.hdr = &hdr;
5861 cfg.physAddr = -1;
5862 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5863
5864 if (mpt_config(ioc, &cfg) != 0) {
5865 rc = -EFAULT;
5866 goto out;
5867 }
5868
5869 if (!hdr.PageLength) {
5870 rc = -EFAULT;
5871 goto out;
5872 }
5873
5874 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5875 &dma_handle);
5876
5877 if (!buffer) {
5878 rc = -ENOMEM;
5879 goto out;
5880 }
5881
5882 cfg.physAddr = dma_handle;
5883 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5884 cfg.pageAddr = phys_disk_num;
5885
5886 if (mpt_config(ioc, &cfg) != 0) {
5887 rc = -EFAULT;
5888 goto out;
5889 }
5890
5891 phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
5892 phys_disk->PhysDiskNum = phys_disk_num;
5893 for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
5894 phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
5895 phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
5896 phys_disk->Path[i].OwnerIdentifier =
5897 buffer->Path[i].OwnerIdentifier;
5898 phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
5899 memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
5900 sas_address = le64_to_cpu(sas_address);
5901 memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
5902 memcpy(&sas_address,
5903 &buffer->Path[i].OwnerWWID, sizeof(__le64));
5904 sas_address = le64_to_cpu(sas_address);
5905 memcpy(&phys_disk->Path[i].OwnerWWID,
5906 &sas_address, sizeof(__le64));
5907 }
5908
5909 out:
5910
5911 if (buffer)
5912 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5913 dma_handle);
5914
5915 return rc;
5916}
5917EXPORT_SYMBOL(mpt_raid_phys_disk_pg1);
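The two new exports are meant to be used together: query the path count first, then allocate a page-1 buffer large enough for that many path descriptors and read it. A hedged caller sketch; the allocation size via offsetof() and the RAID_PHYS_DISK1_PATH element type are assumptions taken from the MPI headers rather than anything shown in this hunk, and the driver's own mptbase.h is assumed to be included:

/* Hypothetical caller, e.g. from mptsas, enumerating the paths behind a
 * RAID-hidden physical disk.
 */
#include <linux/slab.h>

static void report_phys_disk_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
{
        RaidPhysDiskPage1_t *pg1;
        int num_paths, sz, i;

        num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
        if (!num_paths)
                return;

        /* page 1 carries one path descriptor per path */
        sz = offsetof(RaidPhysDiskPage1_t, Path) +
             num_paths * sizeof(RAID_PHYS_DISK1_PATH);   /* assumed type name */
        pg1 = kzalloc(sz, GFP_KERNEL);
        if (!pg1)
                return;

        if (!mpt_raid_phys_disk_pg1(ioc, phys_disk_num, pg1)) {
                for (i = 0; i < pg1->NumPhysDiskPaths; i++)
                        printk(MYIOC_s_INFO_FMT
                               "phys disk %d, path %d: id=%d bus=%d\n",
                               ioc->name, phys_disk_num, i,
                               pg1->Path[i].PhysDiskID,
                               pg1->Path[i].PhysDiskBus);
        }
        kfree(pg1);
}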
5918
5919
5920/**
5454 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes 5921 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
5455 * @ioc: Pointer to an Adapter Structure 5922
5456 * 5923 *
@@ -5775,30 +6242,28 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
5775 * SendEventNotification - Send EventNotification (on or off) request to adapter 6242 * SendEventNotification - Send EventNotification (on or off) request to adapter
5776 * @ioc: Pointer to MPT_ADAPTER structure 6243 * @ioc: Pointer to MPT_ADAPTER structure
5777 * @EvSwitch: Event switch flags 6244 * @EvSwitch: Event switch flags
6245 * @sleepFlag: Specifies whether the process can sleep
5778 */ 6246 */
5779static int 6247static int
5780SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch) 6248SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
5781{ 6249{
5782 EventNotification_t *evnp; 6250 EventNotification_t evn;
6251 MPIDefaultReply_t reply_buf;
5783 6252
5784 evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc); 6253 memset(&evn, 0, sizeof(EventNotification_t));
5785 if (evnp == NULL) { 6254 memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
5786 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
5787 ioc->name));
5788 return 0;
5789 }
5790 memset(evnp, 0, sizeof(*evnp));
5791
5792 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp));
5793 6255
5794 evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 6256 evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
5795 evnp->ChainOffset = 0; 6257 evn.Switch = EvSwitch;
5796 evnp->MsgFlags = 0; 6258 evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
5797 evnp->Switch = EvSwitch;
5798 6259
5799 mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp); 6260 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6261 "Sending EventNotification (%d) request %p\n",
6262 ioc->name, EvSwitch, &evn));
5800 6263
5801 return 0; 6264 return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
6265 (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
6266 sleepFlag);
5802} 6267}
5803 6268
5804/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6269/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5814,7 +6279,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
5814 6279
5815 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6280 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5816 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 6281 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
5817 ioc->name,__func__)); 6282 ioc->name, __func__));
5818 return -1; 6283 return -1;
5819 } 6284 }
5820 6285
@@ -5851,12 +6316,19 @@ int
5851mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) 6316mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5852{ 6317{
5853 Config_t *pReq; 6318 Config_t *pReq;
6319 ConfigReply_t *pReply;
5854 ConfigExtendedPageHeader_t *pExtHdr = NULL; 6320 ConfigExtendedPageHeader_t *pExtHdr = NULL;
5855 MPT_FRAME_HDR *mf; 6321 MPT_FRAME_HDR *mf;
5856 unsigned long flags; 6322 int ii;
5857 int ii, rc;
5858 int flagsLength; 6323 int flagsLength;
5859 int in_isr; 6324 long timeout;
6325 int ret;
6326 u8 page_type = 0, extend_page;
6327 unsigned long timeleft;
6328 unsigned long flags;
6329 int in_isr;
6330 u8 issue_hard_reset = 0;
6331 u8 retry_count = 0;
5860 6332
5861 /* Prevent calling wait_event() (below), if caller happens 6333 /* Prevent calling wait_event() (below), if caller happens
5862 * to be in ISR context, because that is fatal! 6334 * to be in ISR context, because that is fatal!
@@ -5866,15 +6338,43 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5866 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", 6338 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
5867 ioc->name)); 6339 ioc->name));
5868 return -EPERM; 6340 return -EPERM;
6341 }
6342
6343 /* don't send a config page during diag reset */
6344 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6345 if (ioc->ioc_reset_in_progress) {
6346 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6347 "%s: busy with host reset\n", ioc->name, __func__));
6348 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6349 return -EBUSY;
6350 }
6351 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6352
6353 /* don't send if no chance of success */
6354 if (!ioc->active ||
6355 mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
6356 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6357 "%s: ioc not operational, %d, %xh\n",
6358 ioc->name, __func__, ioc->active,
6359 mpt_GetIocState(ioc, 0)));
6360 return -EFAULT;
5869 } 6361 }
5870 6362
6363 retry_config:
6364 mutex_lock(&ioc->mptbase_cmds.mutex);
6365 /* init the internal cmd struct */
6366 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
6367 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
6368
5871 /* Get and Populate a free Frame 6369 /* Get and Populate a free Frame
5872 */ 6370 */
5873 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6371 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5874 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n", 6372 dcprintk(ioc, printk(MYIOC_s_WARN_FMT
5875 ioc->name)); 6373 "mpt_config: no msg frames!\n", ioc->name));
5876 return -EAGAIN; 6374 ret = -EAGAIN;
6375 goto out;
5877 } 6376 }
6377
5878 pReq = (Config_t *)mf; 6378 pReq = (Config_t *)mf;
5879 pReq->Action = pCfg->action; 6379 pReq->Action = pCfg->action;
5880 pReq->Reserved = 0; 6380 pReq->Reserved = 0;
@@ -5900,7 +6400,9 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5900 pReq->ExtPageType = pExtHdr->ExtPageType; 6400 pReq->ExtPageType = pExtHdr->ExtPageType;
5901 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; 6401 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
5902 6402
5903 /* Page Length must be treated as a reserved field for the extended header. */ 6403 /* Page Length must be treated as a reserved field for the
6404 * extended header.
6405 */
5904 pReq->Header.PageLength = 0; 6406 pReq->Header.PageLength = 0;
5905 } 6407 }
5906 6408
@@ -5913,78 +6415,91 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5913 else 6415 else
5914 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 6416 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
5915 6417
5916 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) { 6418 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
6419 MPI_CONFIG_PAGETYPE_EXTENDED) {
5917 flagsLength |= pExtHdr->ExtPageLength * 4; 6420 flagsLength |= pExtHdr->ExtPageLength * 4;
5918 6421 page_type = pReq->ExtPageType;
5919 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", 6422 extend_page = 1;
5920 ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action)); 6423 } else {
5921 }
5922 else {
5923 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4; 6424 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
5924 6425 page_type = pReq->Header.PageType;
5925 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", 6426 extend_page = 0;
5926 ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
5927 } 6427 }
5928 6428
5929 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); 6429 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5930 6430 "Sending Config request type 0x%x, page 0x%x and action %d\n",
5931 /* Append pCfg pointer to end of mf 6431 ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));
5932 */
5933 *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
5934
5935 /* Initalize the timer
5936 */
5937 init_timer_on_stack(&pCfg->timer);
5938 pCfg->timer.data = (unsigned long) ioc;
5939 pCfg->timer.function = mpt_timer_expired;
5940 pCfg->wait_done = 0;
5941
5942 /* Set the timer; ensure 10 second minimum */
5943 if (pCfg->timeout < 10)
5944 pCfg->timer.expires = jiffies + HZ*10;
5945 else
5946 pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
5947
5948 /* Add to end of Q, set timer and then issue this command */
5949 spin_lock_irqsave(&ioc->FreeQlock, flags);
5950 list_add_tail(&pCfg->linkage, &ioc->configQ);
5951 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
5952 6432
5953 add_timer(&pCfg->timer); 6433 ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
6434 timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
5954 mpt_put_msg_frame(mpt_base_index, ioc, mf); 6435 mpt_put_msg_frame(mpt_base_index, ioc, mf);
5955 wait_event(mpt_waitq, pCfg->wait_done); 6436 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
6437 timeout);
6438 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
6439 ret = -ETIME;
6440 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6441 "Failed Sending Config request type 0x%x, page 0x%x,"
6442 " action %d, status %xh, time left %ld\n\n",
6443 ioc->name, page_type, pReq->Header.PageNumber,
6444 pReq->Action, ioc->mptbase_cmds.status, timeleft));
6445 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
6446 goto out;
6447 if (!timeleft)
6448 issue_hard_reset = 1;
6449 goto out;
6450 }
5956 6451
5957 /* mf has been freed - do not access */ 6452 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
6453 ret = -1;
6454 goto out;
6455 }
6456 pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
6457 ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
6458 if (ret == MPI_IOCSTATUS_SUCCESS) {
6459 if (extend_page) {
6460 pCfg->cfghdr.ehdr->ExtPageLength =
6461 le16_to_cpu(pReply->ExtPageLength);
6462 pCfg->cfghdr.ehdr->ExtPageType =
6463 pReply->ExtPageType;
6464 }
6465 pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
6466 pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
6467 pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
6468 pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
5958 6469
5959 rc = pCfg->status; 6470 }
5960 6471
5961 return rc; 6472 if (retry_count)
5962} 6473 printk(MYIOC_s_INFO_FMT "Retry completed "
6474 "ret=0x%x timeleft=%ld\n",
6475 ioc->name, ret, timeleft);
5963 6476
5964/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6477 dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
5965/** 6478 ret, le32_to_cpu(pReply->IOCLogInfo)));
5966 * mpt_timer_expired - Callback for timer process.
5967 * Used only internal config functionality.
5968 * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
5969 */
5970static void
5971mpt_timer_expired(unsigned long data)
5972{
5973 MPT_ADAPTER *ioc = (MPT_ADAPTER *) data;
5974
5975 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired! \n", ioc->name));
5976 6479
5977 /* Perform a FW reload */ 6480out:
5978 if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
5979 printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
5980 6481
5981 /* No more processing. 6482 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
5982 * Hard reset clean-up will wake up 6483 mutex_unlock(&ioc->mptbase_cmds.mutex);
5983 * process and free all resources. 6484 if (issue_hard_reset) {
5984 */ 6485 issue_hard_reset = 0;
5985 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired complete!\n", ioc->name)); 6486 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
6487 ioc->name, __func__);
6488 mpt_HardResetHandler(ioc, CAN_SLEEP);
6489 mpt_free_msg_frame(ioc, mf);
6490 /* attempt one retry for a timed out command */
6491 if (!retry_count) {
6492 printk(MYIOC_s_INFO_FMT
6493 "Attempting Retry Config request"
6494 " type 0x%x, page 0x%x,"
6495 " action %d\n", ioc->name, page_type,
6496 pCfg->cfghdr.hdr->PageNumber, pCfg->action);
6497 retry_count++;
6498 goto retry_config;
6499 }
6500 }
6501 return ret;
5986 6502
5987 return;
5988} 6503}
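The reworked mpt_config() keys its escalation off the return value of wait_for_completion_timeout(), which is the number of jiffies remaining when the reply arrived and 0 on a true timeout; a command finished off by an IOC reset is reported as a failure but is never escalated. An illustrative restatement of that decision in a hypothetical helper (the real checks live inline in mpt_config()):

#include <linux/completion.h>
#include <linux/errno.h>

static int wait_config_reply(MPT_MGMT *cmds, unsigned long timeout,
                             u8 *issue_hard_reset)
{
        unsigned long timeleft;

        timeleft = wait_for_completion_timeout(&cmds->done, timeout);
        if (cmds->status & MPT_MGMT_STATUS_COMMAND_GOOD)
                return 0;                /* reply frame is in cmds->reply */
        if (cmds->status & MPT_MGMT_STATUS_DID_IOCRESET)
                return -ETIME;           /* a reset already cleaned up */
        if (!timeleft)
                *issue_hard_reset = 1;   /* true timeout: hard reset + one retry */
        return -ETIME;
}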
5989 6504
5990/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6505/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5998,41 +6513,34 @@ mpt_timer_expired(unsigned long data)
5998static int 6513static int
5999mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 6514mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
6000{ 6515{
6001 CONFIGPARMS *pCfg; 6516 switch (reset_phase) {
6002 unsigned long flags; 6517 case MPT_IOC_SETUP_RESET:
6003 6518 ioc->taskmgmt_quiesce_io = 1;
6004 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6519 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6005 ": IOC %s_reset routed to MPT base driver!\n", 6520 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
6006 ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( 6521 break;
6007 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); 6522 case MPT_IOC_PRE_RESET:
6008 6523 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6009 if (reset_phase == MPT_IOC_SETUP_RESET) { 6524 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
6010 ; 6525 break;
6011 } else if (reset_phase == MPT_IOC_PRE_RESET) { 6526 case MPT_IOC_POST_RESET:
6012 /* If the internal config Q is not empty - 6527 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6013 * delete timer. MF resources will be freed when 6528 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
6014 * the FIFO's are primed. 6529/* wake up mptbase_cmds */
6015 */ 6530 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
6016 spin_lock_irqsave(&ioc->FreeQlock, flags); 6531 ioc->mptbase_cmds.status |=
6017 list_for_each_entry(pCfg, &ioc->configQ, linkage) 6532 MPT_MGMT_STATUS_DID_IOCRESET;
6018 del_timer(&pCfg->timer); 6533 complete(&ioc->mptbase_cmds.done);
6019 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
6020
6021 } else {
6022 CONFIGPARMS *pNext;
6023
6024 /* Search the configQ for internal commands.
6025 * Flush the Q, and wake up all suspended threads.
6026 */
6027 spin_lock_irqsave(&ioc->FreeQlock, flags);
6028 list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) {
6029 list_del(&pCfg->linkage);
6030
6031 pCfg->status = MPT_CONFIG_ERROR;
6032 pCfg->wait_done = 1;
6033 wake_up(&mpt_waitq);
6034 } 6534 }
6035 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 6535/* wake up taskmgmt_cmds */
6536 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
6537 ioc->taskmgmt_cmds.status |=
6538 MPT_MGMT_STATUS_DID_IOCRESET;
6539 complete(&ioc->taskmgmt_cmds.done);
6540 }
6541 break;
6542 default:
6543 break;
6036 } 6544 }
6037 6545
6038 return 1; /* currently means nothing really */ 6546 return 1; /* currently means nothing really */
@@ -6344,6 +6852,59 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
6344 6852
6345 *size = y; 6853 *size = y;
6346} 6854}
6855/**
 6856 * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
6857 * @ioc: Pointer to MPT_ADAPTER structure
6858 *
6859 * Returns 0 for SUCCESS or -1 if FAILED.
6860 *
 6861 * If -1 is returned, it was not possible to set the flags
6862 **/
6863int
6864mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
6865{
6866 unsigned long flags;
6867 int retval;
6868
6869 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6870 if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress ||
6871 (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) {
6872 retval = -1;
6873 goto out;
6874 }
6875 retval = 0;
6876 ioc->taskmgmt_in_progress = 1;
6877 ioc->taskmgmt_quiesce_io = 1;
6878 if (ioc->alt_ioc) {
6879 ioc->alt_ioc->taskmgmt_in_progress = 1;
6880 ioc->alt_ioc->taskmgmt_quiesce_io = 1;
6881 }
6882 out:
6883 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6884 return retval;
6885}
6886EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
6887
6888/**
 6889 * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management
6890 * @ioc: Pointer to MPT_ADAPTER structure
6891 *
6892 **/
6893void
6894mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
6895{
6896 unsigned long flags;
6897
6898 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6899 ioc->taskmgmt_in_progress = 0;
6900 ioc->taskmgmt_quiesce_io = 0;
6901 if (ioc->alt_ioc) {
6902 ioc->alt_ioc->taskmgmt_in_progress = 0;
6903 ioc->alt_ioc->taskmgmt_quiesce_io = 0;
6904 }
6905 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6906}
6907EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
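A short usage sketch for the pair of exports above: a protocol driver brackets a task-management request with them so that a concurrent reset, or a second TM on either member of a dual-port pair, is refused up front. The build/send/wait step is elided and the driver's mptbase.h is assumed to be included:

#include <linux/errno.h>

static int send_tm_request(MPT_ADAPTER *ioc)
{
        int rc;

        if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
                return -EBUSY;  /* reset or another TM already in flight */

        rc = 0;                 /* issue the TM and wait for its reply here */

        mpt_clear_taskmgmt_in_progress_flag(ioc);
        return rc;
}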
6347 6908
6348 6909
6349/** 6910/**
@@ -6397,7 +6958,9 @@ int
6397mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) 6958mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6398{ 6959{
6399 int rc; 6960 int rc;
6961 u8 cb_idx;
6400 unsigned long flags; 6962 unsigned long flags;
6963 unsigned long time_count;
6401 6964
6402 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name)); 6965 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name));
6403#ifdef MFCNT 6966#ifdef MFCNT
@@ -6410,14 +6973,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6410 /* Reset the adapter. Prevent more than 1 call to 6973 /* Reset the adapter. Prevent more than 1 call to
6411 * mpt_do_ioc_recovery at any instant in time. 6974 * mpt_do_ioc_recovery at any instant in time.
6412 */ 6975 */
6413 spin_lock_irqsave(&ioc->diagLock, flags); 6976 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6414 if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){ 6977 if (ioc->ioc_reset_in_progress) {
6415 spin_unlock_irqrestore(&ioc->diagLock, flags); 6978 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6416 return 0; 6979 return 0;
6417 } else {
6418 ioc->diagPending = 1;
6419 } 6980 }
6420 spin_unlock_irqrestore(&ioc->diagLock, flags); 6981 ioc->ioc_reset_in_progress = 1;
6982 if (ioc->alt_ioc)
6983 ioc->alt_ioc->ioc_reset_in_progress = 1;
6984 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6421 6985
6422 /* FIXME: If do_ioc_recovery fails, repeat.... 6986 /* FIXME: If do_ioc_recovery fails, repeat....
6423 */ 6987 */
@@ -6427,47 +6991,57 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6427 * Prevents timeouts occurring during a diagnostic reset...very bad. 6991 * Prevents timeouts occurring during a diagnostic reset...very bad.
6428 * For all other protocol drivers, this is a no-op. 6992 * For all other protocol drivers, this is a no-op.
6429 */ 6993 */
6430 { 6994 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
6431 u8 cb_idx; 6995 if (MptResetHandlers[cb_idx]) {
6432 int r = 0; 6996 mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
6433 6997 if (ioc->alt_ioc)
6434 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 6998 mpt_signal_reset(cb_idx, ioc->alt_ioc,
6435 if (MptResetHandlers[cb_idx]) { 6999 MPT_IOC_SETUP_RESET);
6436 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling IOC reset_setup handler #%d\n",
6437 ioc->name, cb_idx));
6438 r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
6439 if (ioc->alt_ioc) {
6440 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling alt-%s setup reset handler #%d\n",
6441 ioc->name, ioc->alt_ioc->name, cb_idx));
6442 r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_SETUP_RESET);
6443 }
6444 }
6445 } 7000 }
6446 } 7001 }
6447 7002
6448 if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) { 7003 time_count = jiffies;
6449 printk(MYIOC_s_WARN_FMT "Cannot recover rc = %d!\n", ioc->name, rc); 7004 rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
7005 if (rc != 0) {
7006 printk(KERN_WARNING MYNAM
7007 ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name);
7008 } else {
7009 if (ioc->hard_resets < -1)
7010 ioc->hard_resets++;
6450 } 7011 }
6451 ioc->reload_fw = 0;
6452 if (ioc->alt_ioc)
6453 ioc->alt_ioc->reload_fw = 0;
6454 7012
6455 spin_lock_irqsave(&ioc->diagLock, flags); 7013 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6456 ioc->diagPending = 0; 7014 ioc->ioc_reset_in_progress = 0;
6457 if (ioc->alt_ioc) 7015 ioc->taskmgmt_quiesce_io = 0;
6458 ioc->alt_ioc->diagPending = 0; 7016 ioc->taskmgmt_in_progress = 0;
6459 spin_unlock_irqrestore(&ioc->diagLock, flags); 7017 if (ioc->alt_ioc) {
7018 ioc->alt_ioc->ioc_reset_in_progress = 0;
7019 ioc->alt_ioc->taskmgmt_quiesce_io = 0;
7020 ioc->alt_ioc->taskmgmt_in_progress = 0;
7021 }
7022 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6460 7023
6461 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler rc = %d!\n", ioc->name, rc)); 7024 dtmprintk(ioc,
7025 printk(MYIOC_s_DEBUG_FMT
7026 "HardResetHandler: completed (%d seconds): %s\n", ioc->name,
7027 jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ?
7028 "SUCCESS" : "FAILED")));
6462 7029
6463 return rc; 7030 return rc;
6464} 7031}
6465 7032
6466/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7033#ifdef CONFIG_FUSION_LOGGING
6467static void 7034static void
6468EventDescriptionStr(u8 event, u32 evData0, char *evStr) 7035mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
6469{ 7036{
6470 char *ds = NULL; 7037 char *ds = NULL;
7038 u32 evData0;
7039 int ii;
7040 u8 event;
7041 char *evStr = ioc->evStr;
7042
7043 event = le32_to_cpu(pEventReply->Event) & 0xFF;
7044 evData0 = le32_to_cpu(pEventReply->Data[0]);
6471 7045
6472 switch(event) { 7046 switch(event) {
6473 case MPI_EVENT_NONE: 7047 case MPI_EVENT_NONE:
@@ -6501,9 +7075,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6501 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) 7075 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
6502 ds = "Loop State(LIP) Change"; 7076 ds = "Loop State(LIP) Change";
6503 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) 7077 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
6504 ds = "Loop State(LPE) Change"; /* ??? */ 7078 ds = "Loop State(LPE) Change";
6505 else 7079 else
6506 ds = "Loop State(LPB) Change"; /* ??? */ 7080 ds = "Loop State(LPB) Change";
6507 break; 7081 break;
6508 case MPI_EVENT_LOGOUT: 7082 case MPI_EVENT_LOGOUT:
6509 ds = "Logout"; 7083 ds = "Logout";
@@ -6703,28 +7277,65 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6703 } 7277 }
6704 case MPI_EVENT_IR2: 7278 case MPI_EVENT_IR2:
6705 { 7279 {
7280 u8 id = (u8)(evData0);
7281 u8 channel = (u8)(evData0 >> 8);
7282 u8 phys_num = (u8)(evData0 >> 24);
6706 u8 ReasonCode = (u8)(evData0 >> 16); 7283 u8 ReasonCode = (u8)(evData0 >> 16);
7284
6707 switch (ReasonCode) { 7285 switch (ReasonCode) {
6708 case MPI_EVENT_IR2_RC_LD_STATE_CHANGED: 7286 case MPI_EVENT_IR2_RC_LD_STATE_CHANGED:
6709 ds = "IR2: LD State Changed"; 7287 snprintf(evStr, EVENT_DESCR_STR_SZ,
7288 "IR2: LD State Changed: "
7289 "id=%d channel=%d phys_num=%d",
7290 id, channel, phys_num);
6710 break; 7291 break;
6711 case MPI_EVENT_IR2_RC_PD_STATE_CHANGED: 7292 case MPI_EVENT_IR2_RC_PD_STATE_CHANGED:
6712 ds = "IR2: PD State Changed"; 7293 snprintf(evStr, EVENT_DESCR_STR_SZ,
7294 "IR2: PD State Changed "
7295 "id=%d channel=%d phys_num=%d",
7296 id, channel, phys_num);
6713 break; 7297 break;
6714 case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL: 7298 case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL:
6715 ds = "IR2: Bad Block Table Full"; 7299 snprintf(evStr, EVENT_DESCR_STR_SZ,
7300 "IR2: Bad Block Table Full: "
7301 "id=%d channel=%d phys_num=%d",
7302 id, channel, phys_num);
6716 break; 7303 break;
6717 case MPI_EVENT_IR2_RC_PD_INSERTED: 7304 case MPI_EVENT_IR2_RC_PD_INSERTED:
6718 ds = "IR2: PD Inserted"; 7305 snprintf(evStr, EVENT_DESCR_STR_SZ,
7306 "IR2: PD Inserted: "
7307 "id=%d channel=%d phys_num=%d",
7308 id, channel, phys_num);
6719 break; 7309 break;
6720 case MPI_EVENT_IR2_RC_PD_REMOVED: 7310 case MPI_EVENT_IR2_RC_PD_REMOVED:
6721 ds = "IR2: PD Removed"; 7311 snprintf(evStr, EVENT_DESCR_STR_SZ,
7312 "IR2: PD Removed: "
7313 "id=%d channel=%d phys_num=%d",
7314 id, channel, phys_num);
6722 break; 7315 break;
6723 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: 7316 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
6724 ds = "IR2: Foreign CFG Detected"; 7317 snprintf(evStr, EVENT_DESCR_STR_SZ,
7318 "IR2: Foreign CFG Detected: "
7319 "id=%d channel=%d phys_num=%d",
7320 id, channel, phys_num);
6725 break; 7321 break;
6726 case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR: 7322 case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR:
6727 ds = "IR2: Rebuild Medium Error"; 7323 snprintf(evStr, EVENT_DESCR_STR_SZ,
7324 "IR2: Rebuild Medium Error: "
7325 "id=%d channel=%d phys_num=%d",
7326 id, channel, phys_num);
7327 break;
7328 case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
7329 snprintf(evStr, EVENT_DESCR_STR_SZ,
7330 "IR2: Dual Port Added: "
7331 "id=%d channel=%d phys_num=%d",
7332 id, channel, phys_num);
7333 break;
7334 case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
7335 snprintf(evStr, EVENT_DESCR_STR_SZ,
7336 "IR2: Dual Port Removed: "
7337 "id=%d channel=%d phys_num=%d",
7338 id, channel, phys_num);
6728 break; 7339 break;
6729 default: 7340 default:
6730 ds = "IR2"; 7341 ds = "IR2";
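The IR2 strings above now carry the id, channel and phys_num fields that are packed into Data[0] next to the reason code. A stand-alone illustration of that unpacking, using a made-up event word:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t evData0 = 0x05021103;  /* assumed IR2 Data[0] value */
        uint8_t id       = (uint8_t)evData0;
        uint8_t channel  = (uint8_t)(evData0 >> 8);
        uint8_t reason   = (uint8_t)(evData0 >> 16);
        uint8_t phys_num = (uint8_t)(evData0 >> 24);

        printf("id=%u channel=%u reason=0x%02x phys_num=%u\n",
               id, channel, reason, phys_num);
        return 0;
}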
@@ -6760,13 +7371,18 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6760 case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: 7371 case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
6761 { 7372 {
6762 u8 reason = (u8)(evData0); 7373 u8 reason = (u8)(evData0);
6763 u8 port_num = (u8)(evData0 >> 8);
6764 u16 handle = le16_to_cpu(evData0 >> 16);
6765 7374
6766 snprintf(evStr, EVENT_DESCR_STR_SZ, 7375 switch (reason) {
6767 "SAS Initiator Device Status Change: reason=0x%02x " 7376 case MPI_EVENT_SAS_INIT_RC_ADDED:
6768 "port=%d handle=0x%04x", 7377 ds = "SAS Initiator Status Change: Added";
6769 reason, port_num, handle); 7378 break;
7379 case MPI_EVENT_SAS_INIT_RC_REMOVED:
7380 ds = "SAS Initiator Status Change: Deleted";
7381 break;
7382 default:
7383 ds = "SAS Initiator Status Change";
7384 break;
7385 }
6770 break; 7386 break;
6771 } 7387 }
6772 7388
@@ -6814,6 +7430,24 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6814 break; 7430 break;
6815 } 7431 }
6816 7432
7433 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
7434 {
7435 u8 reason = (u8)(evData0);
7436
7437 switch (reason) {
7438 case MPI_EVENT_SAS_EXP_RC_ADDED:
7439 ds = "Expander Status Change: Added";
7440 break;
7441 case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING:
7442 ds = "Expander Status Change: Deleted";
7443 break;
7444 default:
7445 ds = "Expander Status Change";
7446 break;
7447 }
7448 break;
7449 }
7450
6817 /* 7451 /*
6818 * MPT base "custom" events may be added here... 7452 * MPT base "custom" events may be added here...
6819 */ 7453 */
@@ -6823,8 +7457,20 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6823 } 7457 }
6824 if (ds) 7458 if (ds)
6825 strncpy(evStr, ds, EVENT_DESCR_STR_SZ); 7459 strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
6826}
6827 7460
7461
7462 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
7463 "MPT event:(%02Xh) : %s\n",
7464 ioc->name, event, evStr));
7465
7466 devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM
7467 ": Event data:\n"));
7468 for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++)
7469 devtverboseprintk(ioc, printk(" %08x",
7470 le32_to_cpu(pEventReply->Data[ii])));
7471 devtverboseprintk(ioc, printk(KERN_DEBUG "\n"));
7472}
7473#endif
6828/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7474/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6829/** 7475/**
6830 * ProcessEventNotification - Route EventNotificationReply to all event handlers 7476 * ProcessEventNotification - Route EventNotificationReply to all event handlers
@@ -6841,37 +7487,24 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6841{ 7487{
6842 u16 evDataLen; 7488 u16 evDataLen;
6843 u32 evData0 = 0; 7489 u32 evData0 = 0;
6844// u32 evCtx;
6845 int ii; 7490 int ii;
6846 u8 cb_idx; 7491 u8 cb_idx;
6847 int r = 0; 7492 int r = 0;
6848 int handlers = 0; 7493 int handlers = 0;
6849 char evStr[EVENT_DESCR_STR_SZ];
6850 u8 event; 7494 u8 event;
6851 7495
6852 /* 7496 /*
6853 * Do platform normalization of values 7497 * Do platform normalization of values
6854 */ 7498 */
6855 event = le32_to_cpu(pEventReply->Event) & 0xFF; 7499 event = le32_to_cpu(pEventReply->Event) & 0xFF;
6856// evCtx = le32_to_cpu(pEventReply->EventContext);
6857 evDataLen = le16_to_cpu(pEventReply->EventDataLength); 7500 evDataLen = le16_to_cpu(pEventReply->EventDataLength);
6858 if (evDataLen) { 7501 if (evDataLen) {
6859 evData0 = le32_to_cpu(pEventReply->Data[0]); 7502 evData0 = le32_to_cpu(pEventReply->Data[0]);
6860 } 7503 }
6861 7504
6862 EventDescriptionStr(event, evData0, evStr);
6863 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event:(%02Xh) : %s\n",
6864 ioc->name,
6865 event,
6866 evStr));
6867
6868#ifdef CONFIG_FUSION_LOGGING 7505#ifdef CONFIG_FUSION_LOGGING
6869 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT 7506 if (evDataLen)
6870 ": Event data:\n", ioc->name)); 7507 mpt_display_event_info(ioc, pEventReply);
6871 for (ii = 0; ii < evDataLen; ii++)
6872 devtverboseprintk(ioc, printk(" %08x",
6873 le32_to_cpu(pEventReply->Data[ii])));
6874 devtverboseprintk(ioc, printk("\n"));
6875#endif 7508#endif
6876 7509
6877 /* 7510 /*
@@ -6926,8 +7559,9 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6926 */ 7559 */
6927 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 7560 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
6928 if (MptEvHandlers[cb_idx]) { 7561 if (MptEvHandlers[cb_idx]) {
6929 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Routing Event to event handler #%d\n", 7562 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6930 ioc->name, cb_idx)); 7563 "Routing Event to event handler #%d\n",
7564 ioc->name, cb_idx));
6931 r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply); 7565 r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
6932 handlers++; 7566 handlers++;
6933 } 7567 }
@@ -7011,8 +7645,6 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
7011 switch (info) { 7645 switch (info) {
7012 case 0x00010000: 7646 case 0x00010000:
7013 desc = "bug! MID not found"; 7647 desc = "bug! MID not found";
7014 if (ioc->reload_fw == 0)
7015 ioc->reload_fw++;
7016 break; 7648 break;
7017 7649
7018 case 0x00020000: 7650 case 0x00020000:
@@ -7613,7 +8245,6 @@ EXPORT_SYMBOL(mpt_get_msg_frame);
7613EXPORT_SYMBOL(mpt_put_msg_frame); 8245EXPORT_SYMBOL(mpt_put_msg_frame);
7614EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri); 8246EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
7615EXPORT_SYMBOL(mpt_free_msg_frame); 8247EXPORT_SYMBOL(mpt_free_msg_frame);
7616EXPORT_SYMBOL(mpt_add_sge);
7617EXPORT_SYMBOL(mpt_send_handshake_request); 8248EXPORT_SYMBOL(mpt_send_handshake_request);
7618EXPORT_SYMBOL(mpt_verify_adapter); 8249EXPORT_SYMBOL(mpt_verify_adapter);
7619EXPORT_SYMBOL(mpt_GetIocState); 8250EXPORT_SYMBOL(mpt_GetIocState);
@@ -7650,7 +8281,7 @@ fusion_init(void)
7650 /* Register ourselves (mptbase) in order to facilitate 8281 /* Register ourselves (mptbase) in order to facilitate
7651 * EventNotification handling. 8282 * EventNotification handling.
7652 */ 8283 */
7653 mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER); 8284 mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER);
7654 8285
7655 /* Register for hard reset handling callbacks. 8286 /* Register for hard reset handling callbacks.
7656 */ 8287 */
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b3e981d2a506..1c8514dc31ca 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.04.07" 79#define MPT_LINUX_VERSION_COMMON "3.04.10"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
@@ -104,6 +104,7 @@
104#endif 104#endif
105 105
106#define MPT_NAME_LENGTH 32 106#define MPT_NAME_LENGTH 32
107#define MPT_KOBJ_NAME_LEN 20
107 108
108#define MPT_PROCFS_MPTBASEDIR "mpt" 109#define MPT_PROCFS_MPTBASEDIR "mpt"
109 /* chg it to "driver/fusion" ? */ 110 /* chg it to "driver/fusion" ? */
@@ -134,6 +135,7 @@
134 135
135#define MPT_COALESCING_TIMEOUT 0x10 136#define MPT_COALESCING_TIMEOUT 0x10
136 137
138
137/* 139/*
138 * SCSI transfer rate defines. 140 * SCSI transfer rate defines.
139 */ 141 */
@@ -161,10 +163,10 @@
161/* 163/*
162 * Set the MAX_SGE value based on user input. 164 * Set the MAX_SGE value based on user input.
163 */ 165 */
164#ifdef CONFIG_FUSION_MAX_SGE 166#ifdef CONFIG_FUSION_MAX_SGE
165#if CONFIG_FUSION_MAX_SGE < 16 167#if CONFIG_FUSION_MAX_SGE < 16
166#define MPT_SCSI_SG_DEPTH 16 168#define MPT_SCSI_SG_DEPTH 16
167#elif CONFIG_FUSION_MAX_SGE > 128 169#elif CONFIG_FUSION_MAX_SGE > 128
168#define MPT_SCSI_SG_DEPTH 128 170#define MPT_SCSI_SG_DEPTH 128
169#else 171#else
170#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE 172#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE
@@ -173,6 +175,18 @@
173#define MPT_SCSI_SG_DEPTH 40 175#define MPT_SCSI_SG_DEPTH 40
174#endif 176#endif
175 177
178#ifdef CONFIG_FUSION_MAX_FC_SGE
179#if CONFIG_FUSION_MAX_FC_SGE < 16
180#define MPT_SCSI_FC_SG_DEPTH 16
181#elif CONFIG_FUSION_MAX_FC_SGE > 256
182#define MPT_SCSI_FC_SG_DEPTH 256
183#else
184#define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE
185#endif
186#else
187#define MPT_SCSI_FC_SG_DEPTH 40
188#endif
189
176/* debug print string length used for events and iocstatus */ 190/* debug print string length used for events and iocstatus */
177# define EVENT_DESCR_STR_SZ 100 191# define EVENT_DESCR_STR_SZ 100
178 192
@@ -431,38 +445,36 @@ do { \
431 * IOCTL structure and associated defines 445 * IOCTL structure and associated defines
432 */ 446 */
433 447
434#define MPT_IOCTL_STATUS_DID_IOCRESET 0x01 /* IOC Reset occurred on the current*/
435#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
436#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */
437#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */
438#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
439#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE 0x20 /* The TM timer is running */
440#define MPT_IOCTL_STATUS_TM_FAILED 0x40 /* User TM request failed */
441
442#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */ 448#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */
443 449
444typedef struct _MPT_IOCTL { 450#define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */
445 struct _MPT_ADAPTER *ioc; 451#define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */
446 u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ 452#define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */
447 u8 sense[MPT_SENSE_BUFFER_ALLOC]; 453#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred
448 int wait_done; /* wake-up value for this ioc */ 454 on the current*/
449 u8 rsvd; 455#define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */
450 u8 status; /* current command status */ 456#define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */
451 u8 reset; /* 1 if bus reset allowed */ 457#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from
452 u8 id; /* target for reset */ 458 complete routine */
453 struct mutex ioctl_mutex; 459
454} MPT_IOCTL; 460#define INITIALIZE_MGMT_STATUS(status) \
455 461 status = MPT_MGMT_STATUS_PENDING;
456#define MPT_SAS_MGMT_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ 462#define CLEAR_MGMT_STATUS(status) \
457#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ 463 status = 0;
458#define MPT_SAS_MGMT_STATUS_TM_FAILED 0x40 /* User TM request failed */ 464#define CLEAR_MGMT_PENDING_STATUS(status) \
459 465 status &= ~MPT_MGMT_STATUS_PENDING;
460typedef struct _MPT_SAS_MGMT { 466#define SET_MGMT_MSG_CONTEXT(msg_context, value) \
467 msg_context = value;
468
469typedef struct _MPT_MGMT {
461 struct mutex mutex; 470 struct mutex mutex;
462 struct completion done; 471 struct completion done;
463 u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ 472 u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
473 u8 sense[MPT_SENSE_BUFFER_ALLOC];
464 u8 status; /* current command status */ 474 u8 status; /* current command status */
465}MPT_SAS_MGMT; 475 int completion_code;
476 u32 msg_context;
477} MPT_MGMT;
466 478
467/* 479/*
468 * Event Structure and define 480 * Event Structure and define
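The hunk above folds the old MPT_IOCTL and MPT_SAS_MGMT bookkeeping into a single MPT_MGMT type driven by the new status flags and helper macros, with a struct completion replacing the wait_done/wake_up idiom. A minimal sketch of the request/wait pattern these fields support follows; send_and_wait and my_cb_idx are placeholder names and the 60 second timeout is only an example, nothing in this header mandates it.

	static int send_and_wait(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, u8 my_cb_idx)
	{
		int rc = 0;

		mutex_lock(&ioc->ioctl_cmds.mutex);
		INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)	/* mark the command pending */
		SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, mf->u.hdr.MsgContext);

		mpt_put_msg_frame(my_cb_idx, ioc, mf);

		/* the reply callback copies reply/sense data and completes .done */
		if (!wait_for_completion_timeout(&ioc->ioctl_cmds.done, 60 * HZ) ||
		    !(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD))
			rc = -ETIME;

		CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
		SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
		mutex_unlock(&ioc->ioctl_cmds.mutex);
		return rc;
	}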
@@ -564,6 +576,10 @@ struct mptfc_rport_info
564 u8 flags; 576 u8 flags;
565}; 577};
566 578
579typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
580typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
581 dma_addr_t dma_addr);
582
567/* 583/*
568 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS 584 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
569 */ 585 */
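The MPT_ADD_SGE and MPT_ADD_CHAIN typedefs turn scatter-gather element construction into per-adapter function pointers (stored as ioc->add_sge/add_chain further down), so one kernel build can drive both 32-bit and 64-bit addressing IOCs. A rough sketch of how the selection presumably happens once the DMA mask is known; mpt_add_sge_32bit and mpt_add_sge_64bit are illustrative names only, the real builders are not shown in this patch context.

	/* Illustrative builders matching the MPT_ADD_SGE signature. */
	extern void mpt_add_sge_32bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
	extern void mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr);

	static void setup_sge_handlers(MPT_ADAPTER *ioc, u64 dma_mask)
	{
		if (dma_mask == DMA_BIT_MASK(64)) {
			ioc->add_sge = mpt_add_sge_64bit;
			ioc->sg_addr_size = sizeof(u64);
		} else {
			ioc->add_sge = mpt_add_sge_32bit;
			ioc->sg_addr_size = sizeof(u32);
		}
		/* one flags/length word plus the address per simple element */
		ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
	}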
@@ -573,6 +589,10 @@ typedef struct _MPT_ADAPTER
573 int pci_irq; /* This irq */ 589 int pci_irq; /* This irq */
574 char name[MPT_NAME_LENGTH]; /* "iocN" */ 590 char name[MPT_NAME_LENGTH]; /* "iocN" */
575 char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */ 591 char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
592#ifdef CONFIG_FUSION_LOGGING
593 /* used in mpt_display_event_info */
594 char evStr[EVENT_DESCR_STR_SZ];
595#endif
576 char board_name[16]; 596 char board_name[16];
577 char board_assembly[16]; 597 char board_assembly[16];
578 char board_tracer[16]; 598 char board_tracer[16];
@@ -600,6 +620,10 @@ typedef struct _MPT_ADAPTER
600 int reply_depth; /* Num Allocated reply frames */ 620 int reply_depth; /* Num Allocated reply frames */
601 int reply_sz; /* Reply frame size */ 621 int reply_sz; /* Reply frame size */
602 int num_chain; /* Number of chain buffers */ 622 int num_chain; /* Number of chain buffers */
623 MPT_ADD_SGE add_sge; /* Pointer to add_sge
624 function */
625 MPT_ADD_CHAIN add_chain; /* Pointer to add_chain
626 function */
603 /* Pool of buffers for chaining. ReqToChain 627 /* Pool of buffers for chaining. ReqToChain
604 * and ChainToChain track index of chain buffers. 628 * and ChainToChain track index of chain buffers.
605 * ChainBuffer (DMA) virt/phys addresses. 629 * ChainBuffer (DMA) virt/phys addresses.
@@ -640,11 +664,8 @@ typedef struct _MPT_ADAPTER
640 RaidCfgData raid_data; /* Raid config. data */ 664 RaidCfgData raid_data; /* Raid config. data */
641 SasCfgData sas_data; /* Sas config. data */ 665 SasCfgData sas_data; /* Sas config. data */
642 FcCfgData fc_data; /* Fc config. data */ 666 FcCfgData fc_data; /* Fc config. data */
643 MPT_IOCTL *ioctl; /* ioctl data pointer */
644 struct proc_dir_entry *ioc_dentry; 667 struct proc_dir_entry *ioc_dentry;
645 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ 668 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
646 spinlock_t diagLock; /* diagnostic reset lock */
647 int diagPending;
648 u32 biosVersion; /* BIOS version from IO Unit Page 2 */ 669 u32 biosVersion; /* BIOS version from IO Unit Page 2 */
649 int eventTypes; /* Event logging parameters */ 670 int eventTypes; /* Event logging parameters */
650 int eventContext; /* Next event context */ 671 int eventContext; /* Next event context */
@@ -652,7 +673,6 @@ typedef struct _MPT_ADAPTER
652 struct _mpt_ioctl_events *events; /* pointer to event log */ 673 struct _mpt_ioctl_events *events; /* pointer to event log */
653 u8 *cached_fw; /* Pointer to FW */ 674 u8 *cached_fw; /* Pointer to FW */
654 dma_addr_t cached_fw_dma; 675 dma_addr_t cached_fw_dma;
655 struct list_head configQ; /* linked list of config. requests */
656 int hs_reply_idx; 676 int hs_reply_idx;
657#ifndef MFCNT 677#ifndef MFCNT
658 u32 pad0; 678 u32 pad0;
@@ -665,9 +685,6 @@ typedef struct _MPT_ADAPTER
665 IOCFactsReply_t facts; 685 IOCFactsReply_t facts;
666 PortFactsReply_t pfacts[2]; 686 PortFactsReply_t pfacts[2];
667 FCPortPage0_t fc_port_page0[2]; 687 FCPortPage0_t fc_port_page0[2];
668 struct timer_list persist_timer; /* persist table timer */
669 int persist_wait_done; /* persist completion flag */
670 u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
671 LANPage0_t lan_cnfg_page0; 688 LANPage0_t lan_cnfg_page0;
672 LANPage1_t lan_cnfg_page1; 689 LANPage1_t lan_cnfg_page1;
673 690
@@ -682,23 +699,44 @@ typedef struct _MPT_ADAPTER
682 int aen_event_read_flag; /* flag to indicate event log was read*/ 699 int aen_event_read_flag; /* flag to indicate event log was read*/
683 u8 FirstWhoInit; 700 u8 FirstWhoInit;
684 u8 upload_fw; /* If set, do a fw upload */ 701 u8 upload_fw; /* If set, do a fw upload */
685 u8 reload_fw; /* Force a FW Reload on next reset */
686 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ 702 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
687 u8 pad1[4]; 703 u8 pad1[4];
688 u8 DoneCtx; 704 u8 DoneCtx;
689 u8 TaskCtx; 705 u8 TaskCtx;
690 u8 InternalCtx; 706 u8 InternalCtx;
691 spinlock_t initializing_hba_lock;
692 int initializing_hba_lock_flag;
693 struct list_head list; 707 struct list_head list;
694 struct net_device *netdev; 708 struct net_device *netdev;
695 struct list_head sas_topology; 709 struct list_head sas_topology;
696 struct mutex sas_topology_mutex; 710 struct mutex sas_topology_mutex;
711
712 struct workqueue_struct *fw_event_q;
713 struct list_head fw_event_list;
714 spinlock_t fw_event_lock;
715 u8 fw_events_off; /* if '1', then ignore events */
716 char fw_event_q_name[MPT_KOBJ_NAME_LEN];
717
697 struct mutex sas_discovery_mutex; 718 struct mutex sas_discovery_mutex;
698 u8 sas_discovery_runtime; 719 u8 sas_discovery_runtime;
699 u8 sas_discovery_ignore_events; 720 u8 sas_discovery_ignore_events;
721
722 /* port_info object for the host */
723 struct mptsas_portinfo *hba_port_info;
724 u64 hba_port_sas_addr;
725 u16 hba_port_num_phy;
726 struct list_head sas_device_info_list;
727 struct mutex sas_device_info_mutex;
728 u8 old_sas_discovery_protocal;
729 u8 sas_discovery_quiesce_io;
700 int sas_index; /* index refrencing */ 730 int sas_index; /* index refrencing */
701 MPT_SAS_MGMT sas_mgmt; 731 MPT_MGMT sas_mgmt;
732 MPT_MGMT mptbase_cmds; /* for sending config pages */
733 MPT_MGMT internal_cmds;
734 MPT_MGMT taskmgmt_cmds;
735 MPT_MGMT ioctl_cmds;
736 spinlock_t taskmgmt_lock; /* diagnostic reset lock */
737 int taskmgmt_in_progress;
738 u8 taskmgmt_quiesce_io;
739 u8 ioc_reset_in_progress;
702 struct work_struct sas_persist_task; 740 struct work_struct sas_persist_task;
703 741
704 struct work_struct fc_setup_reset_work; 742 struct work_struct fc_setup_reset_work;
@@ -707,15 +745,27 @@ typedef struct _MPT_ADAPTER
707 u8 fc_link_speed[2]; 745 u8 fc_link_speed[2];
708 spinlock_t fc_rescan_work_lock; 746 spinlock_t fc_rescan_work_lock;
709 struct work_struct fc_rescan_work; 747 struct work_struct fc_rescan_work;
710 char fc_rescan_work_q_name[20]; 748 char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
711 struct workqueue_struct *fc_rescan_work_q; 749 struct workqueue_struct *fc_rescan_work_q;
750
751 /* driver forced bus resets count */
752 unsigned long hard_resets;
753 /* fw/external bus resets count */
754 unsigned long soft_resets;
755 /* cmd timeouts */
756 unsigned long timeouts;
757
712 struct scsi_cmnd **ScsiLookup; 758 struct scsi_cmnd **ScsiLookup;
713 spinlock_t scsi_lookup_lock; 759 spinlock_t scsi_lookup_lock;
714 760 u64 dma_mask;
715 char reset_work_q_name[20]; 761 u32 broadcast_aen_busy;
762 char reset_work_q_name[MPT_KOBJ_NAME_LEN];
716 struct workqueue_struct *reset_work_q; 763 struct workqueue_struct *reset_work_q;
717 struct delayed_work fault_reset_work; 764 struct delayed_work fault_reset_work;
718 spinlock_t fault_reset_work_lock; 765
766 u8 sg_addr_size;
767 u8 in_rescan;
768 u8 SGE_size;
719 769
720} MPT_ADAPTER; 770} MPT_ADAPTER;
721 771
@@ -753,13 +803,14 @@ typedef struct _mpt_sge {
753 dma_addr_t Address; 803 dma_addr_t Address;
754} MptSge_t; 804} MptSge_t;
755 805
756#define mpt_addr_size() \
757 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
758 MPI_SGE_FLAGS_32_BIT_ADDRESSING)
759 806
760#define mpt_msg_flags() \ 807#define mpt_msg_flags(ioc) \
761 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ 808 (ioc->sg_addr_size == sizeof(u64)) ? \
762 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32) 809 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
810 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32
811
812#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \
813 (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT)
763 814
764/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 815/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
765/* 816/*
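With the compile-time mpt_addr_size() gone, mpt_msg_flags() keys the sense-buffer width off the adapter's sg_addr_size rather than sizeof(dma_addr_t), so a 64-bit host driving a 32-bit-only IOC no longer advertises 64-bit sense addressing. A one-function restatement of how the rewritten macro resolves, assuming sg_addr_size was filled in during adapter setup; sense_width_flag is a placeholder name:

	static u8 sense_width_flag(const MPT_ADAPTER *ioc)
	{
		return (ioc->sg_addr_size == sizeof(u64)) ?
			MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 :
			MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32;
	}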
@@ -835,22 +886,14 @@ typedef struct _MPT_SCSI_HOST {
835 /* Pool of memory for holding SCpnts before doing 886 /* Pool of memory for holding SCpnts before doing
836 * OS callbacks. freeQ is the free pool. 887 * OS callbacks. freeQ is the free pool.
837 */ 888 */
838 u8 tmPending;
839 u8 resetPending;
840 u8 negoNvram; /* DV disabled, nego NVRAM */ 889 u8 negoNvram; /* DV disabled, nego NVRAM */
841 u8 pad1; 890 u8 pad1;
842 u8 tmState;
843 u8 rsvd[2]; 891 u8 rsvd[2];
844 MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ 892 MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */
845 struct scsi_cmnd *abortSCpnt; 893 struct scsi_cmnd *abortSCpnt;
846 MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */ 894 MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */
847 unsigned long hard_resets; /* driver forced bus resets count */
848 unsigned long soft_resets; /* fw/external bus resets count */
849 unsigned long timeouts; /* cmd timeouts */
850 ushort sel_timeout[MPT_MAX_FC_DEVICES]; 895 ushort sel_timeout[MPT_MAX_FC_DEVICES];
851 char *info_kbuf; 896 char *info_kbuf;
852 wait_queue_head_t scandv_waitq;
853 int scandv_wait_done;
854 long last_queue_full; 897 long last_queue_full;
855 u16 tm_iocstatus; 898 u16 tm_iocstatus;
856 u16 spi_pending; 899 u16 spi_pending;
@@ -870,21 +913,16 @@ struct scsi_cmnd;
870 * Generic structure passed to the base mpt_config function. 913 * Generic structure passed to the base mpt_config function.
871 */ 914 */
872typedef struct _x_config_parms { 915typedef struct _x_config_parms {
873 struct list_head linkage; /* linked list */
874 struct timer_list timer; /* timer function for this request */
875 union { 916 union {
876 ConfigExtendedPageHeader_t *ehdr; 917 ConfigExtendedPageHeader_t *ehdr;
877 ConfigPageHeader_t *hdr; 918 ConfigPageHeader_t *hdr;
878 } cfghdr; 919 } cfghdr;
879 dma_addr_t physAddr; 920 dma_addr_t physAddr;
880 int wait_done; /* wait for this request */
881 u32 pageAddr; /* properly formatted */ 921 u32 pageAddr; /* properly formatted */
922 u16 status;
882 u8 action; 923 u8 action;
883 u8 dir; 924 u8 dir;
884 u8 timeout; /* seconds */ 925 u8 timeout; /* seconds */
885 u8 pad1;
886 u16 status;
887 u16 pad2;
888} CONFIGPARMS; 926} CONFIGPARMS;
889 927
890/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 928/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -909,7 +947,6 @@ extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
909extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 947extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
910extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 948extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
911extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 949extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
912extern void mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
913 950
914extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag); 951extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
915extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp); 952extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
@@ -922,6 +959,12 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
922extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 959extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
923extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); 960extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
924extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk); 961extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
962extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
963 pRaidPhysDiskPage1_t phys_disk);
964extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
965 u8 phys_disk_num);
966extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
967extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
925extern void mpt_halt_firmware(MPT_ADAPTER *ioc); 968extern void mpt_halt_firmware(MPT_ADAPTER *ioc);
926 969
927 970
@@ -959,7 +1002,6 @@ extern int mpt_fwfault_debug;
959#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000) 1002#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000)
960#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000) 1003#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000)
961#define MPT_SGE_FLAGS_DIRECTION (0x04000000) 1004#define MPT_SGE_FLAGS_DIRECTION (0x04000000)
962#define MPT_SGE_FLAGS_ADDRESSING (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
963#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000) 1005#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000)
964 1006
965#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000) 1007#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000)
@@ -972,14 +1014,12 @@ extern int mpt_fwfault_debug;
972 MPT_SGE_FLAGS_END_OF_BUFFER | \ 1014 MPT_SGE_FLAGS_END_OF_BUFFER | \
973 MPT_SGE_FLAGS_END_OF_LIST | \ 1015 MPT_SGE_FLAGS_END_OF_LIST | \
974 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ 1016 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
975 MPT_SGE_FLAGS_ADDRESSING | \
976 MPT_TRANSFER_IOC_TO_HOST) 1017 MPT_TRANSFER_IOC_TO_HOST)
977#define MPT_SGE_FLAGS_SSIMPLE_WRITE \ 1018#define MPT_SGE_FLAGS_SSIMPLE_WRITE \
978 (MPT_SGE_FLAGS_LAST_ELEMENT | \ 1019 (MPT_SGE_FLAGS_LAST_ELEMENT | \
979 MPT_SGE_FLAGS_END_OF_BUFFER | \ 1020 MPT_SGE_FLAGS_END_OF_BUFFER | \
980 MPT_SGE_FLAGS_END_OF_LIST | \ 1021 MPT_SGE_FLAGS_END_OF_LIST | \
981 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ 1022 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
982 MPT_SGE_FLAGS_ADDRESSING | \
983 MPT_TRANSFER_HOST_TO_IOC) 1023 MPT_TRANSFER_HOST_TO_IOC)
984 1024
985/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1025/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index c63817117c0a..9b2e2198aee9 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -84,6 +84,7 @@ MODULE_VERSION(my_VERSION);
84/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 84/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
85 85
86static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; 86static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS;
87static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS;
87 88
88static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); 89static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
89 90
@@ -127,10 +128,7 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
127 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); 128 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
128static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, 129static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
129 struct buflist *buflist, MPT_ADAPTER *ioc); 130 struct buflist *buflist, MPT_ADAPTER *ioc);
130static void mptctl_timeout_expired (MPT_IOCTL *ioctl); 131static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
131static int mptctl_bus_reset(MPT_IOCTL *ioctl);
132static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd);
133static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
134 132
135/* 133/*
136 * Reset Handler cleanup function 134 * Reset Handler cleanup function
@@ -183,10 +181,10 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
183 int rc = 0; 181 int rc = 0;
184 182
185 if (nonblock) { 183 if (nonblock) {
186 if (!mutex_trylock(&ioc->ioctl->ioctl_mutex)) 184 if (!mutex_trylock(&ioc->ioctl_cmds.mutex))
187 rc = -EAGAIN; 185 rc = -EAGAIN;
188 } else { 186 } else {
189 if (mutex_lock_interruptible(&ioc->ioctl->ioctl_mutex)) 187 if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex))
190 rc = -ERESTARTSYS; 188 rc = -ERESTARTSYS;
191 } 189 }
192 return rc; 190 return rc;
@@ -202,99 +200,78 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
202static int 200static int
203mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) 201mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
204{ 202{
205 char *sense_data; 203 char *sense_data;
206 int sz, req_index; 204 int req_index;
207 u16 iocStatus; 205 int sz;
208 u8 cmd;
209 206
210 if (req) 207 if (!req)
211 cmd = req->u.hdr.Function; 208 return 0;
212 else
213 return 1;
214 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tcompleting mpi function (0x%02X), req=%p, "
215 "reply=%p\n", ioc->name, req->u.hdr.Function, req, reply));
216
217 if (ioc->ioctl) {
218
219 if (reply==NULL) {
220
221 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_reply() NULL Reply "
222 "Function=%x!\n", ioc->name, cmd));
223 209
224 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; 210 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function "
225 ioc->ioctl->reset &= ~MPTCTL_RESET_OK; 211 "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function,
212 req, reply));
226 213
227 /* We are done, issue wake up 214 /*
228 */ 215 * Handling continuation of the same reply. Processing the first
229 ioc->ioctl->wait_done = 1; 216 * reply, and eating the other replies that come later.
230 wake_up (&mptctl_wait); 217 */
231 return 1; 218 if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
219 goto out_continuation;
232 220
233 } 221 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
234 222
235 /* Copy the reply frame (which much exist 223 if (!reply)
236 * for non-SCSI I/O) to the IOC structure. 224 goto out;
237 */
238 memcpy(ioc->ioctl->ReplyFrame, reply,
239 min(ioc->reply_sz, 4*reply->u.reply.MsgLength));
240 ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID;
241 225
242 /* Set the command status to GOOD if IOC Status is GOOD 226 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
243 * OR if SCSI I/O cmd and data underrun or recovered error. 227 sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength);
244 */ 228 memcpy(ioc->ioctl_cmds.reply, reply, sz);
245 iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK;
246 if (iocStatus == MPI_IOCSTATUS_SUCCESS)
247 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
248
249 if (iocStatus || reply->u.reply.IOCLogInfo)
250 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tiocstatus (0x%04X), "
251 "loginfo (0x%08X)\n", ioc->name,
252 iocStatus,
253 le32_to_cpu(reply->u.reply.IOCLogInfo)));
254
255 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
256 (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
257
258 if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
259 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
260 "\tscsi_status (0x%02x), scsi_state (0x%02x), "
261 "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
262 reply->u.sreply.SCSIStatus,
263 reply->u.sreply.SCSIState,
264 le16_to_cpu(reply->u.sreply.TaskTag),
265 le32_to_cpu(reply->u.sreply.TransferCount)));
266
267 ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
268
269 if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) ||
270 (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) {
271 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
272 }
273 }
274 229
275 /* Copy the sense data - if present 230 if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo)
276 */ 231 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
277 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) && 232 "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name,
278 (reply->u.sreply.SCSIState & 233 le16_to_cpu(reply->u.reply.IOCStatus),
279 MPI_SCSI_STATE_AUTOSENSE_VALID)){ 234 le32_to_cpu(reply->u.reply.IOCLogInfo)));
235
236 if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
237 (req->u.hdr.Function ==
238 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
239
240 if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
241 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
242 "scsi_status (0x%02x), scsi_state (0x%02x), "
243 "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
244 reply->u.sreply.SCSIStatus,
245 reply->u.sreply.SCSIState,
246 le16_to_cpu(reply->u.sreply.TaskTag),
247 le32_to_cpu(reply->u.sreply.TransferCount)));
248
249 if (reply->u.sreply.SCSIState &
250 MPI_SCSI_STATE_AUTOSENSE_VALID) {
280 sz = req->u.scsireq.SenseBufferLength; 251 sz = req->u.scsireq.SenseBufferLength;
281 req_index = 252 req_index =
282 le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); 253 le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
283 sense_data = 254 sense_data = ((u8 *)ioc->sense_buf_pool +
284 ((u8 *)ioc->sense_buf_pool +
285 (req_index * MPT_SENSE_BUFFER_ALLOC)); 255 (req_index * MPT_SENSE_BUFFER_ALLOC));
286 memcpy(ioc->ioctl->sense, sense_data, sz); 256 memcpy(ioc->ioctl_cmds.sense, sense_data, sz);
287 ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID; 257 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID;
288 } 258 }
259 }
289 260
290 if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT) 261 out:
291 mptctl_free_tm_flags(ioc); 262 /* We are done, issue wake up
292 263 */
293 /* We are done, issue wake up 264 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
294 */ 265 if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT)
295 ioc->ioctl->wait_done = 1; 266 mpt_clear_taskmgmt_in_progress_flag(ioc);
296 wake_up (&mptctl_wait); 267 ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
268 complete(&ioc->ioctl_cmds.done);
297 } 269 }
270
271 out_continuation:
272 if (reply && (reply->u.reply.MsgFlags &
273 MPI_MSGFLAGS_CONTINUATION_REPLY))
274 return 0;
298 return 1; 275 return 1;
299} 276}
300 277
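The rewritten mptctl_reply() above only treats a reply as its own when the request's MsgContext matches ioctl_cmds.msg_context, and it swallows continuation replies: only the final reply, one without MPI_MSGFLAGS_CONTINUATION_REPLY set, returns 1 so the base driver frees the request frame. A condensed sketch of that control flow with the copy/status bookkeeping elided; reply_filter is a placeholder name:

	static int reply_filter(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
	{
		if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
			goto out_continuation;	/* not ours: leave the state untouched */

		/* ...copy reply frame and sense data, set status flags,
		 * complete(&ioc->ioctl_cmds.done) if still pending... */

	out_continuation:
		if (reply && (reply->u.reply.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
			return 0;	/* more replies follow: keep the request frame */
		return 1;		/* final reply: base driver may free the frame */
	}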
@@ -304,30 +281,66 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
304 * Expecting an interrupt, however timed out. 281 * Expecting an interrupt, however timed out.
305 * 282 *
306 */ 283 */
307static void mptctl_timeout_expired (MPT_IOCTL *ioctl) 284static void
285mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
308{ 286{
309 int rc = 1; 287 unsigned long flags;
310 288
311 if (ioctl == NULL) 289 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
312 return; 290 ioc->name, __func__));
313 dctlprintk(ioctl->ioc,
314 printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
315 ioctl->ioc->name, ioctl->ioc->id));
316 291
317 ioctl->wait_done = 0; 292 if (mpt_fwfault_debug)
318 if (ioctl->reset & MPTCTL_RESET_OK) 293 mpt_halt_firmware(ioc);
319 rc = mptctl_bus_reset(ioctl);
320 294
321 if (rc) { 295 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
322 /* Issue a reset for this device. 296 if (ioc->ioc_reset_in_progress) {
323 * The IOC is not responding. 297 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
324 */ 298 CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
325 dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", 299 mpt_free_msg_frame(ioc, mf);
326 ioctl->ioc->name)); 300 return;
327 mpt_HardResetHandler(ioctl->ioc, CAN_SLEEP);
328 } 301 }
329 return; 302 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
303
330 304
305 if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
306 return;
307
308 /* Issue a reset for this device.
309 * The IOC is not responding.
310 */
311 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
312 ioc->name));
313 CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
314 mpt_HardResetHandler(ioc, CAN_SLEEP);
315 mpt_free_msg_frame(ioc, mf);
316}
317
318static int
319mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
320{
321 if (!mf)
322 return 0;
323
324 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
325 "TaskMgmt completed (mf=%p, mr=%p)\n",
326 ioc->name, mf, mr));
327
328 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
329
330 if (!mr)
331 goto out;
332
333 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
334 memcpy(ioc->taskmgmt_cmds.reply, mr,
335 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
336 out:
337 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
338 mpt_clear_taskmgmt_in_progress_flag(ioc);
339 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
340 complete(&ioc->taskmgmt_cmds.done);
341 return 1;
342 }
343 return 0;
331} 344}
332 345
333/* mptctl_bus_reset 346/* mptctl_bus_reset
@@ -335,133 +348,150 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
335 * Bus reset code. 348 * Bus reset code.
336 * 349 *
337 */ 350 */
338static int mptctl_bus_reset(MPT_IOCTL *ioctl) 351static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
339{ 352{
340 MPT_FRAME_HDR *mf; 353 MPT_FRAME_HDR *mf;
341 SCSITaskMgmt_t *pScsiTm; 354 SCSITaskMgmt_t *pScsiTm;
342 MPT_SCSI_HOST *hd; 355 SCSITaskMgmtReply_t *pScsiTmReply;
343 int ii; 356 int ii;
344 int retval=0; 357 int retval;
345 358 unsigned long timeout;
346 359 unsigned long time_count;
347 ioctl->reset &= ~MPTCTL_RESET_OK; 360 u16 iocstatus;
348 361
349 if (ioctl->ioc->sh == NULL) 362 /* bus reset is only good for SCSI IO, RAID PASSTHRU */
363 if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) ||
364 (function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
365 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
366 "TaskMgmt, not SCSI_IO!!\n", ioc->name));
350 return -EPERM; 367 return -EPERM;
368 }
351 369
352 hd = shost_priv(ioctl->ioc->sh); 370 mutex_lock(&ioc->taskmgmt_cmds.mutex);
353 if (hd == NULL) 371 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
372 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
354 return -EPERM; 373 return -EPERM;
374 }
355 375
356 /* Single threading .... 376 retval = 0;
357 */
358 if (mptctl_set_tm_flags(hd) != 0)
359 return -EPERM;
360 377
361 /* Send request 378 /* Send request
362 */ 379 */
363 if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) { 380 mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
364 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt, no msg frames!!\n", 381 if (mf == NULL) {
365 ioctl->ioc->name)); 382 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
366 383 "TaskMgmt, no msg frames!!\n", ioc->name));
367 mptctl_free_tm_flags(ioctl->ioc); 384 mpt_clear_taskmgmt_in_progress_flag(ioc);
368 return -ENOMEM; 385 retval = -ENOMEM;
386 goto mptctl_bus_reset_done;
369 } 387 }
370 388
371 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", 389 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
372 ioctl->ioc->name, mf)); 390 ioc->name, mf));
373 391
374 pScsiTm = (SCSITaskMgmt_t *) mf; 392 pScsiTm = (SCSITaskMgmt_t *) mf;
375 pScsiTm->TargetID = ioctl->id; 393 memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
376 pScsiTm->Bus = hd->port; /* 0 */
377 pScsiTm->ChainOffset = 0;
378 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 394 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
379 pScsiTm->Reserved = 0;
380 pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; 395 pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
381 pScsiTm->Reserved1 = 0;
382 pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; 396 pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
383 397 pScsiTm->TargetID = 0;
398 pScsiTm->Bus = 0;
399 pScsiTm->ChainOffset = 0;
400 pScsiTm->Reserved = 0;
401 pScsiTm->Reserved1 = 0;
402 pScsiTm->TaskMsgContext = 0;
384 for (ii= 0; ii < 8; ii++) 403 for (ii= 0; ii < 8; ii++)
385 pScsiTm->LUN[ii] = 0; 404 pScsiTm->LUN[ii] = 0;
386
387 for (ii=0; ii < 7; ii++) 405 for (ii=0; ii < 7; ii++)
388 pScsiTm->Reserved2[ii] = 0; 406 pScsiTm->Reserved2[ii] = 0;
389 407
390 pScsiTm->TaskMsgContext = 0; 408 switch (ioc->bus_type) {
391 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT 409 case FC:
392 "mptctl_bus_reset: issued.\n", ioctl->ioc->name)); 410 timeout = 40;
393 411 break;
394 DBG_DUMP_TM_REQUEST_FRAME(ioctl->ioc, (u32 *)mf); 412 case SAS:
413 timeout = 30;
414 break;
415 case SPI:
416 default:
417 timeout = 2;
418 break;
419 }
395 420
396 ioctl->wait_done=0; 421 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
422 "TaskMgmt type=%d timeout=%ld\n",
423 ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
397 424
398 if ((ioctl->ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 425 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
399 (ioctl->ioc->facts.MsgVersion >= MPI_VERSION_01_05)) 426 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
400 mpt_put_msg_frame_hi_pri(mptctl_id, ioctl->ioc, mf); 427 time_count = jiffies;
428 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
429 (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
430 mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
401 else { 431 else {
402 retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc, 432 retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
403 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); 433 sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
404 if (retval != 0) { 434 if (retval != 0) {
405 dfailprintk(ioctl->ioc, printk(MYIOC_s_ERR_FMT "_send_handshake FAILED!" 435 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
406 " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd, 436 "TaskMgmt send_handshake FAILED!"
407 hd->ioc, mf)); 437 " (ioc %p, mf %p, rc=%d) \n", ioc->name,
438 ioc, mf, retval));
439 mpt_clear_taskmgmt_in_progress_flag(ioc);
408 goto mptctl_bus_reset_done; 440 goto mptctl_bus_reset_done;
409 } 441 }
410 } 442 }
411 443
412 /* Now wait for the command to complete */ 444 /* Now wait for the command to complete */
413 ii = wait_event_timeout(mptctl_wait, 445 ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
414 ioctl->wait_done == 1, 446 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
415 HZ*5 /* 5 second timeout */); 447 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
448 "TaskMgmt failed\n", ioc->name));
449 mpt_free_msg_frame(ioc, mf);
450 mpt_clear_taskmgmt_in_progress_flag(ioc);
451 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
452 retval = 0;
453 else
454 retval = -1; /* return failure */
455 goto mptctl_bus_reset_done;
456 }
416 457
417 if(ii <=0 && (ioctl->wait_done != 1 )) { 458 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
418 mpt_free_msg_frame(hd->ioc, mf); 459 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
419 ioctl->wait_done = 0; 460 "TaskMgmt failed\n", ioc->name));
461 retval = -1; /* return failure */
462 goto mptctl_bus_reset_done;
463 }
464
465 pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
466 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
467 "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
468 "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
469 "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
470 pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
471 le16_to_cpu(pScsiTmReply->IOCStatus),
472 le32_to_cpu(pScsiTmReply->IOCLogInfo),
473 pScsiTmReply->ResponseCode,
474 le32_to_cpu(pScsiTmReply->TerminationCount)));
475
476 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
477
478 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
479 iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED ||
480 iocstatus == MPI_IOCSTATUS_SUCCESS)
481 retval = 0;
482 else {
483 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
484 "TaskMgmt failed\n", ioc->name));
420 retval = -1; /* return failure */ 485 retval = -1; /* return failure */
421 } 486 }
422 487
423mptctl_bus_reset_done:
424 488
425 mptctl_free_tm_flags(ioctl->ioc); 489 mptctl_bus_reset_done:
490 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
491 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
426 return retval; 492 return retval;
427} 493}
428 494
429static int
430mptctl_set_tm_flags(MPT_SCSI_HOST *hd) {
431 unsigned long flags;
432
433 spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
434
435 if (hd->tmState == TM_STATE_NONE) {
436 hd->tmState = TM_STATE_IN_PROGRESS;
437 hd->tmPending = 1;
438 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
439 } else {
440 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
441 return -EBUSY;
442 }
443
444 return 0;
445}
446
447static void
448mptctl_free_tm_flags(MPT_ADAPTER *ioc)
449{
450 MPT_SCSI_HOST * hd;
451 unsigned long flags;
452
453 hd = shost_priv(ioc->sh);
454 if (hd == NULL)
455 return;
456
457 spin_lock_irqsave(&ioc->FreeQlock, flags);
458
459 hd->tmState = TM_STATE_NONE;
460 hd->tmPending = 0;
461 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
462
463 return;
464}
465 495
466/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 496/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
467/* mptctl_ioc_reset 497/* mptctl_ioc_reset
@@ -473,22 +503,23 @@ mptctl_free_tm_flags(MPT_ADAPTER *ioc)
473static int 503static int
474mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 504mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
475{ 505{
476 MPT_IOCTL *ioctl = ioc->ioctl;
477 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC %s_reset routed to IOCTL driver!\n", ioc->name,
478 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
479 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
480
481 if(ioctl == NULL)
482 return 1;
483
484 switch(reset_phase) { 506 switch(reset_phase) {
485 case MPT_IOC_SETUP_RESET: 507 case MPT_IOC_SETUP_RESET:
486 ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET; 508 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
509 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
510 break;
511 case MPT_IOC_PRE_RESET:
512 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
513 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
487 break; 514 break;
488 case MPT_IOC_POST_RESET: 515 case MPT_IOC_POST_RESET:
489 ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET; 516 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
517 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
518 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
519 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET;
520 complete(&ioc->ioctl_cmds.done);
521 }
490 break; 522 break;
491 case MPT_IOC_PRE_RESET:
492 default: 523 default:
493 break; 524 break;
494 } 525 }
@@ -642,7 +673,7 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
642 else 673 else
643 ret = -EINVAL; 674 ret = -EINVAL;
644 675
645 mutex_unlock(&iocp->ioctl->ioctl_mutex); 676 mutex_unlock(&iocp->ioctl_cmds.mutex);
646 677
647 return ret; 678 return ret;
648} 679}
@@ -758,6 +789,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
758 int sge_offset = 0; 789 int sge_offset = 0;
759 u16 iocstat; 790 u16 iocstat;
760 pFWDownloadReply_t ReplyMsg = NULL; 791 pFWDownloadReply_t ReplyMsg = NULL;
792 unsigned long timeleft;
761 793
762 if (mpt_verify_adapter(ioc, &iocp) < 0) { 794 if (mpt_verify_adapter(ioc, &iocp) < 0) {
763 printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", 795 printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
@@ -841,8 +873,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
841 * 96 8 873 * 96 8
842 * 64 4 874 * 64 4
843 */ 875 */
844 maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) 876 maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
845 / (sizeof(dma_addr_t) + sizeof(u32)); 877 sizeof(FWDownloadTCSGE_t))
878 / iocp->SGE_size;
846 if (numfrags > maxfrags) { 879 if (numfrags > maxfrags) {
847 ret = -EMLINK; 880 ret = -EMLINK;
848 goto fwdl_out; 881 goto fwdl_out;
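maxfrags is now derived from the per-adapter SGE_size rather than a compile-time sizeof(dma_addr_t) + sizeof(u32), so the fragment budget for the firmware download SG list follows the negotiated addressing mode: 8 bytes per simple SGE for 32-bit addressing, 12 for 64-bit. A small restatement of the arithmetic as a helper; fwdl_max_frags is a made-up name:

	static int fwdl_max_frags(const MPT_ADAPTER *iocp)
	{
		int room = iocp->req_sz - sizeof(MPIHeader_t) -
			   sizeof(FWDownloadTCSGE_t);

		return room / iocp->SGE_size;	/* room/8 for 32-bit SGEs, room/12 for 64-bit */
	}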
@@ -870,7 +903,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
870 if (nib == 0 || nib == 3) { 903 if (nib == 0 || nib == 3) {
871 ; 904 ;
872 } else if (sgIn->Address) { 905 } else if (sgIn->Address) {
873 mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); 906 iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
874 n++; 907 n++;
875 if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { 908 if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
876 printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " 909 printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
@@ -882,7 +915,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
882 } 915 }
883 sgIn++; 916 sgIn++;
884 bl++; 917 bl++;
885 sgOut += (sizeof(dma_addr_t) + sizeof(u32)); 918 sgOut += iocp->SGE_size;
886 } 919 }
887 920
888 DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); 921 DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);
@@ -891,16 +924,30 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
891 * Finally, perform firmware download. 924 * Finally, perform firmware download.
892 */ 925 */
893 ReplyMsg = NULL; 926 ReplyMsg = NULL;
927 SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext);
928 INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
894 mpt_put_msg_frame(mptctl_id, iocp, mf); 929 mpt_put_msg_frame(mptctl_id, iocp, mf);
895 930
896 /* Now wait for the command to complete */ 931 /* Now wait for the command to complete */
897 ret = wait_event_timeout(mptctl_wait, 932retry_wait:
898 iocp->ioctl->wait_done == 1, 933 timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
899 HZ*60); 934 if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
935 ret = -ETIME;
936 printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
937 if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
938 mpt_free_msg_frame(iocp, mf);
939 goto fwdl_out;
940 }
941 if (!timeleft)
942 mptctl_timeout_expired(iocp, mf);
943 else
944 goto retry_wait;
945 goto fwdl_out;
946 }
900 947
901 if(ret <=0 && (iocp->ioctl->wait_done != 1 )) { 948 if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
902 /* Now we need to reset the board */ 949 printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
903 mptctl_timeout_expired(iocp->ioctl); 950 mpt_free_msg_frame(iocp, mf);
904 ret = -ENODATA; 951 ret = -ENODATA;
905 goto fwdl_out; 952 goto fwdl_out;
906 } 953 }
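The download wait above replaces wait_event_timeout() with wait_for_completion_timeout() plus a retry_wait label: when the wait returns without COMMAND_GOOD, the code frees the frame if a diagnostic reset happened underneath it, escalates through mptctl_timeout_expired() on a genuine timeout, and simply waits again after an early wakeup. A stripped-down sketch of that ladder as a helper, using the same status flags; fwdl_wait is a placeholder name and the 60 second timeout mirrors the hunk above:

	static int fwdl_wait(MPT_ADAPTER *iocp, MPT_FRAME_HDR *mf)
	{
		unsigned long timeleft;

		for (;;) {
			timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done,
							       60 * HZ);
			if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)
				return 0;		/* reply arrived and looked good */
			if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
				mpt_free_msg_frame(iocp, mf);	/* adapter was reset */
				return -ETIME;
			}
			if (!timeleft) {
				mptctl_timeout_expired(iocp, mf); /* bus/hard reset path */
				return -ETIME;
			}
			/* woke early without a good status: wait again */
		}
	}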
@@ -908,7 +955,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
908 if (sgl) 955 if (sgl)
909 kfree_sgl(sgl, sgl_dma, buflist, iocp); 956 kfree_sgl(sgl, sgl_dma, buflist, iocp);
910 957
911 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame; 958 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
912 iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; 959 iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
913 if (iocstat == MPI_IOCSTATUS_SUCCESS) { 960 if (iocstat == MPI_IOCSTATUS_SUCCESS) {
914 printk(MYIOC_s_INFO_FMT "F/W update successfull!\n", iocp->name); 961 printk(MYIOC_s_INFO_FMT "F/W update successfull!\n", iocp->name);
@@ -932,6 +979,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
932 return 0; 979 return 0;
933 980
934fwdl_out: 981fwdl_out:
982
983 CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
984 SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
935 kfree_sgl(sgl, sgl_dma, buflist, iocp); 985 kfree_sgl(sgl, sgl_dma, buflist, iocp);
936 return ret; 986 return ret;
937} 987}
@@ -1003,7 +1053,7 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
1003 * 1053 *
1004 */ 1054 */
1005 sgl = sglbuf; 1055 sgl = sglbuf;
1006 sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1; 1056 sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1;
1007 while (bytes_allocd < bytes) { 1057 while (bytes_allocd < bytes) {
1008 this_alloc = min(alloc_sz, bytes-bytes_allocd); 1058 this_alloc = min(alloc_sz, bytes-bytes_allocd);
1009 buflist[buflist_ent].len = this_alloc; 1059 buflist[buflist_ent].len = this_alloc;
@@ -1024,8 +1074,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
1024 dma_addr_t dma_addr; 1074 dma_addr_t dma_addr;
1025 1075
1026 bytes_allocd += this_alloc; 1076 bytes_allocd += this_alloc;
1027 sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc); 1077 sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
1028 dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); 1078 dma_addr = pci_map_single(ioc->pcidev,
1079 buflist[buflist_ent].kptr, this_alloc, dir);
1029 sgl->Address = dma_addr; 1080 sgl->Address = dma_addr;
1030 1081
1031 fragcnt++; 1082 fragcnt++;
@@ -1771,7 +1822,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1771 int msgContext; 1822 int msgContext;
1772 u16 req_idx; 1823 u16 req_idx;
1773 ulong timeout; 1824 ulong timeout;
1825 unsigned long timeleft;
1774 struct scsi_device *sdev; 1826 struct scsi_device *sdev;
1827 unsigned long flags;
1828 u8 function;
1775 1829
1776 /* bufIn and bufOut are used for user to kernel space transfers 1830 /* bufIn and bufOut are used for user to kernel space transfers
1777 */ 1831 */
@@ -1784,24 +1838,23 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1784 __FILE__, __LINE__, iocnum); 1838 __FILE__, __LINE__, iocnum);
1785 return -ENODEV; 1839 return -ENODEV;
1786 } 1840 }
1787 if (!ioc->ioctl) { 1841
1788 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " 1842 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
1789 "No memory available during driver init.\n", 1843 if (ioc->ioc_reset_in_progress) {
1790 __FILE__, __LINE__); 1844 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1791 return -ENOMEM;
1792 } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) {
1793 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " 1845 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
1794 "Busy with IOC Reset \n", __FILE__, __LINE__); 1846 "Busy with diagnostic reset\n", __FILE__, __LINE__);
1795 return -EBUSY; 1847 return -EBUSY;
1796 } 1848 }
1849 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1797 1850
1798 /* Verify that the final request frame will not be too large. 1851 /* Verify that the final request frame will not be too large.
1799 */ 1852 */
1800 sz = karg.dataSgeOffset * 4; 1853 sz = karg.dataSgeOffset * 4;
1801 if (karg.dataInSize > 0) 1854 if (karg.dataInSize > 0)
1802 sz += sizeof(dma_addr_t) + sizeof(u32); 1855 sz += ioc->SGE_size;
1803 if (karg.dataOutSize > 0) 1856 if (karg.dataOutSize > 0)
1804 sz += sizeof(dma_addr_t) + sizeof(u32); 1857 sz += ioc->SGE_size;
1805 1858
1806 if (sz > ioc->req_sz) { 1859 if (sz > ioc->req_sz) {
1807 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1860 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1827,10 +1880,12 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1827 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1880 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
1828 "Unable to read MF from mpt_ioctl_command struct @ %p\n", 1881 "Unable to read MF from mpt_ioctl_command struct @ %p\n",
1829 ioc->name, __FILE__, __LINE__, mfPtr); 1882 ioc->name, __FILE__, __LINE__, mfPtr);
1883 function = -1;
1830 rc = -EFAULT; 1884 rc = -EFAULT;
1831 goto done_free_mem; 1885 goto done_free_mem;
1832 } 1886 }
1833 hdr->MsgContext = cpu_to_le32(msgContext); 1887 hdr->MsgContext = cpu_to_le32(msgContext);
1888 function = hdr->Function;
1834 1889
1835 1890
1836 /* Verify that this request is allowed. 1891 /* Verify that this request is allowed.
@@ -1838,7 +1893,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1838 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", 1893 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
1839 ioc->name, hdr->Function, mf)); 1894 ioc->name, hdr->Function, mf));
1840 1895
1841 switch (hdr->Function) { 1896 switch (function) {
1842 case MPI_FUNCTION_IOC_FACTS: 1897 case MPI_FUNCTION_IOC_FACTS:
1843 case MPI_FUNCTION_PORT_FACTS: 1898 case MPI_FUNCTION_PORT_FACTS:
1844 karg.dataOutSize = karg.dataInSize = 0; 1899 karg.dataOutSize = karg.dataInSize = 0;
@@ -1893,7 +1948,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1893 } 1948 }
1894 1949
1895 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 1950 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1896 pScsiReq->MsgFlags |= mpt_msg_flags(); 1951 pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
1897 1952
1898 1953
1899 /* verify that app has not requested 1954 /* verify that app has not requested
@@ -1935,8 +1990,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1935 pScsiReq->Control = cpu_to_le32(scsidir | qtag); 1990 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
1936 pScsiReq->DataLength = cpu_to_le32(dataSize); 1991 pScsiReq->DataLength = cpu_to_le32(dataSize);
1937 1992
1938 ioc->ioctl->reset = MPTCTL_RESET_OK;
1939 ioc->ioctl->id = pScsiReq->TargetID;
1940 1993
1941 } else { 1994 } else {
1942 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1995 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1979,7 +2032,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1979 int dataSize; 2032 int dataSize;
1980 2033
1981 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 2034 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1982 pScsiReq->MsgFlags |= mpt_msg_flags(); 2035 pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
1983 2036
1984 2037
1985 /* verify that app has not requested 2038 /* verify that app has not requested
@@ -2014,8 +2067,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2014 pScsiReq->Control = cpu_to_le32(scsidir | qtag); 2067 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
2015 pScsiReq->DataLength = cpu_to_le32(dataSize); 2068 pScsiReq->DataLength = cpu_to_le32(dataSize);
2016 2069
2017 ioc->ioctl->reset = MPTCTL_RESET_OK;
2018 ioc->ioctl->id = pScsiReq->TargetID;
2019 } else { 2070 } else {
2020 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2071 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
2021 "SCSI driver is not loaded. \n", 2072 "SCSI driver is not loaded. \n",
@@ -2026,20 +2077,17 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2026 break; 2077 break;
2027 2078
2028 case MPI_FUNCTION_SCSI_TASK_MGMT: 2079 case MPI_FUNCTION_SCSI_TASK_MGMT:
2029 { 2080 {
2030 MPT_SCSI_HOST *hd = NULL; 2081 SCSITaskMgmt_t *pScsiTm;
2031 if ((ioc->sh == NULL) || ((hd = shost_priv(ioc->sh)) == NULL)) { 2082 pScsiTm = (SCSITaskMgmt_t *)mf;
2032 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2083 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2033 "SCSI driver not loaded or SCSI host not found. \n", 2084 "\tTaskType=0x%x MsgFlags=0x%x "
2034 ioc->name, __FILE__, __LINE__); 2085 "TaskMsgContext=0x%x id=%d channel=%d\n",
2035 rc = -EFAULT; 2086 ioc->name, pScsiTm->TaskType, le32_to_cpu
2036 goto done_free_mem; 2087 (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags,
2037 } else if (mptctl_set_tm_flags(hd) != 0) { 2088 pScsiTm->TargetID, pScsiTm->Bus));
2038 rc = -EPERM;
2039 goto done_free_mem;
2040 }
2041 }
2042 break; 2089 break;
2090 }
2043 2091
2044 case MPI_FUNCTION_IOC_INIT: 2092 case MPI_FUNCTION_IOC_INIT:
2045 { 2093 {
@@ -2123,8 +2171,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2123 if (karg.dataInSize > 0) { 2171 if (karg.dataInSize > 0) {
2124 flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2172 flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2125 MPI_SGE_FLAGS_END_OF_BUFFER | 2173 MPI_SGE_FLAGS_END_OF_BUFFER |
2126 MPI_SGE_FLAGS_DIRECTION | 2174 MPI_SGE_FLAGS_DIRECTION)
2127 mpt_addr_size() )
2128 << MPI_SGE_FLAGS_SHIFT; 2175 << MPI_SGE_FLAGS_SHIFT;
2129 } else { 2176 } else {
2130 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; 2177 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
@@ -2141,8 +2188,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2141 /* Set up this SGE. 2188 /* Set up this SGE.
2142 * Copy to MF and to sglbuf 2189 * Copy to MF and to sglbuf
2143 */ 2190 */
2144 mpt_add_sge(psge, flagsLength, dma_addr_out); 2191 ioc->add_sge(psge, flagsLength, dma_addr_out);
2145 psge += (sizeof(u32) + sizeof(dma_addr_t)); 2192 psge += ioc->SGE_size;
2146 2193
2147 /* Copy user data to kernel space. 2194 /* Copy user data to kernel space.
2148 */ 2195 */
@@ -2175,18 +2222,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2175 /* Set up this SGE 2222 /* Set up this SGE
2176 * Copy to MF and to sglbuf 2223 * Copy to MF and to sglbuf
2177 */ 2224 */
2178 mpt_add_sge(psge, flagsLength, dma_addr_in); 2225 ioc->add_sge(psge, flagsLength, dma_addr_in);
2179 } 2226 }
2180 } 2227 }
2181 } else { 2228 } else {
2182 /* Add a NULL SGE 2229 /* Add a NULL SGE
2183 */ 2230 */
2184 mpt_add_sge(psge, flagsLength, (dma_addr_t) -1); 2231 ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
2185 } 2232 }
2186 2233
2187 ioc->ioctl->wait_done = 0; 2234 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
2235 INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
2188 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { 2236 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
2189 2237
2238 mutex_lock(&ioc->taskmgmt_cmds.mutex);
2239 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
2240 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2241 goto done_free_mem;
2242 }
2243
2190 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); 2244 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
2191 2245
2192 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 2246 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
@@ -2197,10 +2251,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2197 sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); 2251 sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP);
2198 if (rc != 0) { 2252 if (rc != 0) {
2199 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2253 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2200 "_send_handshake FAILED! (ioc %p, mf %p)\n", 2254 "send_handshake FAILED! (ioc %p, mf %p)\n",
2201 ioc->name, ioc, mf)); 2255 ioc->name, ioc, mf));
2202 mptctl_free_tm_flags(ioc); 2256 mpt_clear_taskmgmt_in_progress_flag(ioc);
2203 rc = -ENODATA; 2257 rc = -ENODATA;
2258 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2204 goto done_free_mem; 2259 goto done_free_mem;
2205 } 2260 }
2206 } 2261 }
@@ -2210,36 +2265,47 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2210 2265
2211 /* Now wait for the command to complete */ 2266 /* Now wait for the command to complete */
2212 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2267 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
2213 timeout = wait_event_timeout(mptctl_wait, 2268retry_wait:
2214 ioc->ioctl->wait_done == 1, 2269 timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
2215 HZ*timeout); 2270 HZ*timeout);
2216 2271 if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
2217 if(timeout <=0 && (ioc->ioctl->wait_done != 1 )) { 2272 rc = -ETIME;
2218 /* Now we need to reset the board */ 2273 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
2219 2274 ioc->name, __func__));
2220 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) 2275 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
2221 mptctl_free_tm_flags(ioc); 2276 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2222 2277 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2223 mptctl_timeout_expired(ioc->ioctl); 2278 goto done_free_mem;
2224 rc = -ENODATA; 2279 }
2280 if (!timeleft) {
2281 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2282 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2283 mptctl_timeout_expired(ioc, mf);
2284 mf = NULL;
2285 } else
2286 goto retry_wait;
2225 goto done_free_mem; 2287 goto done_free_mem;
2226 } 2288 }
2227 2289
2290 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2291 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2292
2293
2228 mf = NULL; 2294 mf = NULL;
2229 2295
2230 /* If a valid reply frame, copy to the user. 2296 /* If a valid reply frame, copy to the user.
2231 * Offset 2: reply length in U32's 2297 * Offset 2: reply length in U32's
2232 */ 2298 */
2233 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) { 2299 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
2234 if (karg.maxReplyBytes < ioc->reply_sz) { 2300 if (karg.maxReplyBytes < ioc->reply_sz) {
2235 sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]); 2301 sz = min(karg.maxReplyBytes,
2302 4*ioc->ioctl_cmds.reply[2]);
2236 } else { 2303 } else {
2237 sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]); 2304 sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]);
2238 } 2305 }
2239
2240 if (sz > 0) { 2306 if (sz > 0) {
2241 if (copy_to_user(karg.replyFrameBufPtr, 2307 if (copy_to_user(karg.replyFrameBufPtr,
2242 &ioc->ioctl->ReplyFrame, sz)){ 2308 ioc->ioctl_cmds.reply, sz)){
2243 printk(MYIOC_s_ERR_FMT 2309 printk(MYIOC_s_ERR_FMT
2244 "%s@%d::mptctl_do_mpt_command - " 2310 "%s@%d::mptctl_do_mpt_command - "
2245 "Unable to write out reply frame %p\n", 2311 "Unable to write out reply frame %p\n",
@@ -2252,10 +2318,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2252 2318
2253 /* If valid sense data, copy to user. 2319 /* If valid sense data, copy to user.
2254 */ 2320 */
2255 if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) { 2321 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
2256 sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); 2322 sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
2257 if (sz > 0) { 2323 if (sz > 0) {
2258 if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) { 2324 if (copy_to_user(karg.senseDataPtr,
2325 ioc->ioctl_cmds.sense, sz)) {
2259 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2326 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
2260 "Unable to write sense data to user %p\n", 2327 "Unable to write sense data to user %p\n",
2261 ioc->name, __FILE__, __LINE__, 2328 ioc->name, __FILE__, __LINE__,
@@ -2269,7 +2336,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2269 /* If the overall status is _GOOD and data in, copy data 2336 /* If the overall status is _GOOD and data in, copy data
2270 * to user. 2337 * to user.
2271 */ 2338 */
2272 if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) && 2339 if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
2273 (karg.dataInSize > 0) && (bufIn.kptr)) { 2340 (karg.dataInSize > 0) && (bufIn.kptr)) {
2274 2341
2275 if (copy_to_user(karg.dataInBufPtr, 2342 if (copy_to_user(karg.dataInBufPtr,
@@ -2284,9 +2351,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2284 2351
2285done_free_mem: 2352done_free_mem:
2286 2353
2287 ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD | 2354 CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
2288 MPT_IOCTL_STATUS_SENSE_VALID | 2355 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
2289 MPT_IOCTL_STATUS_RF_VALID );
2290 2356
2291 /* Free the allocated memory. 2357 /* Free the allocated memory.
2292 */ 2358 */
@@ -2336,6 +2402,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2336 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; 2402 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
2337 MPT_FRAME_HDR *mf = NULL; 2403 MPT_FRAME_HDR *mf = NULL;
2338 MPIHeader_t *mpi_hdr; 2404 MPIHeader_t *mpi_hdr;
2405 unsigned long timeleft;
2406 int retval;
2339 2407
2340 /* Reset long to int. Should affect IA64 and SPARC only 2408 /* Reset long to int. Should affect IA64 and SPARC only
2341 */ 2409 */
@@ -2466,9 +2534,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2466 MPT_SCSI_HOST *hd = shost_priv(ioc->sh); 2534 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
2467 2535
2468 if (hd && (cim_rev == 1)) { 2536 if (hd && (cim_rev == 1)) {
2469 karg.hard_resets = hd->hard_resets; 2537 karg.hard_resets = ioc->hard_resets;
2470 karg.soft_resets = hd->soft_resets; 2538 karg.soft_resets = ioc->soft_resets;
2471 karg.timeouts = hd->timeouts; 2539 karg.timeouts = ioc->timeouts;
2472 } 2540 }
2473 } 2541 }
2474 2542
@@ -2476,8 +2544,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2476 * Gather ISTWI(Industry Standard Two Wire Interface) Data 2544 * Gather ISTWI(Industry Standard Two Wire Interface) Data
2477 */ 2545 */
2478 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { 2546 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
2479 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 2547 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
2480 ioc->name,__func__)); 2548 "%s, no msg frames!!\n", ioc->name, __func__));
2481 goto out; 2549 goto out;
2482 } 2550 }
2483 2551
@@ -2498,22 +2566,29 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2498 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2566 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
2499 if (!pbuf) 2567 if (!pbuf)
2500 goto out; 2568 goto out;
2501 mpt_add_sge((char *)&IstwiRWRequest->SGL, 2569 ioc->add_sge((char *)&IstwiRWRequest->SGL,
2502 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); 2570 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
2503 2571
2504 ioc->ioctl->wait_done = 0; 2572 retval = 0;
2573 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
2574 IstwiRWRequest->MsgContext);
2575 INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
2505 mpt_put_msg_frame(mptctl_id, ioc, mf); 2576 mpt_put_msg_frame(mptctl_id, ioc, mf);
2506 2577
2507 rc = wait_event_timeout(mptctl_wait, 2578retry_wait:
2508 ioc->ioctl->wait_done == 1, 2579 timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
2509 HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */); 2580 HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
2510 2581 if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
2511 if(rc <=0 && (ioc->ioctl->wait_done != 1 )) { 2582 retval = -ETIME;
2512 /* 2583 printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
2513 * Now we need to reset the board 2584 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
2514 */ 2585 mpt_free_msg_frame(ioc, mf);
2515 mpt_free_msg_frame(ioc, mf); 2586 goto out;
2516 mptctl_timeout_expired(ioc->ioctl); 2587 }
2588 if (!timeleft)
2589 mptctl_timeout_expired(ioc, mf);
2590 else
2591 goto retry_wait;
2517 goto out; 2592 goto out;
2518 } 2593 }
2519 2594
@@ -2526,10 +2601,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2526 * bays have drives in them 2601 * bays have drives in them
2527 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) 2602 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
2528 */ 2603 */
2529 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) 2604 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
2530 karg.rsvd = *(u32 *)pbuf; 2605 karg.rsvd = *(u32 *)pbuf;
2531 2606
2532 out: 2607 out:
2608 CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
2609 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
2610
2533 if (pbuf) 2611 if (pbuf)
2534 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2612 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
2535 2613
@@ -2753,7 +2831,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
2753 2831
2754 ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); 2832 ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
2755 2833
2756 mutex_unlock(&iocp->ioctl->ioctl_mutex); 2834 mutex_unlock(&iocp->ioctl_cmds.mutex);
2757 2835
2758 return ret; 2836 return ret;
2759} 2837}
@@ -2807,7 +2885,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
2807 */ 2885 */
2808 ret = mptctl_do_mpt_command (karg, &uarg->MF); 2886 ret = mptctl_do_mpt_command (karg, &uarg->MF);
2809 2887
2810 mutex_unlock(&iocp->ioctl->ioctl_mutex); 2888 mutex_unlock(&iocp->ioctl_cmds.mutex);
2811 2889
2812 return ret; 2890 return ret;
2813} 2891}
@@ -2859,21 +2937,10 @@ static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long a
2859static int 2937static int
2860mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2938mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2861{ 2939{
2862 MPT_IOCTL *mem;
2863 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 2940 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2864 2941
2865 /* 2942 mutex_init(&ioc->ioctl_cmds.mutex);
2866 * Allocate and inite a MPT_IOCTL structure 2943 init_completion(&ioc->ioctl_cmds.done);
2867 */
2868 mem = kzalloc(sizeof(MPT_IOCTL), GFP_KERNEL);
2869 if (!mem) {
2870 mptctl_remove(pdev);
2871 return -ENOMEM;
2872 }
2873
2874 ioc->ioctl = mem;
2875 ioc->ioctl->ioc = ioc;
2876 mutex_init(&ioc->ioctl->ioctl_mutex);
2877 return 0; 2944 return 0;
2878} 2945}
2879 2946
@@ -2887,9 +2954,6 @@ mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2887static void 2954static void
2888mptctl_remove(struct pci_dev *pdev) 2955mptctl_remove(struct pci_dev *pdev)
2889{ 2956{
2890 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2891
2892 kfree ( ioc->ioctl );
2893} 2957}
2894 2958
2895static struct mpt_pci_driver mptctl_driver = { 2959static struct mpt_pci_driver mptctl_driver = {
@@ -2929,6 +2993,7 @@ static int __init mptctl_init(void)
2929 goto out_fail; 2993 goto out_fail;
2930 } 2994 }
2931 2995
2996 mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
2932 mpt_reset_register(mptctl_id, mptctl_ioc_reset); 2997 mpt_reset_register(mptctl_id, mptctl_ioc_reset);
2933 mpt_event_register(mptctl_id, mptctl_event_process); 2998 mpt_event_register(mptctl_id, mptctl_event_process);
2934 2999
@@ -2953,6 +3018,7 @@ static void mptctl_exit(void)
2953 3018
2954 /* De-register callback handler from base module */ 3019 /* De-register callback handler from base module */
2955 mpt_deregister(mptctl_id); 3020 mpt_deregister(mptctl_id);
3021 mpt_reset_deregister(mptctl_taskmgmt_id);
2956 3022
2957 mpt_device_driver_deregister(MPTCTL_DRIVER); 3023 mpt_device_driver_deregister(MPTCTL_DRIVER);
2958 3024
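The mptctl.c hunks above replace the driver-private ioctl wait (ioc->ioctl->wait_done polled via wait_event_timeout) with the shared ioctl_cmds management-command framework: the status word is initialized, the frame is posted, the caller sleeps on a completion, and the result is classified as normal completion, IOC-reset cleanup, timeout, or a spurious wakeup that loops back to retry_wait. The following is a minimal user-space sketch of that post-wait decision only, with status bits assumed to stand in for the MPT_MGMT_STATUS_* flags; it is illustrative and not part of the patch.

/*
 * Illustrative sketch (not driver code) of the post-wait decision logic
 * the mptctl hunks converge on.  STATUS_* are assumed stand-ins for the
 * MPT_MGMT_STATUS_* flags used in the patch.
 */
#include <stdio.h>

#define STATUS_COMMAND_GOOD  0x01   /* reply arrived and was good            */
#define STATUS_DID_IOCRESET  0x02   /* an IOC reset flushed the command      */

enum wait_outcome { CMD_DONE, CMD_FREED_BY_RESET, CMD_TIMED_OUT, CMD_RETRY_WAIT };

/* timeleft is what wait_for_completion_timeout() returned (0 == timed out) */
static enum wait_outcome classify_wait(unsigned int status, unsigned long timeleft)
{
	if (status & STATUS_COMMAND_GOOD)
		return CMD_DONE;           /* copy reply/sense back to the user  */
	if (status & STATUS_DID_IOCRESET)
		return CMD_FREED_BY_RESET; /* frame already reclaimed by reset   */
	if (!timeleft)
		return CMD_TIMED_OUT;      /* mptctl_timeout_expired() path      */
	return CMD_RETRY_WAIT;             /* woken for another reason: re-wait  */
}

int main(void)
{
	printf("%d\n", classify_wait(STATUS_COMMAND_GOOD, 5));  /* CMD_DONE      */
	printf("%d\n", classify_wait(0, 0));                    /* CMD_TIMED_OUT */
	return 0;
}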
diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h
index 510b9f492093..28e478879284 100644
--- a/drivers/message/fusion/mptdebug.h
+++ b/drivers/message/fusion/mptdebug.h
@@ -58,6 +58,7 @@
58#define MPT_DEBUG_FC 0x00080000 58#define MPT_DEBUG_FC 0x00080000
59#define MPT_DEBUG_SAS 0x00100000 59#define MPT_DEBUG_SAS 0x00100000
60#define MPT_DEBUG_SAS_WIDE 0x00200000 60#define MPT_DEBUG_SAS_WIDE 0x00200000
61#define MPT_DEBUG_36GB_MEM 0x00400000
61 62
62/* 63/*
63 * CONFIG_FUSION_LOGGING - enabled in Kconfig 64 * CONFIG_FUSION_LOGGING - enabled in Kconfig
@@ -135,6 +136,8 @@
135#define dsaswideprintk(IOC, CMD) \ 136#define dsaswideprintk(IOC, CMD) \
136 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) 137 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
137 138
139#define d36memprintk(IOC, CMD) \
140 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
138 141
139 142
140/* 143/*
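The mptdebug.h hunk adds an MPT_DEBUG_36GB_MEM bit and a d36memprintk() wrapper that gates its printk on that bit through MPT_CHECK_LOGGING. The following self-contained analog shows the conditional-logging pattern; CHECK_LOGGING here is an assumed stand-in for the real macro and the struct is simplified to a bare debug_level mask.

/*
 * Standalone analog of the debug_level-gated logging pattern the new
 * d36memprintk() macro plugs into.  CHECK_LOGGING is an assumed stand-in
 * for MPT_CHECK_LOGGING; only the gating behaviour is illustrated.
 */
#include <stdio.h>

#define DBG_36GB_MEM 0x00400000

struct adapter { unsigned int debug_level; };

#define CHECK_LOGGING(IOC, CMD, BITS)			\
	do {						\
		if ((IOC)->debug_level & (BITS))	\
			CMD;				\
	} while (0)

#define d36memprintk(IOC, CMD) CHECK_LOGGING(IOC, CMD, DBG_36GB_MEM)

int main(void)
{
	struct adapter ioc = { .debug_level = DBG_36GB_MEM };

	/* emitted only because the 36GB-memory debug bit is set in the mask */
	d36memprintk(&ioc, printf("36GB memory debug message\n"));
	return 0;
}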
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index c3c24fdf9fb6..e61df133a59e 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1251 * A slightly different algorithm is required for 1251 * A slightly different algorithm is required for
1252 * 64bit SGEs. 1252 * 64bit SGEs.
1253 */ 1253 */
1254 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 1254 scale = ioc->req_sz/ioc->SGE_size;
1255 if (sizeof(dma_addr_t) == sizeof(u64)) { 1255 if (ioc->sg_addr_size == sizeof(u64)) {
1256 numSGE = (scale - 1) * 1256 numSGE = (scale - 1) *
1257 (ioc->facts.MaxChainDepth-1) + scale + 1257 (ioc->facts.MaxChainDepth-1) + scale +
1258 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 1258 (ioc->req_sz - 60) / ioc->SGE_size;
1259 sizeof(u32));
1260 } else { 1259 } else {
1261 numSGE = 1 + (scale - 1) * 1260 numSGE = 1 + (scale - 1) *
1262 (ioc->facts.MaxChainDepth-1) + scale + 1261 (ioc->facts.MaxChainDepth-1) + scale +
1263 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 1262 (ioc->req_sz - 64) / ioc->SGE_size;
1264 sizeof(u32));
1265 } 1263 }
1266 1264
1267 if (numSGE < sh->sg_tablesize) { 1265 if (numSGE < sh->sg_tablesize) {
@@ -1292,9 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1292 1290
1293 /* Clear the TM flags 1291 /* Clear the TM flags
1294 */ 1292 */
1295 hd->tmPending = 0;
1296 hd->tmState = TM_STATE_NONE;
1297 hd->resetPending = 0;
1298 hd->abortSCpnt = NULL; 1293 hd->abortSCpnt = NULL;
1299 1294
1300 /* Clear the pointer used to store 1295 /* Clear the pointer used to store
@@ -1312,8 +1307,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1312 hd->timer.data = (unsigned long) hd; 1307 hd->timer.data = (unsigned long) hd;
1313 hd->timer.function = mptscsih_timer_expired; 1308 hd->timer.function = mptscsih_timer_expired;
1314 1309
1315 init_waitqueue_head(&hd->scandv_waitq);
1316 hd->scandv_wait_done = 0;
1317 hd->last_queue_full = 0; 1310 hd->last_queue_full = 0;
1318 1311
1319 sh->transportt = mptfc_transport_template; 1312 sh->transportt = mptfc_transport_template;
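The mptfc.c hunk above rewrites the scatter-gather sizing to use the per-adapter ioc->SGE_size and ioc->sg_addr_size fields instead of open-coded sizeof arithmetic. Below is a worked example of the resulting numSGE computation under assumed values (128-byte request frames, 12-byte 64-bit SGEs, MaxChainDepth of 16); the 60/64 offsets are carried over from the driver code as-is.

/*
 * Worked example of the numSGE computation in the mptfc_probe hunk, under
 * assumed values.  sg_addr_size == 8 corresponds to the sizeof(u64) branch.
 */
#include <stdio.h>

int main(void)
{
	int req_sz = 128, SGE_size = 12, sg_addr_size = 8, max_chain_depth = 16;
	int scale = req_sz / SGE_size;       /* SGEs that fit in one request frame */
	int numSGE;

	if (sg_addr_size == 8)
		numSGE = (scale - 1) * (max_chain_depth - 1) + scale +
			 (req_sz - 60) / SGE_size;
	else
		numSGE = 1 + (scale - 1) * (max_chain_depth - 1) + scale +
			 (req_sz - 64) / SGE_size;

	printf("scale=%d numSGE=%d\n", scale, numSGE);  /* scale=10 numSGE=150 */
	return 0;
}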
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 79f5433359f9..20e0b447e8e8 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -93,8 +93,37 @@ static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
93static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; 93static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
94static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ 94static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
95static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS; 95static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
96 96static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
97static void mptsas_hotplug_work(struct work_struct *work); 97
98static void mptsas_firmware_event_work(struct work_struct *work);
99static void mptsas_send_sas_event(struct fw_event_work *fw_event);
100static void mptsas_send_raid_event(struct fw_event_work *fw_event);
101static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
102static void mptsas_parse_device_info(struct sas_identify *identify,
103 struct mptsas_devinfo *device_info);
104static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
105 struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
106static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address
107 (MPT_ADAPTER *ioc, u64 sas_address);
108static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
109 struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
110static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
111 struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
112static int mptsas_add_end_device(MPT_ADAPTER *ioc,
113 struct mptsas_phyinfo *phy_info);
114static void mptsas_del_end_device(MPT_ADAPTER *ioc,
115 struct mptsas_phyinfo *phy_info);
116static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
117static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
118 (MPT_ADAPTER *ioc, u64 sas_address);
119static void mptsas_expander_delete(MPT_ADAPTER *ioc,
120 struct mptsas_portinfo *port_info, u8 force);
121static void mptsas_send_expander_event(struct fw_event_work *fw_event);
122static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
123static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
124static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
125static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
126static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
98 127
99static void mptsas_print_phy_data(MPT_ADAPTER *ioc, 128static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
100 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) 129 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
@@ -218,30 +247,125 @@ static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
218 le16_to_cpu(pg1->AttachedDevHandle))); 247 le16_to_cpu(pg1->AttachedDevHandle)));
219} 248}
220 249
221static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy) 250/* inhibit sas firmware event handling */
251static void
252mptsas_fw_event_off(MPT_ADAPTER *ioc)
222{ 253{
223 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); 254 unsigned long flags;
224 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; 255
256 spin_lock_irqsave(&ioc->fw_event_lock, flags);
257 ioc->fw_events_off = 1;
258 ioc->sas_discovery_quiesce_io = 0;
259 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
260
225} 261}
226 262
227static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy) 263/* enable sas firmware event handling */
264static void
265mptsas_fw_event_on(MPT_ADAPTER *ioc)
228{ 266{
229 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); 267 unsigned long flags;
230 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; 268
269 spin_lock_irqsave(&ioc->fw_event_lock, flags);
270 ioc->fw_events_off = 0;
271 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
231} 272}
232 273
233static struct mptsas_portinfo * 274/* queue a sas firmware event */
234mptsas_get_hba_portinfo(MPT_ADAPTER *ioc) 275static void
276mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
277 unsigned long delay)
235{ 278{
236 struct list_head *head = &ioc->sas_topology; 279 unsigned long flags;
237 struct mptsas_portinfo *pi = NULL; 280
281 spin_lock_irqsave(&ioc->fw_event_lock, flags);
282 list_add_tail(&fw_event->list, &ioc->fw_event_list);
283 INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
284 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
285 ioc->name, __func__, fw_event));
286 queue_delayed_work(ioc->fw_event_q, &fw_event->work,
287 delay);
288 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
289}
290
291/* requeue a sas firmware event */
292static void
293mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
294 unsigned long delay)
295{
296 unsigned long flags;
297 spin_lock_irqsave(&ioc->fw_event_lock, flags);
298 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
299 "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
300 fw_event->retries++;
301 queue_delayed_work(ioc->fw_event_q, &fw_event->work,
302 msecs_to_jiffies(delay));
303 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
304}
305
 306/* free memory associated with a sas firmware event */
307static void
308mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
309{
310 unsigned long flags;
311
312 spin_lock_irqsave(&ioc->fw_event_lock, flags);
313 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
314 ioc->name, __func__, fw_event));
315 list_del(&fw_event->list);
316 kfree(fw_event);
317 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
318}
319
320/* walk the firmware event queue, and either stop or wait for
321 * outstanding events to complete */
322static void
323mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
324{
325 struct fw_event_work *fw_event, *next;
326 struct mptsas_target_reset_event *target_reset_list, *n;
327 u8 flush_q;
328 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
329
330 /* flush the target_reset_list */
331 if (!list_empty(&hd->target_reset_list)) {
332 list_for_each_entry_safe(target_reset_list, n,
333 &hd->target_reset_list, list) {
334 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
335 "%s: removing target reset for id=%d\n",
336 ioc->name, __func__,
337 target_reset_list->sas_event_data.TargetID));
338 list_del(&target_reset_list->list);
339 kfree(target_reset_list);
340 }
341 }
342
343 if (list_empty(&ioc->fw_event_list) ||
344 !ioc->fw_event_q || in_interrupt())
345 return;
238 346
239 /* always the first entry on sas_topology list */ 347 flush_q = 0;
348 list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
349 if (cancel_delayed_work(&fw_event->work))
350 mptsas_free_fw_event(ioc, fw_event);
351 else
352 flush_q = 1;
353 }
354 if (flush_q)
355 flush_workqueue(ioc->fw_event_q);
356}
240 357
241 if (!list_empty(head))
242 pi = list_entry(head->next, struct mptsas_portinfo, list);
243 358
244 return pi; 359static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
360{
361 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
362 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
363}
364
365static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
366{
367 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
368 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
245} 369}
246 370
247/* 371/*
@@ -265,6 +389,38 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
265 return rc; 389 return rc;
266} 390}
267 391
392/**
393 * mptsas_find_portinfo_by_sas_address -
394 * @ioc: Pointer to MPT_ADAPTER structure
 395 * @sas_address: sas address to search for
396 *
397 * This function should be called with the sas_topology_mutex already held
398 *
399 **/
400static struct mptsas_portinfo *
401mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
402{
403 struct mptsas_portinfo *port_info, *rc = NULL;
404 int i;
405
406 if (sas_address >= ioc->hba_port_sas_addr &&
407 sas_address < (ioc->hba_port_sas_addr +
408 ioc->hba_port_num_phy))
409 return ioc->hba_port_info;
410
411 mutex_lock(&ioc->sas_topology_mutex);
412 list_for_each_entry(port_info, &ioc->sas_topology, list)
413 for (i = 0; i < port_info->num_phys; i++)
414 if (port_info->phy_info[i].identify.sas_address ==
415 sas_address) {
416 rc = port_info;
417 goto out;
418 }
419 out:
420 mutex_unlock(&ioc->sas_topology_mutex);
421 return rc;
422}
423
268/* 424/*
269 * Returns true if there is a scsi end device 425 * Returns true if there is a scsi end device
270 */ 426 */
@@ -308,6 +464,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
308 if(phy_info->port_details != port_details) 464 if(phy_info->port_details != port_details)
309 continue; 465 continue;
310 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); 466 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
467 mptsas_set_rphy(ioc, phy_info, NULL);
311 phy_info->port_details = NULL; 468 phy_info->port_details = NULL;
312 } 469 }
313 kfree(port_details); 470 kfree(port_details);
@@ -379,6 +536,285 @@ starget)
379 phy_info->port_details->starget = starget; 536 phy_info->port_details->starget = starget;
380} 537}
381 538
539/**
540 * mptsas_add_device_component -
541 * @ioc: Pointer to MPT_ADAPTER structure
542 * @channel: fw mapped id's
543 * @id:
544 * @sas_address:
545 * @device_info:
546 *
547 **/
548static void
549mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
550 u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
551{
552 struct mptsas_device_info *sas_info, *next;
553 struct scsi_device *sdev;
554 struct scsi_target *starget;
555 struct sas_rphy *rphy;
556
557 /*
558 * Delete all matching devices out of the list
559 */
560 mutex_lock(&ioc->sas_device_info_mutex);
561 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
562 list) {
563 if (!sas_info->is_logical_volume &&
564 (sas_info->sas_address == sas_address ||
565 (sas_info->fw.channel == channel &&
566 sas_info->fw.id == id))) {
567 list_del(&sas_info->list);
568 kfree(sas_info);
569 }
570 }
571
572 sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
573 if (!sas_info)
574 goto out;
575
576 /*
577 * Set Firmware mapping
578 */
579 sas_info->fw.id = id;
580 sas_info->fw.channel = channel;
581
582 sas_info->sas_address = sas_address;
583 sas_info->device_info = device_info;
584 sas_info->slot = slot;
585 sas_info->enclosure_logical_id = enclosure_logical_id;
586 INIT_LIST_HEAD(&sas_info->list);
587 list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
588
589 /*
590 * Set OS mapping
591 */
592 shost_for_each_device(sdev, ioc->sh) {
593 starget = scsi_target(sdev);
594 rphy = dev_to_rphy(starget->dev.parent);
595 if (rphy->identify.sas_address == sas_address) {
596 sas_info->os.id = starget->id;
597 sas_info->os.channel = starget->channel;
598 }
599 }
600
601 out:
602 mutex_unlock(&ioc->sas_device_info_mutex);
603 return;
604}
605
606/**
607 * mptsas_add_device_component_by_fw -
608 * @ioc: Pointer to MPT_ADAPTER structure
609 * @channel: fw mapped id's
610 * @id:
611 *
612 **/
613static void
614mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
615{
616 struct mptsas_devinfo sas_device;
617 struct mptsas_enclosure enclosure_info;
618 int rc;
619
620 rc = mptsas_sas_device_pg0(ioc, &sas_device,
621 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
622 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
623 (channel << 8) + id);
624 if (rc)
625 return;
626
627 memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
628 mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
629 (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
630 MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
631 sas_device.handle_enclosure);
632
633 mptsas_add_device_component(ioc, sas_device.channel,
634 sas_device.id, sas_device.sas_address, sas_device.device_info,
635 sas_device.slot, enclosure_info.enclosure_logical_id);
636}
637
638/**
639 * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
640 * @ioc: Pointer to MPT_ADAPTER structure
641 * @channel: fw mapped id's
642 * @id:
643 *
644 **/
645static void
646mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
647 struct scsi_target *starget)
648{
649 CONFIGPARMS cfg;
650 ConfigPageHeader_t hdr;
651 dma_addr_t dma_handle;
652 pRaidVolumePage0_t buffer = NULL;
653 int i;
654 RaidPhysDiskPage0_t phys_disk;
655 struct mptsas_device_info *sas_info, *next;
656
657 memset(&cfg, 0 , sizeof(CONFIGPARMS));
658 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
659 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
 660 /* assume all volumes are on channel = 0 */
661 cfg.pageAddr = starget->id;
662 cfg.cfghdr.hdr = &hdr;
663 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
664 cfg.timeout = 10;
665
666 if (mpt_config(ioc, &cfg) != 0)
667 goto out;
668
669 if (!hdr.PageLength)
670 goto out;
671
672 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
673 &dma_handle);
674
675 if (!buffer)
676 goto out;
677
678 cfg.physAddr = dma_handle;
679 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
680
681 if (mpt_config(ioc, &cfg) != 0)
682 goto out;
683
684 if (!buffer->NumPhysDisks)
685 goto out;
686
687 /*
688 * Adding entry for hidden components
689 */
690 for (i = 0; i < buffer->NumPhysDisks; i++) {
691
692 if (mpt_raid_phys_disk_pg0(ioc,
693 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
694 continue;
695
696 mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
697 phys_disk.PhysDiskID);
698
699 mutex_lock(&ioc->sas_device_info_mutex);
700 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
701 list) {
702 if (!sas_info->is_logical_volume &&
703 (sas_info->fw.channel == phys_disk.PhysDiskBus &&
704 sas_info->fw.id == phys_disk.PhysDiskID)) {
705 sas_info->is_hidden_raid_component = 1;
706 sas_info->volume_id = starget->id;
707 }
708 }
709 mutex_unlock(&ioc->sas_device_info_mutex);
710
711 }
712
713 /*
714 * Delete all matching devices out of the list
715 */
716 mutex_lock(&ioc->sas_device_info_mutex);
717 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
718 list) {
719 if (sas_info->is_logical_volume && sas_info->fw.id ==
720 starget->id) {
721 list_del(&sas_info->list);
722 kfree(sas_info);
723 }
724 }
725
726 sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
727 if (sas_info) {
728 sas_info->fw.id = starget->id;
729 sas_info->os.id = starget->id;
730 sas_info->os.channel = starget->channel;
731 sas_info->is_logical_volume = 1;
732 INIT_LIST_HEAD(&sas_info->list);
733 list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
734 }
735 mutex_unlock(&ioc->sas_device_info_mutex);
736
737 out:
738 if (buffer)
739 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
740 dma_handle);
741}
742
743/**
744 * mptsas_add_device_component_starget -
745 * @ioc: Pointer to MPT_ADAPTER structure
746 * @starget:
747 *
748 **/
749static void
750mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
751 struct scsi_target *starget)
752{
753 VirtTarget *vtarget;
754 struct sas_rphy *rphy;
755 struct mptsas_phyinfo *phy_info = NULL;
756 struct mptsas_enclosure enclosure_info;
757
758 rphy = dev_to_rphy(starget->dev.parent);
759 vtarget = starget->hostdata;
760 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
761 rphy->identify.sas_address);
762 if (!phy_info)
763 return;
764
765 memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
766 mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
767 (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
768 MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
769 phy_info->attached.handle_enclosure);
770
771 mptsas_add_device_component(ioc, phy_info->attached.channel,
772 phy_info->attached.id, phy_info->attached.sas_address,
773 phy_info->attached.device_info,
774 phy_info->attached.slot, enclosure_info.enclosure_logical_id);
775}
776
777/**
778 * mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
779 * @ioc: Pointer to MPT_ADAPTER structure
780 * @channel: os mapped id's
781 * @id:
782 *
783 **/
784static void
785mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
786{
787 struct mptsas_device_info *sas_info, *next;
788
789 /*
790 * Set is_cached flag
791 */
792 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
793 list) {
794 if (sas_info->os.channel == channel && sas_info->os.id == id)
795 sas_info->is_cached = 1;
796 }
797}
798
799/**
800 * mptsas_del_device_components - Cleaning the list
801 * @ioc: Pointer to MPT_ADAPTER structure
802 *
803 **/
804static void
805mptsas_del_device_components(MPT_ADAPTER *ioc)
806{
807 struct mptsas_device_info *sas_info, *next;
808
809 mutex_lock(&ioc->sas_device_info_mutex);
810 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
811 list) {
812 list_del(&sas_info->list);
813 kfree(sas_info);
814 }
815 mutex_unlock(&ioc->sas_device_info_mutex);
816}
817
382 818
383/* 819/*
384 * mptsas_setup_wide_ports 820 * mptsas_setup_wide_ports
@@ -434,8 +870,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
434 * Forming a port 870 * Forming a port
435 */ 871 */
436 if (!port_details) { 872 if (!port_details) {
437 port_details = kzalloc(sizeof(*port_details), 873 port_details = kzalloc(sizeof(struct
438 GFP_KERNEL); 874 mptsas_portinfo_details), GFP_KERNEL);
439 if (!port_details) 875 if (!port_details)
440 goto out; 876 goto out;
441 port_details->num_phys = 1; 877 port_details->num_phys = 1;
@@ -523,15 +959,62 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
523 VirtTarget *vtarget = NULL; 959 VirtTarget *vtarget = NULL;
524 960
525 shost_for_each_device(sdev, ioc->sh) { 961 shost_for_each_device(sdev, ioc->sh) {
526 if ((vdevice = sdev->hostdata) == NULL) 962 vdevice = sdev->hostdata;
963 if ((vdevice == NULL) ||
964 (vdevice->vtarget == NULL))
965 continue;
966 if ((vdevice->vtarget->tflags &
967 MPT_TARGET_FLAGS_RAID_COMPONENT ||
968 vdevice->vtarget->raidVolume))
527 continue; 969 continue;
528 if (vdevice->vtarget->id == id && 970 if (vdevice->vtarget->id == id &&
529 vdevice->vtarget->channel == channel) 971 vdevice->vtarget->channel == channel)
530 vtarget = vdevice->vtarget; 972 vtarget = vdevice->vtarget;
531 } 973 }
532 return vtarget; 974 return vtarget;
533} 975}
534 976
977static void
978mptsas_queue_device_delete(MPT_ADAPTER *ioc,
979 MpiEventDataSasDeviceStatusChange_t *sas_event_data)
980{
981 struct fw_event_work *fw_event;
982 int sz;
983
984 sz = offsetof(struct fw_event_work, event_data) +
985 sizeof(MpiEventDataSasDeviceStatusChange_t);
986 fw_event = kzalloc(sz, GFP_ATOMIC);
987 if (!fw_event) {
988 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
989 ioc->name, __func__, __LINE__);
990 return;
991 }
992 memcpy(fw_event->event_data, sas_event_data,
993 sizeof(MpiEventDataSasDeviceStatusChange_t));
994 fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
995 fw_event->ioc = ioc;
996 mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
997}
998
999static void
1000mptsas_queue_rescan(MPT_ADAPTER *ioc)
1001{
1002 struct fw_event_work *fw_event;
1003 int sz;
1004
1005 sz = offsetof(struct fw_event_work, event_data);
1006 fw_event = kzalloc(sz, GFP_ATOMIC);
1007 if (!fw_event) {
1008 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
1009 ioc->name, __func__, __LINE__);
1010 return;
1011 }
1012 fw_event->event = -1;
1013 fw_event->ioc = ioc;
1014 mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
1015}
1016
1017
535/** 1018/**
536 * mptsas_target_reset 1019 * mptsas_target_reset
537 * 1020 *
@@ -550,13 +1033,21 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
550{ 1033{
551 MPT_FRAME_HDR *mf; 1034 MPT_FRAME_HDR *mf;
552 SCSITaskMgmt_t *pScsiTm; 1035 SCSITaskMgmt_t *pScsiTm;
553 1036 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
554 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
555 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
556 ioc->name,__func__, __LINE__));
557 return 0; 1037 return 0;
1038
1039
1040 mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
1041 if (mf == NULL) {
1042 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
1043 "%s, no msg frames @%d!!\n", ioc->name,
1044 __func__, __LINE__));
1045 goto out_fail;
558 } 1046 }
559 1047
1048 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
1049 ioc->name, mf));
1050
560 /* Format the Request 1051 /* Format the Request
561 */ 1052 */
562 pScsiTm = (SCSITaskMgmt_t *) mf; 1053 pScsiTm = (SCSITaskMgmt_t *) mf;
@@ -569,9 +1060,18 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
569 1060
570 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); 1061 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
571 1062
572 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); 1063 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1064 "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
1065 ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
1066
1067 mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
573 1068
574 return 1; 1069 return 1;
1070
1071 out_fail:
1072
1073 mpt_clear_taskmgmt_in_progress_flag(ioc);
1074 return 0;
575} 1075}
576 1076
577/** 1077/**
@@ -602,11 +1102,12 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
602 1102
603 vtarget->deleted = 1; /* block IO */ 1103 vtarget->deleted = 1; /* block IO */
604 1104
605 target_reset_list = kzalloc(sizeof(*target_reset_list), 1105 target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
606 GFP_ATOMIC); 1106 GFP_ATOMIC);
607 if (!target_reset_list) { 1107 if (!target_reset_list) {
608 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", 1108 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
609 ioc->name,__func__, __LINE__)); 1109 "%s, failed to allocate mem @%d..!!\n",
1110 ioc->name, __func__, __LINE__));
610 return; 1111 return;
611 } 1112 }
612 1113
@@ -614,84 +1115,101 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
614 sizeof(*sas_event_data)); 1115 sizeof(*sas_event_data));
615 list_add_tail(&target_reset_list->list, &hd->target_reset_list); 1116 list_add_tail(&target_reset_list->list, &hd->target_reset_list);
616 1117
617 if (hd->resetPending) 1118 target_reset_list->time_count = jiffies;
618 return;
619 1119
620 if (mptsas_target_reset(ioc, channel, id)) { 1120 if (mptsas_target_reset(ioc, channel, id)) {
621 target_reset_list->target_reset_issued = 1; 1121 target_reset_list->target_reset_issued = 1;
622 hd->resetPending = 1;
623 } 1122 }
624} 1123}
625 1124
626/** 1125/**
627 * mptsas_dev_reset_complete 1126 * mptsas_taskmgmt_complete - complete SAS task management function
628 * 1127 * @ioc: Pointer to MPT_ADAPTER structure
629 * Completion for TARGET_RESET after NOT_RESPONDING_EVENT,
630 * enable work queue to finish off removing device from upper layers.
631 * then send next TARGET_RESET in the queue.
632 *
633 * @ioc
634 * 1128 *
1129 * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
1130 * queue to finish off removing device from upper layers. then send next
1131 * TARGET_RESET in the queue.
635 **/ 1132 **/
636static void 1133static int
637mptsas_dev_reset_complete(MPT_ADAPTER *ioc) 1134mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
638{ 1135{
639 MPT_SCSI_HOST *hd = shost_priv(ioc->sh); 1136 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
640 struct list_head *head = &hd->target_reset_list; 1137 struct list_head *head = &hd->target_reset_list;
641 struct mptsas_target_reset_event *target_reset_list;
642 struct mptsas_hotplug_event *ev;
643 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
644 u8 id, channel; 1138 u8 id, channel;
645 __le64 sas_address; 1139 struct mptsas_target_reset_event *target_reset_list;
1140 SCSITaskMgmtReply_t *pScsiTmReply;
1141
1142 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
1143 "(mf = %p, mr = %p)\n", ioc->name, mf, mr));
1144
1145 pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
1146 if (pScsiTmReply) {
1147 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1148 "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
1149 "\ttask_type = 0x%02X, iocstatus = 0x%04X "
1150 "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
1151 "term_cmnds = %d\n", ioc->name,
1152 pScsiTmReply->Bus, pScsiTmReply->TargetID,
1153 pScsiTmReply->TaskType,
1154 le16_to_cpu(pScsiTmReply->IOCStatus),
1155 le32_to_cpu(pScsiTmReply->IOCLogInfo),
1156 pScsiTmReply->ResponseCode,
1157 le32_to_cpu(pScsiTmReply->TerminationCount)));
1158
1159 if (pScsiTmReply->ResponseCode)
1160 mptscsih_taskmgmt_response_code(ioc,
1161 pScsiTmReply->ResponseCode);
1162 }
1163
1164 if (pScsiTmReply && (pScsiTmReply->TaskType ==
1165 MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
1166 MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
1167 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
1168 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
1169 memcpy(ioc->taskmgmt_cmds.reply, mr,
1170 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
1171 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
1172 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
1173 complete(&ioc->taskmgmt_cmds.done);
1174 return 1;
1175 }
1176 return 0;
1177 }
1178
1179 mpt_clear_taskmgmt_in_progress_flag(ioc);
646 1180
647 if (list_empty(head)) 1181 if (list_empty(head))
648 return; 1182 return 1;
649 1183
650 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list); 1184 target_reset_list = list_entry(head->next,
1185 struct mptsas_target_reset_event, list);
651 1186
652 sas_event_data = &target_reset_list->sas_event_data; 1187 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
653 id = sas_event_data->TargetID; 1188 "TaskMgmt: completed (%d seconds)\n",
654 channel = sas_event_data->Bus; 1189 ioc->name, jiffies_to_msecs(jiffies -
655 hd->resetPending = 0; 1190 target_reset_list->time_count)/1000));
1191
1192 id = pScsiTmReply->TargetID;
1193 channel = pScsiTmReply->Bus;
1194 target_reset_list->time_count = jiffies;
656 1195
657 /* 1196 /*
658 * retry target reset 1197 * retry target reset
659 */ 1198 */
660 if (!target_reset_list->target_reset_issued) { 1199 if (!target_reset_list->target_reset_issued) {
661 if (mptsas_target_reset(ioc, channel, id)) { 1200 if (mptsas_target_reset(ioc, channel, id))
662 target_reset_list->target_reset_issued = 1; 1201 target_reset_list->target_reset_issued = 1;
663 hd->resetPending = 1; 1202 return 1;
664 }
665 return;
666 } 1203 }
667 1204
668 /* 1205 /*
669 * enable work queue to remove device from upper layers 1206 * enable work queue to remove device from upper layers
670 */ 1207 */
671 list_del(&target_reset_list->list); 1208 list_del(&target_reset_list->list);
1209 if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off)
1210 mptsas_queue_device_delete(ioc,
1211 &target_reset_list->sas_event_data);
672 1212
673 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
674 if (!ev) {
675 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
676 ioc->name,__func__, __LINE__));
677 return;
678 }
679
680 INIT_WORK(&ev->work, mptsas_hotplug_work);
681 ev->ioc = ioc;
682 ev->handle = le16_to_cpu(sas_event_data->DevHandle);
683 ev->parent_handle =
684 le16_to_cpu(sas_event_data->ParentDevHandle);
685 ev->channel = channel;
686 ev->id =id;
687 ev->phy_id = sas_event_data->PhyNum;
688 memcpy(&sas_address, &sas_event_data->SASAddress,
689 sizeof(__le64));
690 ev->sas_address = le64_to_cpu(sas_address);
691 ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo);
692 ev->event_type = MPTSAS_DEL_DEVICE;
693 schedule_work(&ev->work);
694 kfree(target_reset_list);
695 1213
696 /* 1214 /*
697 * issue target reset to next device in the queue 1215 * issue target reset to next device in the queue
@@ -699,34 +1217,19 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
699 1217
700 head = &hd->target_reset_list; 1218 head = &hd->target_reset_list;
701 if (list_empty(head)) 1219 if (list_empty(head))
702 return; 1220 return 1;
703 1221
704 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, 1222 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event,
705 list); 1223 list);
706 1224
707 sas_event_data = &target_reset_list->sas_event_data; 1225 id = target_reset_list->sas_event_data.TargetID;
708 id = sas_event_data->TargetID; 1226 channel = target_reset_list->sas_event_data.Bus;
709 channel = sas_event_data->Bus; 1227 target_reset_list->time_count = jiffies;
710 1228
711 if (mptsas_target_reset(ioc, channel, id)) { 1229 if (mptsas_target_reset(ioc, channel, id))
712 target_reset_list->target_reset_issued = 1; 1230 target_reset_list->target_reset_issued = 1;
713 hd->resetPending = 1;
714 }
715}
716 1231
717/** 1232 return 1;
718 * mptsas_taskmgmt_complete
719 *
720 * @ioc
721 * @mf
722 * @mr
723 *
724 **/
725static int
726mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
727{
728 mptsas_dev_reset_complete(ioc);
729 return mptscsih_taskmgmt_complete(ioc, mf, mr);
730} 1233}
731 1234
732/** 1235/**
@@ -740,37 +1243,59 @@ static int
740mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 1243mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
741{ 1244{
742 MPT_SCSI_HOST *hd; 1245 MPT_SCSI_HOST *hd;
743 struct mptsas_target_reset_event *target_reset_list, *n;
744 int rc; 1246 int rc;
745 1247
746 rc = mptscsih_ioc_reset(ioc, reset_phase); 1248 rc = mptscsih_ioc_reset(ioc, reset_phase);
1249 if ((ioc->bus_type != SAS) || (!rc))
1250 return rc;
747 1251
748 if (ioc->bus_type != SAS)
749 goto out;
750
751 if (reset_phase != MPT_IOC_POST_RESET)
752 goto out;
753
754 if (!ioc->sh || !ioc->sh->hostdata)
755 goto out;
756 hd = shost_priv(ioc->sh); 1252 hd = shost_priv(ioc->sh);
757 if (!hd->ioc) 1253 if (!hd->ioc)
758 goto out; 1254 goto out;
759 1255
760 if (list_empty(&hd->target_reset_list)) 1256 switch (reset_phase) {
761 goto out; 1257 case MPT_IOC_SETUP_RESET:
762 1258 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
763 /* flush the target_reset_list */ 1259 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
764 list_for_each_entry_safe(target_reset_list, n, 1260 mptsas_fw_event_off(ioc);
765 &hd->target_reset_list, list) { 1261 break;
766 list_del(&target_reset_list->list); 1262 case MPT_IOC_PRE_RESET:
767 kfree(target_reset_list); 1263 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1264 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
1265 break;
1266 case MPT_IOC_POST_RESET:
1267 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1268 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
1269 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
1270 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
1271 complete(&ioc->sas_mgmt.done);
1272 }
1273 mptsas_cleanup_fw_event_q(ioc);
1274 mptsas_queue_rescan(ioc);
1275 mptsas_fw_event_on(ioc);
1276 break;
1277 default:
1278 break;
768 } 1279 }
769 1280
770 out: 1281 out:
771 return rc; 1282 return rc;
772} 1283}
773 1284
1285
1286/**
1287 * enum device_state -
1288 * @DEVICE_RETRY: need to retry the TUR
1289 * @DEVICE_ERROR: TUR return error, don't add device
1290 * @DEVICE_READY: device can be added
1291 *
1292 */
1293enum device_state{
1294 DEVICE_RETRY,
1295 DEVICE_ERROR,
1296 DEVICE_READY,
1297};
1298
774static int 1299static int
775mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, 1300mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
776 u32 form, u32 form_specific) 1301 u32 form, u32 form_specific)
@@ -836,15 +1361,308 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
836 return error; 1361 return error;
837} 1362}
838 1363
1364/**
1365 * mptsas_add_end_device - report a new end device to sas transport layer
1366 * @ioc: Pointer to MPT_ADAPTER structure
 1367 * @phy_info: describes attached device
1368 *
1369 * return (0) success (1) failure
1370 *
1371 **/
1372static int
1373mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
1374{
1375 struct sas_rphy *rphy;
1376 struct sas_port *port;
1377 struct sas_identify identify;
1378 char *ds = NULL;
1379 u8 fw_id;
1380
1381 if (!phy_info) {
1382 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1383 "%s: exit at line=%d\n", ioc->name,
1384 __func__, __LINE__));
1385 return 1;
1386 }
1387
1388 fw_id = phy_info->attached.id;
1389
1390 if (mptsas_get_rphy(phy_info)) {
1391 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1392 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1393 __func__, fw_id, __LINE__));
1394 return 2;
1395 }
1396
1397 port = mptsas_get_port(phy_info);
1398 if (!port) {
1399 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1400 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1401 __func__, fw_id, __LINE__));
1402 return 3;
1403 }
1404
1405 if (phy_info->attached.device_info &
1406 MPI_SAS_DEVICE_INFO_SSP_TARGET)
1407 ds = "ssp";
1408 if (phy_info->attached.device_info &
1409 MPI_SAS_DEVICE_INFO_STP_TARGET)
1410 ds = "stp";
1411 if (phy_info->attached.device_info &
1412 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
1413 ds = "sata";
1414
1415 printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
1416 " phy %d, sas_addr 0x%llx\n", ioc->name, ds,
1417 phy_info->attached.channel, phy_info->attached.id,
1418 phy_info->attached.phy_id, (unsigned long long)
1419 phy_info->attached.sas_address);
1420
1421 mptsas_parse_device_info(&identify, &phy_info->attached);
1422 rphy = sas_end_device_alloc(port);
1423 if (!rphy) {
1424 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1425 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1426 __func__, fw_id, __LINE__));
1427 return 5; /* non-fatal: an rphy can be added later */
1428 }
1429
1430 rphy->identify = identify;
1431 if (sas_rphy_add(rphy)) {
1432 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1433 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1434 __func__, fw_id, __LINE__));
1435 sas_rphy_free(rphy);
1436 return 6;
1437 }
1438 mptsas_set_rphy(ioc, phy_info, rphy);
1439 return 0;
1440}
1441
1442/**
1443 * mptsas_del_end_device - report a deleted end device to sas transport layer
1444 * @ioc: Pointer to MPT_ADAPTER structure
 1445 * @phy_info: describes attached device
1446 *
1447 **/
1448static void
1449mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
1450{
1451 struct sas_rphy *rphy;
1452 struct sas_port *port;
1453 struct mptsas_portinfo *port_info;
1454 struct mptsas_phyinfo *phy_info_parent;
1455 int i;
1456 char *ds = NULL;
1457 u8 fw_id;
1458 u64 sas_address;
1459
1460 if (!phy_info)
1461 return;
1462
1463 fw_id = phy_info->attached.id;
1464 sas_address = phy_info->attached.sas_address;
1465
1466 if (!phy_info->port_details) {
1467 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1468 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1469 __func__, fw_id, __LINE__));
1470 return;
1471 }
1472 rphy = mptsas_get_rphy(phy_info);
1473 if (!rphy) {
1474 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1475 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1476 __func__, fw_id, __LINE__));
1477 return;
1478 }
1479
1480 if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
1481 || phy_info->attached.device_info
1482 & MPI_SAS_DEVICE_INFO_SMP_INITIATOR
1483 || phy_info->attached.device_info
1484 & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
1485 ds = "initiator";
1486 if (phy_info->attached.device_info &
1487 MPI_SAS_DEVICE_INFO_SSP_TARGET)
1488 ds = "ssp";
1489 if (phy_info->attached.device_info &
1490 MPI_SAS_DEVICE_INFO_STP_TARGET)
1491 ds = "stp";
1492 if (phy_info->attached.device_info &
1493 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
1494 ds = "sata";
1495
1496 dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
1497 "removing %s device: fw_channel %d, fw_id %d, phy %d,"
1498 "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
1499 phy_info->attached.id, phy_info->attached.phy_id,
1500 (unsigned long long) sas_address);
1501
1502 port = mptsas_get_port(phy_info);
1503 if (!port) {
1504 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1505 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1506 __func__, fw_id, __LINE__));
1507 return;
1508 }
1509 port_info = phy_info->portinfo;
1510 phy_info_parent = port_info->phy_info;
1511 for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
1512 if (!phy_info_parent->phy)
1513 continue;
1514 if (phy_info_parent->attached.sas_address !=
1515 sas_address)
1516 continue;
1517 dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
1518 MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
1519 ioc->name, phy_info_parent->phy_id,
1520 phy_info_parent->phy);
1521 sas_port_delete_phy(port, phy_info_parent->phy);
1522 }
1523
1524 dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
1525 "delete port %d, sas_addr (0x%llx)\n", ioc->name,
1526 port->port_identifier, (unsigned long long)sas_address);
1527 sas_port_delete(port);
1528 mptsas_set_port(ioc, phy_info, NULL);
1529 mptsas_port_delete(ioc, phy_info->port_details);
1530}
1531
1532struct mptsas_phyinfo *
1533mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
1534 struct mptsas_devinfo *sas_device)
1535{
1536 struct mptsas_phyinfo *phy_info;
1537 struct mptsas_portinfo *port_info;
1538 int i;
1539
1540 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
1541 sas_device->sas_address);
1542 if (!phy_info)
1543 goto out;
1544 port_info = phy_info->portinfo;
1545 if (!port_info)
1546 goto out;
1547 mutex_lock(&ioc->sas_topology_mutex);
1548 for (i = 0; i < port_info->num_phys; i++) {
1549 if (port_info->phy_info[i].attached.sas_address !=
1550 sas_device->sas_address)
1551 continue;
1552 port_info->phy_info[i].attached.channel = sas_device->channel;
1553 port_info->phy_info[i].attached.id = sas_device->id;
1554 port_info->phy_info[i].attached.sas_address =
1555 sas_device->sas_address;
1556 port_info->phy_info[i].attached.handle = sas_device->handle;
1557 port_info->phy_info[i].attached.handle_parent =
1558 sas_device->handle_parent;
1559 port_info->phy_info[i].attached.handle_enclosure =
1560 sas_device->handle_enclosure;
1561 }
1562 mutex_unlock(&ioc->sas_topology_mutex);
1563 out:
1564 return phy_info;
1565}
1566
1567/**
1568 * mptsas_firmware_event_work - work thread for processing fw events
1569 * @work: work queue payload containing info describing the event
1570 * Context: user
1571 *
1572 */
1573static void
1574mptsas_firmware_event_work(struct work_struct *work)
1575{
1576 struct fw_event_work *fw_event =
1577 container_of(work, struct fw_event_work, work.work);
1578 MPT_ADAPTER *ioc = fw_event->ioc;
1579
1580 /* special rescan topology handling */
1581 if (fw_event->event == -1) {
1582 if (ioc->in_rescan) {
1583 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1584 "%s: rescan ignored as it is in progress\n",
1585 ioc->name, __func__));
1586 return;
1587 }
1588 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
1589 "reset\n", ioc->name, __func__));
1590 ioc->in_rescan = 1;
1591 mptsas_not_responding_devices(ioc);
1592 mptsas_scan_sas_topology(ioc);
1593 ioc->in_rescan = 0;
1594 mptsas_free_fw_event(ioc, fw_event);
1595 return;
1596 }
1597
 1598 /* event handling is turned off during host reset */
1599 if (ioc->fw_events_off) {
1600 mptsas_free_fw_event(ioc, fw_event);
1601 return;
1602 }
1603
1604 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
1605 "event = (0x%02x)\n", ioc->name, __func__, fw_event,
1606 (fw_event->event & 0xFF)));
1607
1608 switch (fw_event->event) {
1609 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1610 mptsas_send_sas_event(fw_event);
1611 break;
1612 case MPI_EVENT_INTEGRATED_RAID:
1613 mptsas_send_raid_event(fw_event);
1614 break;
1615 case MPI_EVENT_IR2:
1616 mptsas_send_ir2_event(fw_event);
1617 break;
1618 case MPI_EVENT_PERSISTENT_TABLE_FULL:
1619 mptbase_sas_persist_operation(ioc,
1620 MPI_SAS_OP_CLEAR_NOT_PRESENT);
1621 mptsas_free_fw_event(ioc, fw_event);
1622 break;
1623 case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
1624 mptsas_broadcast_primative_work(fw_event);
1625 break;
1626 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
1627 mptsas_send_expander_event(fw_event);
1628 break;
1629 case MPI_EVENT_SAS_PHY_LINK_STATUS:
1630 mptsas_send_link_status_event(fw_event);
1631 break;
1632 case MPI_EVENT_QUEUE_FULL:
1633 mptsas_handle_queue_full_event(fw_event);
1634 break;
1635 }
1636}
1637
1638
1639
839static int 1640static int
840mptsas_slave_configure(struct scsi_device *sdev) 1641mptsas_slave_configure(struct scsi_device *sdev)
841{ 1642{
1643 struct Scsi_Host *host = sdev->host;
1644 MPT_SCSI_HOST *hd = shost_priv(host);
1645 MPT_ADAPTER *ioc = hd->ioc;
1646 VirtDevice *vdevice = sdev->hostdata;
842 1647
843 if (sdev->channel == MPTSAS_RAID_CHANNEL) 1648 if (vdevice->vtarget->deleted) {
1649 sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
1650 vdevice->vtarget->deleted = 0;
1651 }
1652
1653 /*
1654 * RAID volumes placed beyond the last expected port.
1655 * Ignore sending sas mode pages in that case..
1656 */
1657 if (sdev->channel == MPTSAS_RAID_CHANNEL) {
1658 mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
844 goto out; 1659 goto out;
1660 }
845 1661
846 sas_read_port_mode_page(sdev); 1662 sas_read_port_mode_page(sdev);
847 1663
1664 mptsas_add_device_component_starget(ioc, scsi_target(sdev));
1665
848 out: 1666 out:
849 return mptscsih_slave_configure(sdev); 1667 return mptscsih_slave_configure(sdev);
850} 1668}
@@ -875,9 +1693,18 @@ mptsas_target_alloc(struct scsi_target *starget)
875 * RAID volumes placed beyond the last expected port. 1693 * RAID volumes placed beyond the last expected port.
876 */ 1694 */
877 if (starget->channel == MPTSAS_RAID_CHANNEL) { 1695 if (starget->channel == MPTSAS_RAID_CHANNEL) {
878 for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) 1696 if (!ioc->raid_data.pIocPg2) {
879 if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) 1697 kfree(vtarget);
880 channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus; 1698 return -ENXIO;
1699 }
1700 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
1701 if (id == ioc->raid_data.pIocPg2->
1702 RaidVolume[i].VolumeID) {
1703 channel = ioc->raid_data.pIocPg2->
1704 RaidVolume[i].VolumeBus;
1705 }
1706 }
1707 vtarget->raidVolume = 1;
881 goto out; 1708 goto out;
882 } 1709 }
883 1710
@@ -926,11 +1753,18 @@ mptsas_target_destroy(struct scsi_target *starget)
926 struct sas_rphy *rphy; 1753 struct sas_rphy *rphy;
927 struct mptsas_portinfo *p; 1754 struct mptsas_portinfo *p;
928 int i; 1755 int i;
929 MPT_ADAPTER *ioc = hd->ioc; 1756 MPT_ADAPTER *ioc = hd->ioc;
1757 VirtTarget *vtarget;
930 1758
931 if (!starget->hostdata) 1759 if (!starget->hostdata)
932 return; 1760 return;
933 1761
1762 vtarget = starget->hostdata;
1763
1764 mptsas_del_device_component_by_os(ioc, starget->channel,
1765 starget->id);
1766
1767
934 if (starget->channel == MPTSAS_RAID_CHANNEL) 1768 if (starget->channel == MPTSAS_RAID_CHANNEL)
935 goto out; 1769 goto out;
936 1770
@@ -940,12 +1774,21 @@ mptsas_target_destroy(struct scsi_target *starget)
940 if (p->phy_info[i].attached.sas_address != 1774 if (p->phy_info[i].attached.sas_address !=
941 rphy->identify.sas_address) 1775 rphy->identify.sas_address)
942 continue; 1776 continue;
1777
1778 starget_printk(KERN_INFO, starget, MYIOC_s_FMT
1779 "delete device: fw_channel %d, fw_id %d, phy %d, "
1780 "sas_addr 0x%llx\n", ioc->name,
1781 p->phy_info[i].attached.channel,
1782 p->phy_info[i].attached.id,
1783 p->phy_info[i].attached.phy_id, (unsigned long long)
1784 p->phy_info[i].attached.sas_address);
1785
943 mptsas_set_starget(&p->phy_info[i], NULL); 1786 mptsas_set_starget(&p->phy_info[i], NULL);
944 goto out;
945 } 1787 }
946 } 1788 }
947 1789
948 out: 1790 out:
1791 vtarget->starget = NULL;
949 kfree(starget->hostdata); 1792 kfree(starget->hostdata);
950 starget->hostdata = NULL; 1793 starget->hostdata = NULL;
951} 1794}
@@ -1008,6 +1851,8 @@ mptsas_slave_alloc(struct scsi_device *sdev)
1008static int 1851static int
1009mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) 1852mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1010{ 1853{
1854 MPT_SCSI_HOST *hd;
1855 MPT_ADAPTER *ioc;
1011 VirtDevice *vdevice = SCpnt->device->hostdata; 1856 VirtDevice *vdevice = SCpnt->device->hostdata;
1012 1857
1013 if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) { 1858 if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
@@ -1016,6 +1861,12 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1016 return 0; 1861 return 0;
1017 } 1862 }
1018 1863
1864 hd = shost_priv(SCpnt->device->host);
1865 ioc = hd->ioc;
1866
1867 if (ioc->sas_discovery_quiesce_io)
1868 return SCSI_MLQUEUE_HOST_BUSY;
1869
1019// scsi_print_command(SCpnt); 1870// scsi_print_command(SCpnt);
1020 1871
1021 return mptscsih_qcmd(SCpnt,done); 1872 return mptscsih_qcmd(SCpnt,done);
@@ -1114,14 +1965,19 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
1114static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, 1965static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
1115 MPT_FRAME_HDR *reply) 1966 MPT_FRAME_HDR *reply)
1116{ 1967{
1117 ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD; 1968 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
1118 if (reply != NULL) { 1969 if (reply != NULL) {
1119 ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID; 1970 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
1120 memcpy(ioc->sas_mgmt.reply, reply, 1971 memcpy(ioc->sas_mgmt.reply, reply,
1121 min(ioc->reply_sz, 4 * reply->u.reply.MsgLength)); 1972 min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
1122 } 1973 }
1123 complete(&ioc->sas_mgmt.done); 1974
1124 return 1; 1975 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
1976 ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
1977 complete(&ioc->sas_mgmt.done);
1978 return 1;
1979 }
1980 return 0;
1125} 1981}
1126 1982
1127static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) 1983static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -1160,6 +2016,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1160 MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET; 2016 MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
1161 req->PhyNum = phy->identify.phy_identifier; 2017 req->PhyNum = phy->identify.phy_identifier;
1162 2018
2019 INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
1163 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); 2020 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
1164 2021
1165 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 2022 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
@@ -1174,7 +2031,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1174 2031
1175 /* a reply frame is expected */ 2032 /* a reply frame is expected */
1176 if ((ioc->sas_mgmt.status & 2033 if ((ioc->sas_mgmt.status &
1177 MPT_IOCTL_STATUS_RF_VALID) == 0) { 2034 MPT_MGMT_STATUS_RF_VALID) == 0) {
1178 error = -ENXIO; 2035 error = -ENXIO;
1179 goto out_unlock; 2036 goto out_unlock;
1180 } 2037 }
@@ -1191,6 +2048,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1191 error = 0; 2048 error = 0;
1192 2049
1193 out_unlock: 2050 out_unlock:
2051 CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
1194 mutex_unlock(&ioc->sas_mgmt.mutex); 2052 mutex_unlock(&ioc->sas_mgmt.mutex);
1195 out: 2053 out:
1196 return error; 2054 return error;
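Taken together, the hunks above replace the old unconditional complete() with a pending-status handshake for SAS management requests: the submitter marks the request pending (INITIALIZE_MGMT_STATUS) before posting the frame, mptsas_mgmt_done() signals the waiter only while the pending bit is still set, and the submitter clears the status (CLEAR_MGMT_STATUS) when it is finished, so a reply that arrives after a timeout can no longer complete a stale waiter. A rough user-space sketch of that handshake, using pthreads and invented flag names rather than the driver's MPT_MGMT_STATUS_* bits and completion API:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define STATUS_PENDING      0x01        /* request is waiting for a reply */
#define STATUS_COMMAND_GOOD 0x02        /* reply arrived and was recorded */

struct mgmt_ctx {
        pthread_mutex_t lock;
        pthread_cond_t  done;
        unsigned int    status;
};

/* Reply-side handler, modeled on the new mptsas_mgmt_done():
 * only wake the waiter if the request is still marked pending. */
static void mgmt_done(struct mgmt_ctx *ctx)
{
        pthread_mutex_lock(&ctx->lock);
        ctx->status |= STATUS_COMMAND_GOOD;
        if (ctx->status & STATUS_PENDING) {
                ctx->status &= ~STATUS_PENDING;
                pthread_cond_signal(&ctx->done);
        }
        pthread_mutex_unlock(&ctx->lock);
}

static void *firmware_thread(void *arg)
{
        usleep(10000);                  /* simulated firmware latency */
        mgmt_done(arg);
        return NULL;
}

int main(void)
{
        struct mgmt_ctx ctx;
        pthread_t fw;

        pthread_mutex_init(&ctx.lock, NULL);
        pthread_cond_init(&ctx.done, NULL);

        /* Submit side: mark the request pending, then "post" it to firmware. */
        ctx.status = STATUS_PENDING;
        pthread_create(&fw, NULL, firmware_thread, &ctx);

        /* Wait for completion; the real driver waits with a timeout here. */
        pthread_mutex_lock(&ctx.lock);
        while (ctx.status & STATUS_PENDING)
                pthread_cond_wait(&ctx.done, &ctx.lock);
        printf("reply valid: %s\n",
               (ctx.status & STATUS_COMMAND_GOOD) ? "yes" : "no");
        ctx.status = 0;                 /* CLEAR_MGMT_STATUS equivalent */
        pthread_mutex_unlock(&ctx.lock);

        pthread_join(fw, NULL);
        return 0;
}

The point of the pending bit is visible in mgmt_done(): once the waiting side has given up and cleared the status, a late reply finds the bit already clear and wakes nobody.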
@@ -1304,7 +2162,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1304 struct mptsas_portinfo *port_info; 2162 struct mptsas_portinfo *port_info;
1305 2163
1306 mutex_lock(&ioc->sas_topology_mutex); 2164 mutex_lock(&ioc->sas_topology_mutex);
1307 port_info = mptsas_get_hba_portinfo(ioc); 2165 port_info = ioc->hba_port_info;
1308 if (port_info && port_info->phy_info) 2166 if (port_info && port_info->phy_info)
1309 sas_address = 2167 sas_address =
1310 port_info->phy_info[0].phy->identify.sas_address; 2168 port_info->phy_info[0].phy->identify.sas_address;
@@ -1319,26 +2177,32 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1319 /* request */ 2177 /* request */
1320 flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2178 flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1321 MPI_SGE_FLAGS_END_OF_BUFFER | 2179 MPI_SGE_FLAGS_END_OF_BUFFER |
1322 MPI_SGE_FLAGS_DIRECTION | 2180 MPI_SGE_FLAGS_DIRECTION)
1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; 2181 << MPI_SGE_FLAGS_SHIFT;
1324 flagsLength |= (blk_rq_bytes(req) - 4); 2182 flagsLength |= (blk_rq_bytes(req) - 4);
1325 2183
1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 2184 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
1327 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 2185 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1328 if (!dma_addr_out) 2186 if (!dma_addr_out)
1329 goto put_mf; 2187 goto put_mf;
1330 mpt_add_sge(psge, flagsLength, dma_addr_out); 2188 ioc->add_sge(psge, flagsLength, dma_addr_out);
1331 psge += (sizeof(u32) + sizeof(dma_addr_t)); 2189 psge += ioc->SGE_size;
1332 2190
1333 /* response */ 2191 /* response */
1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 2192 flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2193 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
2194 MPI_SGE_FLAGS_IOC_TO_HOST |
2195 MPI_SGE_FLAGS_END_OF_BUFFER;
2196
2197 flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
1335 flagsLength |= blk_rq_bytes(rsp) + 4; 2198 flagsLength |= blk_rq_bytes(rsp) + 4;
1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 2199 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
1337 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 2200 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1338 if (!dma_addr_in) 2201 if (!dma_addr_in)
1339 goto unmap; 2202 goto unmap;
1340 mpt_add_sge(psge, flagsLength, dma_addr_in); 2203 ioc->add_sge(psge, flagsLength, dma_addr_in);
1341 2204
2205 INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
1342 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); 2206 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
1343 2207
1344 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); 2208 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
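In the SMP passthrough hunk above, each scatter-gather element is described by a single 32-bit flagsLength word: the MPI_SGE_FLAGS_* bits are shifted up by MPI_SGE_FLAGS_SHIFT and the transfer length is OR'd into the remaining low bits, which is why the request SGE adds blk_rq_bytes(req) - 4 only after the shift. A small, self-contained illustration of that packing, assuming the usual MPI layout of flags in the top byte and a 24-bit byte count (the DEMO_* values below are illustrative stand-ins, not the real MPI constants):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Illustrative values only: flags occupy the top byte of the descriptor
 * word, the byte count the low 24 bits. */
#define DEMO_SGE_FLAGS_SHIFT            24
#define DEMO_SGE_FLAGS_SIMPLE_ELEMENT   0x10
#define DEMO_SGE_FLAGS_END_OF_BUFFER    0x40
#define DEMO_SGE_FLAGS_DIRECTION        0x04

static uint32_t demo_sge_flags_length(uint8_t flags, uint32_t byte_count)
{
        /* Length is limited to 24 bits in this layout. */
        return ((uint32_t)flags << DEMO_SGE_FLAGS_SHIFT) |
               (byte_count & 0x00FFFFFFu);
}

int main(void)
{
        uint32_t fl = demo_sge_flags_length(DEMO_SGE_FLAGS_SIMPLE_ELEMENT |
                                            DEMO_SGE_FLAGS_END_OF_BUFFER |
                                            DEMO_SGE_FLAGS_DIRECTION,
                                            512 - 4);

        printf("flagsLength = 0x%08" PRIx32 " (flags 0x%02" PRIx32
               ", length %" PRIu32 ")\n",
               fl, fl >> DEMO_SGE_FLAGS_SHIFT, fl & 0x00FFFFFFu);
        return 0;
}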
@@ -1351,7 +2215,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1351 } 2215 }
1352 mf = NULL; 2216 mf = NULL;
1353 2217
1354 if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) { 2218 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
1355 SmpPassthroughReply_t *smprep; 2219 SmpPassthroughReply_t *smprep;
1356 2220
1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 2221 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
@@ -1360,7 +2224,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1360 req->resid_len = 0; 2224 req->resid_len = 0;
1361 rsp->resid_len -= smprep->ResponseDataLength; 2225 rsp->resid_len -= smprep->ResponseDataLength;
1362 } else { 2226 } else {
1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 2227 printk(MYIOC_s_ERR_FMT
2228 "%s: smp passthru reply failed to be returned\n",
1364 ioc->name, __func__); 2229 ioc->name, __func__);
1365 ret = -ENXIO; 2230 ret = -ENXIO;
1366 } 2231 }
@@ -1375,6 +2240,7 @@ put_mf:
1375 if (mf) 2240 if (mf)
1376 mpt_free_msg_frame(ioc, mf); 2241 mpt_free_msg_frame(ioc, mf);
1377out_unlock: 2242out_unlock:
2243 CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
1378 mutex_unlock(&ioc->sas_mgmt.mutex); 2244 mutex_unlock(&ioc->sas_mgmt.mutex);
1379out: 2245out:
1380 return ret; 2246 return ret;
@@ -1438,7 +2304,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
1438 2304
1439 port_info->num_phys = buffer->NumPhys; 2305 port_info->num_phys = buffer->NumPhys;
1440 port_info->phy_info = kcalloc(port_info->num_phys, 2306 port_info->phy_info = kcalloc(port_info->num_phys,
1441 sizeof(*port_info->phy_info),GFP_KERNEL); 2307 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
1442 if (!port_info->phy_info) { 2308 if (!port_info->phy_info) {
1443 error = -ENOMEM; 2309 error = -ENOMEM;
1444 goto out_free_consistent; 2310 goto out_free_consistent;
@@ -1600,10 +2466,6 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
1600 __le64 sas_address; 2466 __le64 sas_address;
1601 int error=0; 2467 int error=0;
1602 2468
1603 if (ioc->sas_discovery_runtime &&
1604 mptsas_is_end_device(device_info))
1605 goto out;
1606
1607 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; 2469 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
1608 hdr.ExtPageLength = 0; 2470 hdr.ExtPageLength = 0;
1609 hdr.PageNumber = 0; 2471 hdr.PageNumber = 0;
@@ -1644,6 +2506,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
1644 2506
1645 mptsas_print_device_pg0(ioc, buffer); 2507 mptsas_print_device_pg0(ioc, buffer);
1646 2508
2509 memset(device_info, 0, sizeof(struct mptsas_devinfo));
1647 device_info->handle = le16_to_cpu(buffer->DevHandle); 2510 device_info->handle = le16_to_cpu(buffer->DevHandle);
1648 device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle); 2511 device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
1649 device_info->handle_enclosure = 2512 device_info->handle_enclosure =
@@ -1675,7 +2538,9 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1675 SasExpanderPage0_t *buffer; 2538 SasExpanderPage0_t *buffer;
1676 dma_addr_t dma_handle; 2539 dma_addr_t dma_handle;
1677 int i, error; 2540 int i, error;
2541 __le64 sas_address;
1678 2542
2543 memset(port_info, 0, sizeof(struct mptsas_portinfo));
1679 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; 2544 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1680 hdr.ExtPageLength = 0; 2545 hdr.ExtPageLength = 0;
1681 hdr.PageNumber = 0; 2546 hdr.PageNumber = 0;
@@ -1721,18 +2586,23 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1721 } 2586 }
1722 2587
1723 /* save config data */ 2588 /* save config data */
1724 port_info->num_phys = buffer->NumPhys; 2589 port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
1725 port_info->phy_info = kcalloc(port_info->num_phys, 2590 port_info->phy_info = kcalloc(port_info->num_phys,
1726 sizeof(*port_info->phy_info),GFP_KERNEL); 2591 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
1727 if (!port_info->phy_info) { 2592 if (!port_info->phy_info) {
1728 error = -ENOMEM; 2593 error = -ENOMEM;
1729 goto out_free_consistent; 2594 goto out_free_consistent;
1730 } 2595 }
1731 2596
2597 memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
1732 for (i = 0; i < port_info->num_phys; i++) { 2598 for (i = 0; i < port_info->num_phys; i++) {
1733 port_info->phy_info[i].portinfo = port_info; 2599 port_info->phy_info[i].portinfo = port_info;
1734 port_info->phy_info[i].handle = 2600 port_info->phy_info[i].handle =
1735 le16_to_cpu(buffer->DevHandle); 2601 le16_to_cpu(buffer->DevHandle);
2602 port_info->phy_info[i].identify.sas_address =
2603 le64_to_cpu(sas_address);
2604 port_info->phy_info[i].identify.handle_parent =
2605 le16_to_cpu(buffer->ParentDevHandle);
1736 } 2606 }
1737 2607
1738 out_free_consistent: 2608 out_free_consistent:
@@ -1752,11 +2622,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1752 dma_addr_t dma_handle; 2622 dma_addr_t dma_handle;
1753 int error=0; 2623 int error=0;
1754 2624
1755 if (ioc->sas_discovery_runtime && 2625 hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
1756 mptsas_is_end_device(&phy_info->attached))
1757 goto out;
1758
1759 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1760 hdr.ExtPageLength = 0; 2626 hdr.ExtPageLength = 0;
1761 hdr.PageNumber = 1; 2627 hdr.PageNumber = 1;
1762 hdr.Reserved1 = 0; 2628 hdr.Reserved1 = 0;
@@ -1791,6 +2657,12 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1791 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2657 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
1792 2658
1793 error = mpt_config(ioc, &cfg); 2659 error = mpt_config(ioc, &cfg);
2660
2661 if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
2662 error = -ENODEV;
2663 goto out;
2664 }
2665
1794 if (error) 2666 if (error)
1795 goto out_free_consistent; 2667 goto out_free_consistent;
1796 2668
@@ -2010,16 +2882,21 @@ static int mptsas_probe_one_phy(struct device *dev,
2010 goto out; 2882 goto out;
2011 } 2883 }
2012 mptsas_set_port(ioc, phy_info, port); 2884 mptsas_set_port(ioc, phy_info, port);
2013 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2885 devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
2014 "sas_port_alloc: port=%p dev=%p port_id=%d\n", 2886 MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
2015 ioc->name, port, dev, port->port_identifier)); 2887 ioc->name, port->port_identifier,
2888 (unsigned long long)phy_info->
2889 attached.sas_address));
2016 } 2890 }
2017 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n", 2891 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2018 ioc->name, phy_info->phy_id)); 2892 "sas_port_add_phy: phy_id=%d\n",
2893 ioc->name, phy_info->phy_id));
2019 sas_port_add_phy(port, phy_info->phy); 2894 sas_port_add_phy(port, phy_info->phy);
2020 phy_info->sas_port_add_phy = 0; 2895 phy_info->sas_port_add_phy = 0;
2896 devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
2897 MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
2898 phy_info->phy_id, phy_info->phy));
2021 } 2899 }
2022
2023 if (!mptsas_get_rphy(phy_info) && port && !port->rphy) { 2900 if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
2024 2901
2025 struct sas_rphy *rphy; 2902 struct sas_rphy *rphy;
@@ -2032,18 +2909,17 @@ static int mptsas_probe_one_phy(struct device *dev,
2032 * the adding/removing of devices that occur 2909 * the adding/removing of devices that occur
2033 * after start of day. 2910 * after start of day.
2034 */ 2911 */
2035 if (ioc->sas_discovery_runtime && 2912 if (mptsas_is_end_device(&phy_info->attached) &&
2036 mptsas_is_end_device(&phy_info->attached)) 2913 phy_info->attached.handle_parent) {
2037 goto out; 2914 goto out;
2915 }
2038 2916
2039 mptsas_parse_device_info(&identify, &phy_info->attached); 2917 mptsas_parse_device_info(&identify, &phy_info->attached);
2040 if (scsi_is_host_device(parent)) { 2918 if (scsi_is_host_device(parent)) {
2041 struct mptsas_portinfo *port_info; 2919 struct mptsas_portinfo *port_info;
2042 int i; 2920 int i;
2043 2921
2044 mutex_lock(&ioc->sas_topology_mutex); 2922 port_info = ioc->hba_port_info;
2045 port_info = mptsas_get_hba_portinfo(ioc);
2046 mutex_unlock(&ioc->sas_topology_mutex);
2047 2923
2048 for (i = 0; i < port_info->num_phys; i++) 2924 for (i = 0; i < port_info->num_phys; i++)
2049 if (port_info->phy_info[i].identify.sas_address == 2925 if (port_info->phy_info[i].identify.sas_address ==
@@ -2102,7 +2978,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2102 struct mptsas_portinfo *port_info, *hba; 2978 struct mptsas_portinfo *port_info, *hba;
2103 int error = -ENOMEM, i; 2979 int error = -ENOMEM, i;
2104 2980
2105 hba = kzalloc(sizeof(*port_info), GFP_KERNEL); 2981 hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
2106 if (! hba) 2982 if (! hba)
2107 goto out; 2983 goto out;
2108 2984
@@ -2112,9 +2988,10 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2112 2988
2113 mptsas_sas_io_unit_pg1(ioc); 2989 mptsas_sas_io_unit_pg1(ioc);
2114 mutex_lock(&ioc->sas_topology_mutex); 2990 mutex_lock(&ioc->sas_topology_mutex);
2115 port_info = mptsas_get_hba_portinfo(ioc); 2991 port_info = ioc->hba_port_info;
2116 if (!port_info) { 2992 if (!port_info) {
2117 port_info = hba; 2993 ioc->hba_port_info = port_info = hba;
2994 ioc->hba_port_num_phy = port_info->num_phys;
2118 list_add_tail(&port_info->list, &ioc->sas_topology); 2995 list_add_tail(&port_info->list, &ioc->sas_topology);
2119 } else { 2996 } else {
2120 for (i = 0; i < hba->num_phys; i++) { 2997 for (i = 0; i < hba->num_phys; i++) {
@@ -2130,15 +3007,22 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2130 hba = NULL; 3007 hba = NULL;
2131 } 3008 }
2132 mutex_unlock(&ioc->sas_topology_mutex); 3009 mutex_unlock(&ioc->sas_topology_mutex);
3010#if defined(CPQ_CIM)
3011 ioc->num_ports = port_info->num_phys;
3012#endif
2133 for (i = 0; i < port_info->num_phys; i++) { 3013 for (i = 0; i < port_info->num_phys; i++) {
2134 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], 3014 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
2135 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << 3015 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
2136 MPI_SAS_PHY_PGAD_FORM_SHIFT), i); 3016 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
2137 3017 port_info->phy_info[i].identify.handle =
3018 port_info->phy_info[i].handle;
2138 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify, 3019 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
2139 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3020 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2140 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3021 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2141 port_info->phy_info[i].handle); 3022 port_info->phy_info[i].identify.handle);
3023 if (!ioc->hba_port_sas_addr)
3024 ioc->hba_port_sas_addr =
3025 port_info->phy_info[i].identify.sas_address;
2142 port_info->phy_info[i].identify.phy_id = 3026 port_info->phy_info[i].identify.phy_id =
2143 port_info->phy_info[i].phy_id = i; 3027 port_info->phy_info[i].phy_id = i;
2144 if (port_info->phy_info[i].attached.handle) 3028 if (port_info->phy_info[i].attached.handle)
@@ -2163,248 +3047,721 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2163 return error; 3047 return error;
2164} 3048}
2165 3049
2166static int 3050static void
2167mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle) 3051mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
2168{ 3052{
2169 struct mptsas_portinfo *port_info, *p, *ex; 3053 struct mptsas_portinfo *parent;
2170 struct device *parent; 3054 struct device *parent_dev;
2171 struct sas_rphy *rphy; 3055 struct sas_rphy *rphy;
2172 int error = -ENOMEM, i, j; 3056 int i;
2173 3057 u64 sas_address; /* expander sas address */
2174 ex = kzalloc(sizeof(*port_info), GFP_KERNEL); 3058 u32 handle;
2175 if (!ex) 3059
2176 goto out; 3060 handle = port_info->phy_info[0].handle;
2177 3061 sas_address = port_info->phy_info[0].identify.sas_address;
2178 error = mptsas_sas_expander_pg0(ioc, ex,
2179 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
2180 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
2181 if (error)
2182 goto out_free_port_info;
2183
2184 *handle = ex->phy_info[0].handle;
2185
2186 mutex_lock(&ioc->sas_topology_mutex);
2187 port_info = mptsas_find_portinfo_by_handle(ioc, *handle);
2188 if (!port_info) {
2189 port_info = ex;
2190 list_add_tail(&port_info->list, &ioc->sas_topology);
2191 } else {
2192 for (i = 0; i < ex->num_phys; i++) {
2193 port_info->phy_info[i].handle =
2194 ex->phy_info[i].handle;
2195 port_info->phy_info[i].port_id =
2196 ex->phy_info[i].port_id;
2197 }
2198 kfree(ex->phy_info);
2199 kfree(ex);
2200 ex = NULL;
2201 }
2202 mutex_unlock(&ioc->sas_topology_mutex);
2203
2204 for (i = 0; i < port_info->num_phys; i++) { 3062 for (i = 0; i < port_info->num_phys; i++) {
2205 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i], 3063 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
2206 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM << 3064 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
2207 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle); 3065 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
2208 3066
2209 if (port_info->phy_info[i].identify.handle) { 3067 mptsas_sas_device_pg0(ioc,
2210 mptsas_sas_device_pg0(ioc, 3068 &port_info->phy_info[i].identify,
2211 &port_info->phy_info[i].identify, 3069 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2212 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3070 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2213 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3071 port_info->phy_info[i].identify.handle);
2214 port_info->phy_info[i].identify.handle); 3072 port_info->phy_info[i].identify.phy_id =
2215 port_info->phy_info[i].identify.phy_id = 3073 port_info->phy_info[i].phy_id;
2216 port_info->phy_info[i].phy_id;
2217 }
2218 3074
2219 if (port_info->phy_info[i].attached.handle) { 3075 if (port_info->phy_info[i].attached.handle) {
2220 mptsas_sas_device_pg0(ioc, 3076 mptsas_sas_device_pg0(ioc,
2221 &port_info->phy_info[i].attached, 3077 &port_info->phy_info[i].attached,
2222 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3078 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2223 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3079 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2224 port_info->phy_info[i].attached.handle); 3080 port_info->phy_info[i].attached.handle);
2225 port_info->phy_info[i].attached.phy_id = 3081 port_info->phy_info[i].attached.phy_id =
2226 port_info->phy_info[i].phy_id; 3082 port_info->phy_info[i].phy_id;
2227 } 3083 }
2228 } 3084 }
2229 3085
2230 parent = &ioc->sh->shost_gendev; 3086 mutex_lock(&ioc->sas_topology_mutex);
2231 for (i = 0; i < port_info->num_phys; i++) { 3087 parent = mptsas_find_portinfo_by_handle(ioc,
2232 mutex_lock(&ioc->sas_topology_mutex); 3088 port_info->phy_info[0].identify.handle_parent);
2233 list_for_each_entry(p, &ioc->sas_topology, list) { 3089 if (!parent) {
2234 for (j = 0; j < p->num_phys; j++) {
2235 if (port_info->phy_info[i].identify.handle !=
2236 p->phy_info[j].attached.handle)
2237 continue;
2238 rphy = mptsas_get_rphy(&p->phy_info[j]);
2239 parent = &rphy->dev;
2240 }
2241 }
2242 mutex_unlock(&ioc->sas_topology_mutex); 3090 mutex_unlock(&ioc->sas_topology_mutex);
3091 return;
2243 } 3092 }
3093 for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
3094 i++) {
3095 if (parent->phy_info[i].attached.sas_address == sas_address) {
3096 rphy = mptsas_get_rphy(&parent->phy_info[i]);
3097 parent_dev = &rphy->dev;
3098 }
3099 }
3100 mutex_unlock(&ioc->sas_topology_mutex);
2244 3101
2245 mptsas_setup_wide_ports(ioc, port_info); 3102 mptsas_setup_wide_ports(ioc, port_info);
2246
2247 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++) 3103 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
2248 mptsas_probe_one_phy(parent, &port_info->phy_info[i], 3104 mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
2249 ioc->sas_index, 0); 3105 ioc->sas_index, 0);
3106}
2250 3107
2251 return 0; 3108static void
3109mptsas_expander_event_add(MPT_ADAPTER *ioc,
3110 MpiEventDataSasExpanderStatusChange_t *expander_data)
3111{
3112 struct mptsas_portinfo *port_info;
3113 int i;
3114 __le64 sas_address;
2252 3115
2253 out_free_port_info: 3116 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
2254 if (ex) { 3117 if (!port_info)
2255 kfree(ex->phy_info); 3118 BUG();
2256 kfree(ex); 3119 port_info->num_phys = (expander_data->NumPhys) ?
3120 expander_data->NumPhys : 1;
3121 port_info->phy_info = kcalloc(port_info->num_phys,
3122 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
3123 if (!port_info->phy_info)
3124 BUG();
3125 memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
3126 for (i = 0; i < port_info->num_phys; i++) {
3127 port_info->phy_info[i].portinfo = port_info;
3128 port_info->phy_info[i].handle =
3129 le16_to_cpu(expander_data->DevHandle);
3130 port_info->phy_info[i].identify.sas_address =
3131 le64_to_cpu(sas_address);
3132 port_info->phy_info[i].identify.handle_parent =
3133 le16_to_cpu(expander_data->ParentDevHandle);
3134 }
3135
3136 mutex_lock(&ioc->sas_topology_mutex);
3137 list_add_tail(&port_info->list, &ioc->sas_topology);
3138 mutex_unlock(&ioc->sas_topology_mutex);
3139
3140 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3141 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3142 (unsigned long long)sas_address);
3143
3144 mptsas_expander_refresh(ioc, port_info);
3145}
3146
3147/**
3148 * mptsas_delete_expander_siblings - remove siblings attached to expander
3149 * @ioc: Pointer to MPT_ADAPTER structure
3150 * @parent: the parent port_info object
3151 * @expander: the expander port_info object
3152 **/
3153static void
3154mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
3155 *parent, struct mptsas_portinfo *expander)
3156{
3157 struct mptsas_phyinfo *phy_info;
3158 struct mptsas_portinfo *port_info;
3159 struct sas_rphy *rphy;
3160 int i;
3161
3162 phy_info = expander->phy_info;
3163 for (i = 0; i < expander->num_phys; i++, phy_info++) {
3164 rphy = mptsas_get_rphy(phy_info);
3165 if (!rphy)
3166 continue;
3167 if (rphy->identify.device_type == SAS_END_DEVICE)
3168 mptsas_del_end_device(ioc, phy_info);
3169 }
3170
3171 phy_info = expander->phy_info;
3172 for (i = 0; i < expander->num_phys; i++, phy_info++) {
3173 rphy = mptsas_get_rphy(phy_info);
3174 if (!rphy)
3175 continue;
3176 if (rphy->identify.device_type ==
3177 MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
3178 rphy->identify.device_type ==
3179 MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
3180 port_info = mptsas_find_portinfo_by_sas_address(ioc,
3181 rphy->identify.sas_address);
3182 if (!port_info)
3183 continue;
3184 if (port_info == parent) /* backlink rphy */
3185 continue;
3186 /*
3187 	Delete this expander even if its expander device page still exists,
3188 	because the parent expander has already been deleted
3189 */
3190 mptsas_expander_delete(ioc, port_info, 1);
3191 }
3192 }
3193}
3194
3195
3196/**
3197 * mptsas_expander_delete - remove this expander
3198 * @ioc: Pointer to MPT_ADAPTER structure
3199 * @port_info: expander port_info struct
3200 * @force: Flag to forcefully delete the expander
3201 *
3202 **/
3203
3204static void mptsas_expander_delete(MPT_ADAPTER *ioc,
3205 struct mptsas_portinfo *port_info, u8 force)
3206{
3207
3208 struct mptsas_portinfo *parent;
3209 int i;
3210 u64 expander_sas_address;
3211 struct mptsas_phyinfo *phy_info;
3212 struct mptsas_portinfo buffer;
3213 struct mptsas_portinfo_details *port_details;
3214 struct sas_port *port;
3215
3216 if (!port_info)
3217 return;
3218
3219 /* see if expander is still there before deleting */
3220 mptsas_sas_expander_pg0(ioc, &buffer,
3221 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
3222 MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
3223 port_info->phy_info[0].identify.handle);
3224
3225 if (buffer.num_phys) {
3226 kfree(buffer.phy_info);
3227 if (!force)
3228 return;
3229 }
3230
3231
3232 /*
3233 * Obtain the port_info instance to the parent port
3234 */
3235 port_details = NULL;
3236 expander_sas_address =
3237 port_info->phy_info[0].identify.sas_address;
3238 parent = mptsas_find_portinfo_by_handle(ioc,
3239 port_info->phy_info[0].identify.handle_parent);
3240 mptsas_delete_expander_siblings(ioc, parent, port_info);
3241 if (!parent)
3242 goto out;
3243
3244 /*
3245 * Delete rphys in the parent that point
3246 * to this expander.
3247 */
3248 phy_info = parent->phy_info;
3249 port = NULL;
3250 for (i = 0; i < parent->num_phys; i++, phy_info++) {
3251 if (!phy_info->phy)
3252 continue;
3253 if (phy_info->attached.sas_address !=
3254 expander_sas_address)
3255 continue;
3256 if (!port) {
3257 port = mptsas_get_port(phy_info);
3258 port_details = phy_info->port_details;
3259 }
3260 dev_printk(KERN_DEBUG, &phy_info->phy->dev,
3261 MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
3262 phy_info->phy_id, phy_info->phy);
3263 sas_port_delete_phy(port, phy_info->phy);
3264 }
3265 if (port) {
3266 dev_printk(KERN_DEBUG, &port->dev,
3267 MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
3268 ioc->name, port->port_identifier,
3269 (unsigned long long)expander_sas_address);
3270 sas_port_delete(port);
3271 mptsas_port_delete(ioc, port_details);
2257 } 3272 }
2258 out: 3273 out:
2259 return error; 3274
3275 printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
3276 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3277 (unsigned long long)expander_sas_address);
3278
3279 /*
3280 * free link
3281 */
3282 list_del(&port_info->list);
3283 kfree(port_info->phy_info);
3284 kfree(port_info);
2260} 3285}
2261 3286
2262/* 3287
2263 * mptsas_delete_expander_phys 3288/**
3289 * mptsas_send_expander_event - handle an expander status change event
3290 * @fw_event: fw_event_work object carrying the expander status change data
3291 *
2264 * 3292 *
2265 * 3293 *
2266 * This will traverse topology, and remove expanders 3294 * This function handles adding, removing, and refreshing
2267 * that are no longer present 3295 * device handles within the expander objects.
2268 */ 3296 */
2269static void 3297static void
2270mptsas_delete_expander_phys(MPT_ADAPTER *ioc) 3298mptsas_send_expander_event(struct fw_event_work *fw_event)
2271{ 3299{
2272 struct mptsas_portinfo buffer; 3300 MPT_ADAPTER *ioc;
2273 struct mptsas_portinfo *port_info, *n, *parent; 3301 MpiEventDataSasExpanderStatusChange_t *expander_data;
2274 struct mptsas_phyinfo *phy_info; 3302 struct mptsas_portinfo *port_info;
2275 struct sas_port * port; 3303 __le64 sas_address;
2276 int i; 3304 int i;
2277 u64 expander_sas_address;
2278 3305
3306 ioc = fw_event->ioc;
3307 expander_data = (MpiEventDataSasExpanderStatusChange_t *)
3308 fw_event->event_data;
3309 memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
3310 port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
3311
3312 if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
3313 if (port_info) {
3314 for (i = 0; i < port_info->num_phys; i++) {
3315 port_info->phy_info[i].portinfo = port_info;
3316 port_info->phy_info[i].handle =
3317 le16_to_cpu(expander_data->DevHandle);
3318 port_info->phy_info[i].identify.sas_address =
3319 le64_to_cpu(sas_address);
3320 port_info->phy_info[i].identify.handle_parent =
3321 le16_to_cpu(expander_data->ParentDevHandle);
3322 }
3323 mptsas_expander_refresh(ioc, port_info);
3324 } else if (!port_info && expander_data->NumPhys)
3325 mptsas_expander_event_add(ioc, expander_data);
3326 } else if (expander_data->ReasonCode ==
3327 MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
3328 mptsas_expander_delete(ioc, port_info, 0);
3329
3330 mptsas_free_fw_event(ioc, fw_event);
3331}
3332
3333
3334/**
3335 * mptsas_expander_add - discover and add the expander with the given handle
3336 * @ioc: Pointer to MPT_ADAPTER structure
3337 * @handle: firmware device handle of the expander to add
3338 *
3339 */
3340struct mptsas_portinfo *
3341mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
3342{
3343 struct mptsas_portinfo buffer, *port_info;
3344 int i;
3345
3346 if ((mptsas_sas_expander_pg0(ioc, &buffer,
3347 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
3348 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
3349 return NULL;
3350
3351 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
3352 if (!port_info) {
3353 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
3354 "%s: exit at line=%d\n", ioc->name,
3355 __func__, __LINE__));
3356 return NULL;
3357 }
3358 port_info->num_phys = buffer.num_phys;
3359 port_info->phy_info = buffer.phy_info;
3360 for (i = 0; i < port_info->num_phys; i++)
3361 port_info->phy_info[i].portinfo = port_info;
2279 mutex_lock(&ioc->sas_topology_mutex); 3362 mutex_lock(&ioc->sas_topology_mutex);
2280 list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) { 3363 list_add_tail(&port_info->list, &ioc->sas_topology);
3364 mutex_unlock(&ioc->sas_topology_mutex);
3365 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3366 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3367 (unsigned long long)buffer.phy_info[0].identify.sas_address);
3368 mptsas_expander_refresh(ioc, port_info);
3369 return port_info;
3370}
2281 3371
2282 if (!(port_info->phy_info[0].identify.device_info & 3372static void
2283 MPI_SAS_DEVICE_INFO_SMP_TARGET)) 3373mptsas_send_link_status_event(struct fw_event_work *fw_event)
3374{
3375 MPT_ADAPTER *ioc;
3376 MpiEventDataSasPhyLinkStatus_t *link_data;
3377 struct mptsas_portinfo *port_info;
3378 struct mptsas_phyinfo *phy_info = NULL;
3379 __le64 sas_address;
3380 u8 phy_num;
3381 u8 link_rate;
3382
3383 ioc = fw_event->ioc;
3384 link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
3385
3386 memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
3387 sas_address = le64_to_cpu(sas_address);
3388 link_rate = link_data->LinkRates >> 4;
3389 phy_num = link_data->PhyNum;
3390
3391 port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
3392 if (port_info) {
3393 phy_info = &port_info->phy_info[phy_num];
3394 if (phy_info)
3395 phy_info->negotiated_link_rate = link_rate;
3396 }
3397
3398 if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
3399 link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
3400
3401 if (!port_info) {
3402 if (ioc->old_sas_discovery_protocal) {
3403 port_info = mptsas_expander_add(ioc,
3404 le16_to_cpu(link_data->DevHandle));
3405 if (port_info)
3406 goto out;
3407 }
3408 goto out;
3409 }
3410
3411 if (port_info == ioc->hba_port_info)
3412 mptsas_probe_hba_phys(ioc);
3413 else
3414 mptsas_expander_refresh(ioc, port_info);
3415 } else if (phy_info && phy_info->phy) {
3416 if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
3417 phy_info->phy->negotiated_linkrate =
3418 SAS_PHY_DISABLED;
3419 else if (link_rate ==
3420 MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
3421 phy_info->phy->negotiated_linkrate =
3422 SAS_LINK_RATE_FAILED;
3423 else
3424 phy_info->phy->negotiated_linkrate =
3425 SAS_LINK_RATE_UNKNOWN;
3426 }
3427 out:
3428 mptsas_free_fw_event(ioc, fw_event);
3429}
3430
3431static void
3432mptsas_not_responding_devices(MPT_ADAPTER *ioc)
3433{
3434 struct mptsas_portinfo buffer, *port_info;
3435 struct mptsas_device_info *sas_info;
3436 struct mptsas_devinfo sas_device;
3437 u32 handle;
3438 VirtTarget *vtarget = NULL;
3439 struct mptsas_phyinfo *phy_info;
3440 u8 found_expander;
3441 int retval, retry_count;
3442 unsigned long flags;
3443
3444 mpt_findImVolumes(ioc);
3445
3446 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3447 if (ioc->ioc_reset_in_progress) {
3448 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3449 "%s: exiting due to a parallel reset \n", ioc->name,
3450 __func__));
3451 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3452 return;
3453 }
3454 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3455
3456 /* devices, logical volumes */
3457 mutex_lock(&ioc->sas_device_info_mutex);
3458 redo_device_scan:
3459 list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
3460 if (sas_info->is_cached)
2284 continue; 3461 continue;
3462 if (!sas_info->is_logical_volume) {
3463 sas_device.handle = 0;
3464 retry_count = 0;
3465retry_page:
3466 retval = mptsas_sas_device_pg0(ioc, &sas_device,
3467 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
3468 << MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
3469 (sas_info->fw.channel << 8) +
3470 sas_info->fw.id);
3471
3472 if (sas_device.handle)
3473 continue;
3474 if (retval == -EBUSY) {
3475 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3476 if (ioc->ioc_reset_in_progress) {
3477 dfailprintk(ioc,
3478 printk(MYIOC_s_DEBUG_FMT
3479 "%s: exiting due to reset\n",
3480 ioc->name, __func__));
3481 spin_unlock_irqrestore
3482 (&ioc->taskmgmt_lock, flags);
3483 mutex_unlock(&ioc->
3484 sas_device_info_mutex);
3485 return;
3486 }
3487 spin_unlock_irqrestore(&ioc->taskmgmt_lock,
3488 flags);
3489 }
2285 3490
2286 if (mptsas_sas_expander_pg0(ioc, &buffer, 3491 if (retval && (retval != -ENODEV)) {
2287 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE << 3492 if (retry_count < 10) {
2288 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), 3493 retry_count++;
2289 port_info->phy_info[0].handle)) { 3494 goto retry_page;
3495 } else {
3496 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3497 "%s: Config page retry exceeded retry "
3498 "count deleting device 0x%llx\n",
3499 ioc->name, __func__,
3500 sas_info->sas_address));
3501 }
3502 }
2290 3503
2291 /* 3504 /* delete device */
2292 * Obtain the port_info instance to the parent port 3505 vtarget = mptsas_find_vtarget(ioc,
2293 */ 3506 sas_info->fw.channel, sas_info->fw.id);
2294 parent = mptsas_find_portinfo_by_handle(ioc,
2295 port_info->phy_info[0].identify.handle_parent);
2296 3507
2297 if (!parent) 3508 if (vtarget)
2298 goto next_port; 3509 vtarget->deleted = 1;
2299 3510
2300 expander_sas_address = 3511 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2301 port_info->phy_info[0].identify.sas_address; 3512 sas_info->sas_address);
2302 3513
2303 /* 3514 if (phy_info) {
2304 * Delete rphys in the parent that point 3515 mptsas_del_end_device(ioc, phy_info);
2305 * to this expander. The transport layer will 3516 goto redo_device_scan;
2306 * cleanup all the children.
2307 */
2308 phy_info = parent->phy_info;
2309 for (i = 0; i < parent->num_phys; i++, phy_info++) {
2310 port = mptsas_get_port(phy_info);
2311 if (!port)
2312 continue;
2313 if (phy_info->attached.sas_address !=
2314 expander_sas_address)
2315 continue;
2316 dsaswideprintk(ioc,
2317 dev_printk(KERN_DEBUG, &port->dev,
2318 MYIOC_s_FMT "delete port (%d)\n", ioc->name,
2319 port->port_identifier));
2320 sas_port_delete(port);
2321 mptsas_port_delete(ioc, phy_info->port_details);
2322 } 3517 }
2323 next_port: 3518 } else
3519 mptsas_volume_delete(ioc, sas_info->fw.id);
3520 }
3521 	mutex_unlock(&ioc->sas_device_info_mutex);
2324 3522
2325 phy_info = port_info->phy_info; 3523 /* expanders */
2326 for (i = 0; i < port_info->num_phys; i++, phy_info++) 3524 mutex_lock(&ioc->sas_topology_mutex);
2327 mptsas_port_delete(ioc, phy_info->port_details); 3525 redo_expander_scan:
3526 list_for_each_entry(port_info, &ioc->sas_topology, list) {
2328 3527
2329 list_del(&port_info->list); 3528 if (port_info->phy_info &&
2330 kfree(port_info->phy_info); 3529 (!(port_info->phy_info[0].identify.device_info &
2331 kfree(port_info); 3530 MPI_SAS_DEVICE_INFO_SMP_TARGET)))
3531 continue;
3532 found_expander = 0;
3533 handle = 0xFFFF;
3534 while (!mptsas_sas_expander_pg0(ioc, &buffer,
3535 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
3536 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
3537 !found_expander) {
3538
3539 handle = buffer.phy_info[0].handle;
3540 if (buffer.phy_info[0].identify.sas_address ==
3541 port_info->phy_info[0].identify.sas_address) {
3542 found_expander = 1;
3543 }
3544 kfree(buffer.phy_info);
3545 }
3546
3547 if (!found_expander) {
3548 mptsas_expander_delete(ioc, port_info, 0);
3549 goto redo_expander_scan;
2332 } 3550 }
2333 /*
2334 * Free this memory allocated from inside
2335 * mptsas_sas_expander_pg0
2336 */
2337 kfree(buffer.phy_info);
2338 } 3551 }
2339 	mutex_unlock(&ioc->sas_topology_mutex); 3552 	mutex_unlock(&ioc->sas_topology_mutex);
3553}
3554
3555/**
3556 * mptsas_probe_expanders - adding expanders
3557 * @ioc: Pointer to MPT_ADAPTER structure
3558 *
3559 **/
3560static void
3561mptsas_probe_expanders(MPT_ADAPTER *ioc)
3562{
3563 struct mptsas_portinfo buffer, *port_info;
3564 u32 handle;
3565 int i;
3566
3567 handle = 0xFFFF;
3568 while (!mptsas_sas_expander_pg0(ioc, &buffer,
3569 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
3570 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
3571
3572 handle = buffer.phy_info[0].handle;
3573 port_info = mptsas_find_portinfo_by_sas_address(ioc,
3574 buffer.phy_info[0].identify.sas_address);
3575
3576 if (port_info) {
3577 /* refreshing handles */
3578 for (i = 0; i < buffer.num_phys; i++) {
3579 port_info->phy_info[i].handle = handle;
3580 port_info->phy_info[i].identify.handle_parent =
3581 buffer.phy_info[0].identify.handle_parent;
3582 }
3583 mptsas_expander_refresh(ioc, port_info);
3584 kfree(buffer.phy_info);
3585 continue;
3586 }
3587
3588 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
3589 if (!port_info) {
3590 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
3591 "%s: exit at line=%d\n", ioc->name,
3592 __func__, __LINE__));
3593 return;
3594 }
3595 port_info->num_phys = buffer.num_phys;
3596 port_info->phy_info = buffer.phy_info;
3597 for (i = 0; i < port_info->num_phys; i++)
3598 port_info->phy_info[i].portinfo = port_info;
3599 mutex_lock(&ioc->sas_topology_mutex);
3600 list_add_tail(&port_info->list, &ioc->sas_topology);
3601 mutex_unlock(&ioc->sas_topology_mutex);
3602 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3603 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3604 (unsigned long long)buffer.phy_info[0].identify.sas_address);
3605 mptsas_expander_refresh(ioc, port_info);
3606 }
2340} 3607}
2341 3608
2342/* 3609static void
2343 * Start of day discovery 3610mptsas_probe_devices(MPT_ADAPTER *ioc)
2344 */ 3611{
3612 u16 handle;
3613 struct mptsas_devinfo sas_device;
3614 struct mptsas_phyinfo *phy_info;
3615
3616 handle = 0xFFFF;
3617 while (!(mptsas_sas_device_pg0(ioc, &sas_device,
3618 MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
3619
3620 handle = sas_device.handle;
3621
3622 if ((sas_device.device_info &
3623 (MPI_SAS_DEVICE_INFO_SSP_TARGET |
3624 MPI_SAS_DEVICE_INFO_STP_TARGET |
3625 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
3626 continue;
3627
3628 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
3629 if (!phy_info)
3630 continue;
3631
3632 if (mptsas_get_rphy(phy_info))
3633 continue;
3634
3635 mptsas_add_end_device(ioc, phy_info);
3636 }
3637}
3638
3639/**
3640 * mptsas_scan_sas_topology - probe HBA phys, expanders, end devices, and RAID volumes
3641 * @ioc: Pointer to MPT_ADAPTER structure
3642 *
3643 *
3644 **/
2345static void 3645static void
2346mptsas_scan_sas_topology(MPT_ADAPTER *ioc) 3646mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
2347{ 3647{
2348 u32 handle = 0xFFFF; 3648 struct scsi_device *sdev;
2349 int i; 3649 int i;
2350 3650
2351 mutex_lock(&ioc->sas_discovery_mutex);
2352 mptsas_probe_hba_phys(ioc); 3651 mptsas_probe_hba_phys(ioc);
2353 while (!mptsas_probe_expander_phys(ioc, &handle)) 3652 mptsas_probe_expanders(ioc);
2354 ; 3653 mptsas_probe_devices(ioc);
3654
2355 /* 3655 /*
2356 Reporting RAID volumes. 3656 Reporting RAID volumes.
2357 */ 3657 */
2358 if (!ioc->ir_firmware) 3658 if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
2359 goto out; 3659 !ioc->raid_data.pIocPg2->NumActiveVolumes)
2360 if (!ioc->raid_data.pIocPg2) 3660 return;
2361 goto out;
2362 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
2363 goto out;
2364 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 3661 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
3662 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
3663 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
3664 if (sdev) {
3665 scsi_device_put(sdev);
3666 continue;
3667 }
3668 printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
3669 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
3670 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
2365 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, 3671 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
2366 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); 3672 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
2367 } 3673 }
2368 out:
2369 mutex_unlock(&ioc->sas_discovery_mutex);
2370} 3674}
2371 3675
2372/* 3676
2373 * Work queue thread to handle Runtime discovery
2374 * Mere purpose is the hot add/delete of expanders
2375 *(Mutex UNLOCKED)
2376 */
2377static void 3677static void
2378__mptsas_discovery_work(MPT_ADAPTER *ioc) 3678mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
2379{ 3679{
2380 u32 handle = 0xFFFF; 3680 MPT_ADAPTER *ioc;
3681 EventDataQueueFull_t *qfull_data;
3682 struct mptsas_device_info *sas_info;
3683 struct scsi_device *sdev;
3684 int depth;
3685 int id = -1;
3686 int channel = -1;
3687 int fw_id, fw_channel;
3688 u16 current_depth;
3689
3690
3691 ioc = fw_event->ioc;
3692 qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
3693 fw_id = qfull_data->TargetID;
3694 fw_channel = qfull_data->Bus;
3695 current_depth = le16_to_cpu(qfull_data->CurrentDepth);
3696
3697 /* if hidden raid component, look for the volume id */
3698 mutex_lock(&ioc->sas_device_info_mutex);
3699 if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
3700 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
3701 list) {
3702 if (sas_info->is_cached ||
3703 sas_info->is_logical_volume)
3704 continue;
3705 if (sas_info->is_hidden_raid_component &&
3706 (sas_info->fw.channel == fw_channel &&
3707 sas_info->fw.id == fw_id)) {
3708 id = sas_info->volume_id;
3709 channel = MPTSAS_RAID_CHANNEL;
3710 goto out;
3711 }
3712 }
3713 } else {
3714 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
3715 list) {
3716 if (sas_info->is_cached ||
3717 sas_info->is_hidden_raid_component ||
3718 sas_info->is_logical_volume)
3719 continue;
3720 if (sas_info->fw.channel == fw_channel &&
3721 sas_info->fw.id == fw_id) {
3722 id = sas_info->os.id;
3723 channel = sas_info->os.channel;
3724 goto out;
3725 }
3726 }
2381 3727
2382 ioc->sas_discovery_runtime=1; 3728 }
2383 mptsas_delete_expander_phys(ioc);
2384 mptsas_probe_hba_phys(ioc);
2385 while (!mptsas_probe_expander_phys(ioc, &handle))
2386 ;
2387 ioc->sas_discovery_runtime=0;
2388}
2389 3729
2390/* 3730 out:
2391 * Work queue thread to handle Runtime discovery 3731 mutex_unlock(&ioc->sas_device_info_mutex);
2392 * Mere purpose is the hot add/delete of expanders 3732
2393 *(Mutex LOCKED) 3733 if (id != -1) {
2394 */ 3734 shost_for_each_device(sdev, ioc->sh) {
2395static void 3735 if (sdev->id == id && sdev->channel == channel) {
2396mptsas_discovery_work(struct work_struct *work) 3736 if (current_depth > sdev->queue_depth) {
2397{ 3737 sdev_printk(KERN_INFO, sdev,
2398 struct mptsas_discovery_event *ev = 3738 "strange observation, the queue "
2399 container_of(work, struct mptsas_discovery_event, work); 3739 "depth is (%d) meanwhile fw queue "
2400 MPT_ADAPTER *ioc = ev->ioc; 3740 "depth (%d)\n", sdev->queue_depth,
3741 current_depth);
3742 continue;
3743 }
3744 depth = scsi_track_queue_full(sdev,
3745 current_depth - 1);
3746 if (depth > 0)
3747 sdev_printk(KERN_INFO, sdev,
3748 "Queue depth reduced to (%d)\n",
3749 depth);
3750 else if (depth < 0)
3751 sdev_printk(KERN_INFO, sdev,
3752 "Tagged Command Queueing is being "
3753 "disabled\n");
3754 else if (depth == 0)
3755 sdev_printk(KERN_INFO, sdev,
3756 "Queue depth not changed yet\n");
3757 }
3758 }
3759 }
2401 3760
2402 mutex_lock(&ioc->sas_discovery_mutex); 3761 mptsas_free_fw_event(ioc, fw_event);
2403 __mptsas_discovery_work(ioc);
2404 mutex_unlock(&ioc->sas_discovery_mutex);
2405 kfree(ev);
2406} 3762}
2407 3763
3764
2408static struct mptsas_phyinfo * 3765static struct mptsas_phyinfo *
2409mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) 3766mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
2410{ 3767{
@@ -2429,69 +3786,80 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
2429 return phy_info; 3786 return phy_info;
2430} 3787}
2431 3788
3789/**
3790 * mptsas_find_phyinfo_by_phys_disk_num - find phy_info for a RAID physical disk
3791 * @ioc: Pointer to MPT_ADAPTER structure
3792 * @phys_disk_num: physical disk number assigned by the RAID firmware
3793 * @channel: firmware channel (bus) of the disk
3794 * @id: firmware target id of the disk
3795 *
3796 **/
2432static struct mptsas_phyinfo * 3797static struct mptsas_phyinfo *
2433mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u8 channel, u8 id) 3798mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
3799 u8 channel, u8 id)
2434{ 3800{
2435 struct mptsas_portinfo *port_info;
2436 struct mptsas_phyinfo *phy_info = NULL; 3801 struct mptsas_phyinfo *phy_info = NULL;
3802 struct mptsas_portinfo *port_info;
3803 RaidPhysDiskPage1_t *phys_disk = NULL;
3804 int num_paths;
3805 u64 sas_address = 0;
2437 int i; 3806 int i;
2438 3807
2439 mutex_lock(&ioc->sas_topology_mutex); 3808 phy_info = NULL;
2440 list_for_each_entry(port_info, &ioc->sas_topology, list) { 3809 if (!ioc->raid_data.pIocPg3)
2441 for (i = 0; i < port_info->num_phys; i++) { 3810 return NULL;
2442 if (!mptsas_is_end_device( 3811 /* dual port support */
2443 &port_info->phy_info[i].attached)) 3812 num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
2444 continue; 3813 if (!num_paths)
2445 if (port_info->phy_info[i].attached.id != id) 3814 goto out;
2446 continue; 3815 phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
2447 if (port_info->phy_info[i].attached.channel != channel) 3816 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
2448 continue; 3817 if (!phys_disk)
2449 phy_info = &port_info->phy_info[i]; 3818 goto out;
2450 break; 3819 mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
3820 for (i = 0; i < num_paths; i++) {
3821 if ((phys_disk->Path[i].Flags & 1) != 0)
3822 /* entry no longer valid */
3823 continue;
3824 if ((id == phys_disk->Path[i].PhysDiskID) &&
3825 (channel == phys_disk->Path[i].PhysDiskBus)) {
3826 memcpy(&sas_address, &phys_disk->Path[i].WWID,
3827 sizeof(u64));
3828 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
3829 sas_address);
3830 goto out;
2451 } 3831 }
2452 } 3832 }
2453 mutex_unlock(&ioc->sas_topology_mutex);
2454 return phy_info;
2455}
2456 3833
2457static struct mptsas_phyinfo * 3834 out:
2458mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id) 3835 kfree(phys_disk);
2459{ 3836 if (phy_info)
2460 struct mptsas_portinfo *port_info; 3837 return phy_info;
2461 struct mptsas_phyinfo *phy_info = NULL;
2462 int i;
2463 3838
3839 /*
3840 * Extra code to handle RAID0 case, where the sas_address is not updated
3841 * in phys_disk_page_1 when hotswapped
3842 */
2464 mutex_lock(&ioc->sas_topology_mutex); 3843 mutex_lock(&ioc->sas_topology_mutex);
2465 list_for_each_entry(port_info, &ioc->sas_topology, list) { 3844 list_for_each_entry(port_info, &ioc->sas_topology, list) {
2466 for (i = 0; i < port_info->num_phys; i++) { 3845 for (i = 0; i < port_info->num_phys && !phy_info; i++) {
2467 if (!mptsas_is_end_device( 3846 if (!mptsas_is_end_device(
2468 &port_info->phy_info[i].attached)) 3847 &port_info->phy_info[i].attached))
2469 continue; 3848 continue;
2470 if (port_info->phy_info[i].attached.phys_disk_num == ~0) 3849 if (port_info->phy_info[i].attached.phys_disk_num == ~0)
2471 continue; 3850 continue;
2472 if (port_info->phy_info[i].attached.phys_disk_num != id) 3851 if ((port_info->phy_info[i].attached.phys_disk_num ==
2473 continue; 3852 phys_disk_num) &&
2474 if (port_info->phy_info[i].attached.channel != channel) 3853 (port_info->phy_info[i].attached.id == id) &&
2475 continue; 3854 (port_info->phy_info[i].attached.channel ==
2476 phy_info = &port_info->phy_info[i]; 3855 channel))
2477 break; 3856 phy_info = &port_info->phy_info[i];
2478 } 3857 }
2479 } 3858 }
2480 mutex_unlock(&ioc->sas_topology_mutex); 3859 mutex_unlock(&ioc->sas_topology_mutex);
2481 return phy_info; 3860 return phy_info;
2482} 3861}
2483 3862
2484/*
2485 * Work queue thread to clear the persitency table
2486 */
2487static void
2488mptsas_persist_clear_table(struct work_struct *work)
2489{
2490 MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
2491
2492 mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
2493}
2494
2495static void 3863static void
2496mptsas_reprobe_lun(struct scsi_device *sdev, void *data) 3864mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
2497{ 3865{
@@ -2517,7 +3885,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
2517 pRaidVolumePage0_t buffer = NULL; 3885 pRaidVolumePage0_t buffer = NULL;
2518 RaidPhysDiskPage0_t phys_disk; 3886 RaidPhysDiskPage0_t phys_disk;
2519 int i; 3887 int i;
2520 struct mptsas_hotplug_event *ev; 3888 struct mptsas_phyinfo *phy_info;
3889 struct mptsas_devinfo sas_device;
2521 3890
2522 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 3891 memset(&cfg, 0 , sizeof(CONFIGPARMS));
2523 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 3892 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
@@ -2557,20 +3926,16 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
2557 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0) 3926 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
2558 continue; 3927 continue;
2559 3928
2560 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3929 if (mptsas_sas_device_pg0(ioc, &sas_device,
2561 if (!ev) { 3930 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
2562 printk(MYIOC_s_WARN_FMT "mptsas: lost hotplug event\n", ioc->name); 3931 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2563 goto out; 3932 (phys_disk.PhysDiskBus << 8) +
2564 } 3933 phys_disk.PhysDiskID))
3934 continue;
2565 3935
2566 INIT_WORK(&ev->work, mptsas_hotplug_work); 3936 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2567 ev->ioc = ioc; 3937 sas_device.sas_address);
2568 ev->id = phys_disk.PhysDiskID; 3938 mptsas_add_end_device(ioc, phy_info);
2569 ev->channel = phys_disk.PhysDiskBus;
2570 ev->phys_disk_num_valid = 1;
2571 ev->phys_disk_num = phys_disk.PhysDiskNum;
2572 ev->event_type = MPTSAS_ADD_DEVICE;
2573 schedule_work(&ev->work);
2574 } 3939 }
2575 3940
2576 out: 3941 out:
@@ -2582,417 +3947,386 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
2582 * Work queue thread to handle SAS hotplug events 3947 * Work queue thread to handle SAS hotplug events
2583 */ 3948 */
2584static void 3949static void
2585mptsas_hotplug_work(struct work_struct *work) 3950mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
3951 struct mptsas_hotplug_event *hot_plug_info)
2586{ 3952{
2587 struct mptsas_hotplug_event *ev =
2588 container_of(work, struct mptsas_hotplug_event, work);
2589
2590 MPT_ADAPTER *ioc = ev->ioc;
2591 struct mptsas_phyinfo *phy_info; 3953 struct mptsas_phyinfo *phy_info;
2592 struct sas_rphy *rphy;
2593 struct sas_port *port;
2594 struct scsi_device *sdev;
2595 struct scsi_target * starget; 3954 struct scsi_target * starget;
2596 struct sas_identify identify;
2597 char *ds = NULL;
2598 struct mptsas_devinfo sas_device; 3955 struct mptsas_devinfo sas_device;
2599 VirtTarget *vtarget; 3956 VirtTarget *vtarget;
2600 VirtDevice *vdevice; 3957 int i;
2601 3958
2602 mutex_lock(&ioc->sas_discovery_mutex); 3959 switch (hot_plug_info->event_type) {
2603 switch (ev->event_type) {
2604 case MPTSAS_DEL_DEVICE:
2605 3960
2606 phy_info = NULL; 3961 case MPTSAS_ADD_PHYSDISK:
2607 if (ev->phys_disk_num_valid) { 3962
2608 if (ev->hidden_raid_component){ 3963 if (!ioc->raid_data.pIocPg2)
2609 if (mptsas_sas_device_pg0(ioc, &sas_device, 3964 break;
2610 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << 3965
2611 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3966 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
2612 (ev->channel << 8) + ev->id)) { 3967 if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
2613 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 3968 hot_plug_info->id) {
2614 "%s: exit at line=%d\n", ioc->name, 3969 printk(MYIOC_s_WARN_FMT "firmware bug: unable "
2615 __func__, __LINE__)); 3970 "to add hidden disk - target_id matchs "
2616 break; 3971 "volume_id\n", ioc->name);
2617 } 3972 mptsas_free_fw_event(ioc, fw_event);
2618 phy_info = mptsas_find_phyinfo_by_sas_address( 3973 return;
2619 ioc, sas_device.sas_address); 3974 }
2620 }else
2621 phy_info = mptsas_find_phyinfo_by_phys_disk_num(
2622 ioc, ev->channel, ev->phys_disk_num);
2623 } 3975 }
3976 mpt_findImVolumes(ioc);
2624 3977
3978 case MPTSAS_ADD_DEVICE:
3979 memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
3980 mptsas_sas_device_pg0(ioc, &sas_device,
3981 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
3982 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
3983 (hot_plug_info->channel << 8) +
3984 hot_plug_info->id);
3985
3986 if (!sas_device.handle)
3987 return;
3988
3989 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
2625 if (!phy_info) 3990 if (!phy_info)
2626 phy_info = mptsas_find_phyinfo_by_target(ioc, 3991 break;
2627 ev->channel, ev->id);
2628 3992
2629 /* 3993 if (mptsas_get_rphy(phy_info))
2630 * Sanity checks, for non-existing phys and remote rphys. 3994 break;
2631 */ 3995
2632 if (!phy_info){ 3996 mptsas_add_end_device(ioc, phy_info);
3997 break;
3998
3999 case MPTSAS_DEL_DEVICE:
4000 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
4001 hot_plug_info->sas_address);
4002 mptsas_del_end_device(ioc, phy_info);
4003 break;
4004
4005 case MPTSAS_DEL_PHYSDISK:
4006
4007 mpt_findImVolumes(ioc);
4008
4009 phy_info = mptsas_find_phyinfo_by_phys_disk_num(
4010 ioc, hot_plug_info->phys_disk_num,
4011 hot_plug_info->channel,
4012 hot_plug_info->id);
4013 mptsas_del_end_device(ioc, phy_info);
4014 break;
4015
4016 case MPTSAS_ADD_PHYSDISK_REPROBE:
4017
4018 if (mptsas_sas_device_pg0(ioc, &sas_device,
4019 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
4020 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
4021 (hot_plug_info->channel << 8) + hot_plug_info->id)) {
2633 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4022 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2634 "%s: exit at line=%d\n", ioc->name, 4023 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2635 __func__, __LINE__)); 4024 __func__, hot_plug_info->id, __LINE__));
2636 break; 4025 break;
2637 } 4026 }
2638 if (!phy_info->port_details) { 4027
4028 phy_info = mptsas_find_phyinfo_by_sas_address(
4029 ioc, sas_device.sas_address);
4030
4031 if (!phy_info) {
2639 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4032 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2640 "%s: exit at line=%d\n", ioc->name, 4033 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2641 __func__, __LINE__)); 4034 __func__, hot_plug_info->id, __LINE__));
2642 break; 4035 break;
2643 } 4036 }
2644 rphy = mptsas_get_rphy(phy_info); 4037
2645 if (!rphy) { 4038 starget = mptsas_get_starget(phy_info);
4039 if (!starget) {
2646 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4040 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2647 "%s: exit at line=%d\n", ioc->name, 4041 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2648 __func__, __LINE__)); 4042 __func__, hot_plug_info->id, __LINE__));
2649 break; 4043 break;
2650 } 4044 }
2651 4045
2652 port = mptsas_get_port(phy_info); 4046 vtarget = starget->hostdata;
2653 if (!port) { 4047 if (!vtarget) {
2654 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4048 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2655 "%s: exit at line=%d\n", ioc->name, 4049 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2656 __func__, __LINE__)); 4050 __func__, hot_plug_info->id, __LINE__));
2657 break; 4051 break;
2658 } 4052 }
2659 4053
2660 starget = mptsas_get_starget(phy_info); 4054 mpt_findImVolumes(ioc);
2661 if (starget) {
2662 vtarget = starget->hostdata;
2663 4055
2664 if (!vtarget) { 4056 starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hidding: "
2665 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4057 "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
2666 "%s: exit at line=%d\n", ioc->name, 4058 ioc->name, hot_plug_info->channel, hot_plug_info->id,
2667 __func__, __LINE__)); 4059 hot_plug_info->phys_disk_num, (unsigned long long)
2668 break; 4060 sas_device.sas_address);
2669 }
2670 4061
2671 /* 4062 vtarget->id = hot_plug_info->phys_disk_num;
2672 * Handling RAID components 4063 vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
2673 */ 4064 phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
2674 if (ev->phys_disk_num_valid && 4065 mptsas_reprobe_target(starget, 1);
2675 ev->hidden_raid_component) {
2676 printk(MYIOC_s_INFO_FMT
2677 "RAID Hidding: channel=%d, id=%d, "
2678 "physdsk %d \n", ioc->name, ev->channel,
2679 ev->id, ev->phys_disk_num);
2680 vtarget->id = ev->phys_disk_num;
2681 vtarget->tflags |=
2682 MPT_TARGET_FLAGS_RAID_COMPONENT;
2683 mptsas_reprobe_target(starget, 1);
2684 phy_info->attached.phys_disk_num =
2685 ev->phys_disk_num;
2686 break;
2687 }
2688 }
2689
2690 if (phy_info->attached.device_info &
2691 MPI_SAS_DEVICE_INFO_SSP_TARGET)
2692 ds = "ssp";
2693 if (phy_info->attached.device_info &
2694 MPI_SAS_DEVICE_INFO_STP_TARGET)
2695 ds = "stp";
2696 if (phy_info->attached.device_info &
2697 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
2698 ds = "sata";
2699
2700 printk(MYIOC_s_INFO_FMT
2701 "removing %s device, channel %d, id %d, phy %d\n",
2702 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
2703 dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
2704 "delete port (%d)\n", ioc->name, port->port_identifier);
2705 sas_port_delete(port);
2706 mptsas_port_delete(ioc, phy_info->port_details);
2707 break; 4066 break;
2708 case MPTSAS_ADD_DEVICE:
2709 4067
2710 if (ev->phys_disk_num_valid) 4068 case MPTSAS_DEL_PHYSDISK_REPROBE:
2711 mpt_findImVolumes(ioc);
2712 4069
2713 /*
2714 * Refresh sas device pg0 data
2715 */
2716 if (mptsas_sas_device_pg0(ioc, &sas_device, 4070 if (mptsas_sas_device_pg0(ioc, &sas_device,
2717 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << 4071 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
2718 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 4072 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2719 (ev->channel << 8) + ev->id)) { 4073 (hot_plug_info->channel << 8) + hot_plug_info->id)) {
2720 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4074 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2721 "%s: exit at line=%d\n", ioc->name, 4075 "%s: fw_id=%d exit at line=%d\n",
2722 __func__, __LINE__)); 4076 ioc->name, __func__,
4077 hot_plug_info->id, __LINE__));
2723 break; 4078 break;
2724 } 4079 }
2725 4080
2726 __mptsas_discovery_work(ioc);
2727
2728 phy_info = mptsas_find_phyinfo_by_sas_address(ioc, 4081 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2729 sas_device.sas_address); 4082 sas_device.sas_address);
2730 4083 if (!phy_info) {
2731 if (!phy_info || !phy_info->port_details) {
2732 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4084 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2733 "%s: exit at line=%d\n", ioc->name, 4085 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2734 __func__, __LINE__)); 4086 __func__, hot_plug_info->id, __LINE__));
2735 break; 4087 break;
2736 } 4088 }
2737 4089
2738 starget = mptsas_get_starget(phy_info); 4090 starget = mptsas_get_starget(phy_info);
2739 if (starget && (!ev->hidden_raid_component)){ 4091 if (!starget) {
2740 4092 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2741 vtarget = starget->hostdata; 4093 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2742 4094 __func__, hot_plug_info->id, __LINE__));
2743 if (!vtarget) {
2744 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2745 "%s: exit at line=%d\n", ioc->name,
2746 __func__, __LINE__));
2747 break;
2748 }
2749 /*
2750 * Handling RAID components
2751 */
2752 if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2753 printk(MYIOC_s_INFO_FMT
2754 "RAID Exposing: channel=%d, id=%d, "
2755 "physdsk %d \n", ioc->name, ev->channel,
2756 ev->id, ev->phys_disk_num);
2757 vtarget->tflags &=
2758 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
2759 vtarget->id = ev->id;
2760 mptsas_reprobe_target(starget, 0);
2761 phy_info->attached.phys_disk_num = ~0;
2762 }
2763 break; 4095 break;
2764 } 4096 }
2765 4097
2766 if (mptsas_get_rphy(phy_info)) { 4098 vtarget = starget->hostdata;
4099 if (!vtarget) {
2767 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4100 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2768 "%s: exit at line=%d\n", ioc->name, 4101 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2769 __func__, __LINE__)); 4102 __func__, hot_plug_info->id, __LINE__));
2770 if (ev->channel) printk("%d\n", __LINE__);
2771 break; 4103 break;
2772 } 4104 }
2773 4105
2774 port = mptsas_get_port(phy_info); 4106 if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
2775 if (!port) {
2776 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4107 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2777 "%s: exit at line=%d\n", ioc->name, 4108 "%s: fw_id=%d exit at line=%d\n", ioc->name,
2778 __func__, __LINE__)); 4109 __func__, hot_plug_info->id, __LINE__));
2779 break; 4110 break;
2780 } 4111 }
2781 memcpy(&phy_info->attached, &sas_device,
2782 sizeof(struct mptsas_devinfo));
2783
2784 if (phy_info->attached.device_info &
2785 MPI_SAS_DEVICE_INFO_SSP_TARGET)
2786 ds = "ssp";
2787 if (phy_info->attached.device_info &
2788 MPI_SAS_DEVICE_INFO_STP_TARGET)
2789 ds = "stp";
2790 if (phy_info->attached.device_info &
2791 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
2792 ds = "sata";
2793
2794 printk(MYIOC_s_INFO_FMT
2795 "attaching %s device, channel %d, id %d, phy %d\n",
2796 ioc->name, ds, ev->channel, ev->id, ev->phy_id);
2797 4112
2798 mptsas_parse_device_info(&identify, &phy_info->attached); 4113 mpt_findImVolumes(ioc);
2799 rphy = sas_end_device_alloc(port);
2800 if (!rphy) {
2801 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2802 "%s: exit at line=%d\n", ioc->name,
2803 __func__, __LINE__));
2804 break; /* non-fatal: an rphy can be added later */
2805 }
2806 4114
2807 rphy->identify = identify; 4115 starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
2808 if (sas_rphy_add(rphy)) { 4116 " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
2809 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 4117 ioc->name, hot_plug_info->channel, hot_plug_info->id,
2810 "%s: exit at line=%d\n", ioc->name, 4118 hot_plug_info->phys_disk_num, (unsigned long long)
2811 __func__, __LINE__)); 4119 sas_device.sas_address);
2812 sas_rphy_free(rphy); 4120
2813 break; 4121 vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
2814 } 4122 vtarget->id = hot_plug_info->id;
2815 mptsas_set_rphy(ioc, phy_info, rphy); 4123 phy_info->attached.phys_disk_num = ~0;
4124 mptsas_reprobe_target(starget, 0);
4125 mptsas_add_device_component_by_fw(ioc,
4126 hot_plug_info->channel, hot_plug_info->id);
2816 break; 4127 break;
4128
2817 case MPTSAS_ADD_RAID: 4129 case MPTSAS_ADD_RAID:
2818 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, 4130
2819 ev->id, 0);
2820 if (sdev) {
2821 scsi_device_put(sdev);
2822 break;
2823 }
2824 printk(MYIOC_s_INFO_FMT
2825 "attaching raid volume, channel %d, id %d\n",
2826 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2827 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
2828 mpt_findImVolumes(ioc); 4131 mpt_findImVolumes(ioc);
4132 printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
4133 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
4134 hot_plug_info->id);
4135 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
4136 hot_plug_info->id, 0);
2829 break; 4137 break;
4138
2830 case MPTSAS_DEL_RAID: 4139 case MPTSAS_DEL_RAID:
2831 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, 4140
2832 ev->id, 0);
2833 if (!sdev)
2834 break;
2835 printk(MYIOC_s_INFO_FMT
2836 "removing raid volume, channel %d, id %d\n",
2837 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2838 vdevice = sdev->hostdata;
2839 scsi_remove_device(sdev);
2840 scsi_device_put(sdev);
2841 mpt_findImVolumes(ioc); 4141 mpt_findImVolumes(ioc);
4142 printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
4143 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
4144 hot_plug_info->id);
4145 scsi_remove_device(hot_plug_info->sdev);
4146 scsi_device_put(hot_plug_info->sdev);
2842 break; 4147 break;
4148
2843 case MPTSAS_ADD_INACTIVE_VOLUME: 4149 case MPTSAS_ADD_INACTIVE_VOLUME:
4150
4151 mpt_findImVolumes(ioc);
2844 mptsas_adding_inactive_raid_components(ioc, 4152 mptsas_adding_inactive_raid_components(ioc,
2845 ev->channel, ev->id); 4153 hot_plug_info->channel, hot_plug_info->id);
2846 break; 4154 break;
2847 case MPTSAS_IGNORE_EVENT: 4155
2848 default: 4156 default:
2849 break; 4157 break;
2850 } 4158 }
2851 4159
2852 mutex_unlock(&ioc->sas_discovery_mutex); 4160 mptsas_free_fw_event(ioc, fw_event);
2853 kfree(ev);
2854} 4161}
2855 4162
2856static void 4163static void
2857mptsas_send_sas_event(MPT_ADAPTER *ioc, 4164mptsas_send_sas_event(struct fw_event_work *fw_event)
2858 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
2859{ 4165{
2860 struct mptsas_hotplug_event *ev; 4166 MPT_ADAPTER *ioc;
2861 u32 device_info = le32_to_cpu(sas_event_data->DeviceInfo); 4167 struct mptsas_hotplug_event hot_plug_info;
2862 __le64 sas_address; 4168 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
4169 u32 device_info;
4170 u64 sas_address;
4171
4172 ioc = fw_event->ioc;
4173 sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
4174 fw_event->event_data;
4175 device_info = le32_to_cpu(sas_event_data->DeviceInfo);
2863 4176
2864 if ((device_info & 4177 if ((device_info &
2865 (MPI_SAS_DEVICE_INFO_SSP_TARGET | 4178 (MPI_SAS_DEVICE_INFO_SSP_TARGET |
2866 MPI_SAS_DEVICE_INFO_STP_TARGET | 4179 MPI_SAS_DEVICE_INFO_STP_TARGET |
2867 MPI_SAS_DEVICE_INFO_SATA_DEVICE )) == 0) 4180 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
4181 mptsas_free_fw_event(ioc, fw_event);
4182 return;
4183 }
4184
4185 if (sas_event_data->ReasonCode ==
4186 MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
4187 mptbase_sas_persist_operation(ioc,
4188 MPI_SAS_OP_CLEAR_NOT_PRESENT);
4189 mptsas_free_fw_event(ioc, fw_event);
2868 return; 4190 return;
4191 }
2869 4192
2870 switch (sas_event_data->ReasonCode) { 4193 switch (sas_event_data->ReasonCode) {
2871 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 4194 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2872
2873 mptsas_target_reset_queue(ioc, sas_event_data);
2874 break;
2875
2876 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 4195 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2877 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4196 memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
2878 if (!ev) { 4197 hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
2879 printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name); 4198 hot_plug_info.channel = sas_event_data->Bus;
2880 break; 4199 hot_plug_info.id = sas_event_data->TargetID;
2881 } 4200 hot_plug_info.phy_id = sas_event_data->PhyNum;
2882
2883 INIT_WORK(&ev->work, mptsas_hotplug_work);
2884 ev->ioc = ioc;
2885 ev->handle = le16_to_cpu(sas_event_data->DevHandle);
2886 ev->parent_handle =
2887 le16_to_cpu(sas_event_data->ParentDevHandle);
2888 ev->channel = sas_event_data->Bus;
2889 ev->id = sas_event_data->TargetID;
2890 ev->phy_id = sas_event_data->PhyNum;
2891 memcpy(&sas_address, &sas_event_data->SASAddress, 4201 memcpy(&sas_address, &sas_event_data->SASAddress,
2892 sizeof(__le64)); 4202 sizeof(u64));
2893 ev->sas_address = le64_to_cpu(sas_address); 4203 hot_plug_info.sas_address = le64_to_cpu(sas_address);
2894 ev->device_info = device_info; 4204 hot_plug_info.device_info = device_info;
2895
2896 if (sas_event_data->ReasonCode & 4205 if (sas_event_data->ReasonCode &
2897 MPI_EVENT_SAS_DEV_STAT_RC_ADDED) 4206 MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
2898 ev->event_type = MPTSAS_ADD_DEVICE; 4207 hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
2899 else 4208 else
2900 ev->event_type = MPTSAS_DEL_DEVICE; 4209 hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
2901 schedule_work(&ev->work); 4210 mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
2902 break; 4211 break;
4212
2903 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 4213 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
2904 /* 4214 mptbase_sas_persist_operation(ioc,
2905 * Persistent table is full. 4215 MPI_SAS_OP_CLEAR_NOT_PRESENT);
2906 */ 4216 mptsas_free_fw_event(ioc, fw_event);
2907 INIT_WORK(&ioc->sas_persist_task,
2908 mptsas_persist_clear_table);
2909 schedule_work(&ioc->sas_persist_task);
2910 break; 4217 break;
2911 /* 4218
2912 * TODO, handle other events
2913 */
2914 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 4219 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
2915 case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: 4220 /* TODO */
2916 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 4221 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2917 case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: 4222 /* TODO */
2918 case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
2919 case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
2920 case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
2921 default: 4223 default:
4224 mptsas_free_fw_event(ioc, fw_event);
2922 break; 4225 break;
2923 } 4226 }
2924} 4227}
4228
2925static void 4229static void
2926mptsas_send_raid_event(MPT_ADAPTER *ioc, 4230mptsas_send_raid_event(struct fw_event_work *fw_event)
2927 EVENT_DATA_RAID *raid_event_data)
2928{ 4231{
2929 struct mptsas_hotplug_event *ev; 4232 MPT_ADAPTER *ioc;
2930 int status = le32_to_cpu(raid_event_data->SettingsStatus); 4233 EVENT_DATA_RAID *raid_event_data;
2931 int state = (status >> 8) & 0xff; 4234 struct mptsas_hotplug_event hot_plug_info;
2932 4235 int status;
2933 if (ioc->bus_type != SAS) 4236 int state;
2934 return; 4237 struct scsi_device *sdev = NULL;
2935 4238 VirtDevice *vdevice = NULL;
2936 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4239 RaidPhysDiskPage0_t phys_disk;
2937 if (!ev) { 4240
2938 printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name); 4241 ioc = fw_event->ioc;
2939 return; 4242 raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
4243 status = le32_to_cpu(raid_event_data->SettingsStatus);
4244 state = (status >> 8) & 0xff;
4245
4246 memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
4247 hot_plug_info.id = raid_event_data->VolumeID;
4248 hot_plug_info.channel = raid_event_data->VolumeBus;
4249 hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
4250
4251 if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
4252 raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
4253 raid_event_data->ReasonCode ==
4254 MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
4255 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
4256 hot_plug_info.id, 0);
4257 hot_plug_info.sdev = sdev;
4258 if (sdev)
4259 vdevice = sdev->hostdata;
2940 } 4260 }
2941 4261
2942 INIT_WORK(&ev->work, mptsas_hotplug_work); 4262 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
2943 ev->ioc = ioc; 4263 "ReasonCode=%02x\n", ioc->name, __func__,
2944 ev->id = raid_event_data->VolumeID; 4264 raid_event_data->ReasonCode));
2945 ev->channel = raid_event_data->VolumeBus;
2946 ev->event_type = MPTSAS_IGNORE_EVENT;
2947 4265
2948 switch (raid_event_data->ReasonCode) { 4266 switch (raid_event_data->ReasonCode) {
2949 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: 4267 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
2950 ev->phys_disk_num_valid = 1; 4268 hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
2951 ev->phys_disk_num = raid_event_data->PhysDiskNum;
2952 ev->event_type = MPTSAS_ADD_DEVICE;
2953 break; 4269 break;
2954 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED: 4270 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
2955 ev->phys_disk_num_valid = 1; 4271 hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
2956 ev->phys_disk_num = raid_event_data->PhysDiskNum;
2957 ev->hidden_raid_component = 1;
2958 ev->event_type = MPTSAS_DEL_DEVICE;
2959 break; 4272 break;
2960 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED: 4273 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
2961 switch (state) { 4274 switch (state) {
2962 case MPI_PD_STATE_ONLINE: 4275 case MPI_PD_STATE_ONLINE:
2963 case MPI_PD_STATE_NOT_COMPATIBLE: 4276 case MPI_PD_STATE_NOT_COMPATIBLE:
2964 ev->phys_disk_num_valid = 1; 4277 mpt_raid_phys_disk_pg0(ioc,
2965 ev->phys_disk_num = raid_event_data->PhysDiskNum; 4278 raid_event_data->PhysDiskNum, &phys_disk);
2966 ev->hidden_raid_component = 1; 4279 hot_plug_info.id = phys_disk.PhysDiskID;
2967 ev->event_type = MPTSAS_ADD_DEVICE; 4280 hot_plug_info.channel = phys_disk.PhysDiskBus;
4281 hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
2968 break; 4282 break;
4283 case MPI_PD_STATE_FAILED:
2969 case MPI_PD_STATE_MISSING: 4284 case MPI_PD_STATE_MISSING:
2970 case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST: 4285 case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
2971 case MPI_PD_STATE_FAILED_AT_HOST_REQUEST: 4286 case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
2972 case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON: 4287 case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
2973 ev->phys_disk_num_valid = 1; 4288 hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
2974 ev->phys_disk_num = raid_event_data->PhysDiskNum;
2975 ev->event_type = MPTSAS_DEL_DEVICE;
2976 break; 4289 break;
2977 default: 4290 default:
2978 break; 4291 break;
2979 } 4292 }
2980 break; 4293 break;
2981 case MPI_EVENT_RAID_RC_VOLUME_DELETED: 4294 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
2982 ev->event_type = MPTSAS_DEL_RAID; 4295 if (!sdev)
4296 break;
4297 vdevice->vtarget->deleted = 1; /* block IO */
4298 hot_plug_info.event_type = MPTSAS_DEL_RAID;
2983 break; 4299 break;
2984 case MPI_EVENT_RAID_RC_VOLUME_CREATED: 4300 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
2985 ev->event_type = MPTSAS_ADD_RAID; 4301 if (sdev) {
4302 scsi_device_put(sdev);
4303 break;
4304 }
4305 hot_plug_info.event_type = MPTSAS_ADD_RAID;
2986 break; 4306 break;
2987 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: 4307 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
4308 if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
4309 if (!sdev)
4310 break;
4311 vdevice->vtarget->deleted = 1; /* block IO */
4312 hot_plug_info.event_type = MPTSAS_DEL_RAID;
4313 break;
4314 }
2988 switch (state) { 4315 switch (state) {
2989 case MPI_RAIDVOL0_STATUS_STATE_FAILED: 4316 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
2990 case MPI_RAIDVOL0_STATUS_STATE_MISSING: 4317 case MPI_RAIDVOL0_STATUS_STATE_MISSING:
2991 ev->event_type = MPTSAS_DEL_RAID; 4318 if (!sdev)
4319 break;
4320 vdevice->vtarget->deleted = 1; /* block IO */
4321 hot_plug_info.event_type = MPTSAS_DEL_RAID;
2992 break; 4322 break;
2993 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: 4323 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
2994 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: 4324 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
2995 ev->event_type = MPTSAS_ADD_RAID; 4325 if (sdev) {
4326 scsi_device_put(sdev);
4327 break;
4328 }
4329 hot_plug_info.event_type = MPTSAS_ADD_RAID;
2996 break; 4330 break;
2997 default: 4331 default:
2998 break; 4332 break;
@@ -3001,32 +4335,188 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
3001 default: 4335 default:
3002 break; 4336 break;
3003 } 4337 }
3004 schedule_work(&ev->work); 4338
4339 if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
4340 mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
4341 else
4342 mptsas_free_fw_event(ioc, fw_event);
3005} 4343}
3006 4344
3007static void 4345/**
3008mptsas_send_discovery_event(MPT_ADAPTER *ioc, 4346 * mptsas_issue_tm - send mptsas internal tm request
3009 EVENT_DATA_SAS_DISCOVERY *discovery_data) 4347 * @ioc: Pointer to MPT_ADAPTER structure
4348 * @type: Task Management type
4349 * @channel: channel number for task management
4350 * @id: Logical Target ID for reset (if appropriate)
4351 * @lun: Logical unit for reset (if appropriate)
4352 * @task_context: Context for the task to be aborted
4353 * @timeout: timeout for task management control
4354 *
4355 * Returns 0 on success and -1 on failure; sets *issue_reset when the caller should escalate to a hard reset.
4356 *
4357 */
4358static int
4359mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
4360 int task_context, ulong timeout, u8 *issue_reset)
3010{ 4361{
3011 struct mptsas_discovery_event *ev; 4362 MPT_FRAME_HDR *mf;
4363 SCSITaskMgmt_t *pScsiTm;
4364 int retval;
4365 unsigned long timeleft;
4366
4367 *issue_reset = 0;
4368 mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
4369 if (mf == NULL) {
4370 retval = -1; /* return failure */
4371 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
4372 "msg frames!!\n", ioc->name));
4373 goto out;
4374 }
3012 4375
3013 /* 4376 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
3014 * DiscoveryStatus 4377 "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
3015 * 4378 "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
3016 * This flag will be non-zero when firmware 4379 type, timeout, channel, id, (unsigned long long)lun,
3017 * kicks off discovery, and return to zero 4380 task_context));
3018 * once its completed. 4381
3019 */ 4382 pScsiTm = (SCSITaskMgmt_t *) mf;
3020 if (discovery_data->DiscoveryStatus) 4383 memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
3021 return; 4384 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4385 pScsiTm->TaskType = type;
4386 pScsiTm->MsgFlags = 0;
4387 pScsiTm->TargetID = id;
4388 pScsiTm->Bus = channel;
4389 pScsiTm->ChainOffset = 0;
4390 pScsiTm->Reserved = 0;
4391 pScsiTm->Reserved1 = 0;
4392 pScsiTm->TaskMsgContext = task_context;
4393 int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
4394
4395 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
4396 CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
4397 retval = 0;
4398 mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
4399
4400 /* Now wait for the command to complete */
4401 timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
4402 timeout*HZ);
4403 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
4404 retval = -1; /* return failure */
4405 dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
4406 "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
4407 mpt_free_msg_frame(ioc, mf);
4408 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
4409 goto out;
4410 *issue_reset = 1;
4411 goto out;
4412 }
3022 4413
3023 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4414 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
3024 if (!ev) 4415 retval = -1; /* return failure */
4416 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4417 "TaskMgmt request: failed with no reply\n", ioc->name));
4418 goto out;
4419 }
4420
4421 out:
4422 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
4423 return retval;
4424}
4425
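
For context, a minimal sketch of how a caller is expected to use mptsas_issue_tm() as defined above: check the return value for the TM itself, and escalate to a hard reset when the helper reports a timeout through *issue_reset. The function name example_abort_task_set, the 30-second timeout and the zero lun/task_context are illustrative assumptions (the patch itself passes 0 for task_context on ABRT_TASK_SET); the other identifiers come from the driver.

/* Hypothetical caller; assumes the mptsas/mptbase headers are in scope. */
static void example_abort_task_set(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
	u8 issue_reset = 0;

	if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
	    channel, id, 0 /* lun */, 0 /* task_context */, 30 /* seconds */,
	    &issue_reset))
		printk(MYIOC_s_WARN_FMT "TM failed\n", ioc->name);

	if (issue_reset)	/* TM timed out without an IOC reset */
		mpt_HardResetHandler(ioc, CAN_SLEEP);
}
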
4426/**
4427 * mptsas_broadcast_primative_work - Handle broadcast primitives
4428 * @work: work queue payload containing info describing the event
4429 *
4430 * This is handled in workqueue context.
4431 */
4432static void
4433mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
4434{
4435 MPT_ADAPTER *ioc = fw_event->ioc;
4436 MPT_FRAME_HDR *mf;
4437 VirtDevice *vdevice;
4438 int ii;
4439 struct scsi_cmnd *sc;
4440 SCSITaskMgmtReply_t *pScsiTmReply;
4441 u8 issue_reset;
4442 int task_context;
4443 u8 channel, id;
4444 int lun;
4445 u32 termination_count;
4446 u32 query_count;
4447
4448 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4449 "%s - enter\n", ioc->name, __func__));
4450
4451 mutex_lock(&ioc->taskmgmt_cmds.mutex);
4452 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
4453 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
4454 mptsas_requeue_fw_event(ioc, fw_event, 1000);
3025 return; 4455 return;
3026 INIT_WORK(&ev->work, mptsas_discovery_work); 4456 }
3027 ev->ioc = ioc; 4457
3028 schedule_work(&ev->work); 4458 issue_reset = 0;
3029}; 4459 termination_count = 0;
4460 query_count = 0;
4461 mpt_findImVolumes(ioc);
4462 pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
4463
4464 for (ii = 0; ii < ioc->req_depth; ii++) {
4465 if (ioc->fw_events_off)
4466 goto out;
4467 sc = mptscsih_get_scsi_lookup(ioc, ii);
4468 if (!sc)
4469 continue;
4470 mf = MPT_INDEX_2_MFPTR(ioc, ii);
4471 if (!mf)
4472 continue;
4473 task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
4474 vdevice = sc->device->hostdata;
4475 if (!vdevice || !vdevice->vtarget)
4476 continue;
4477 if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
4478 continue; /* skip hidden raid components */
4479 if (vdevice->vtarget->raidVolume)
4480 continue; /* skip hidden raid components */
4481 channel = vdevice->vtarget->channel;
4482 id = vdevice->vtarget->id;
4483 lun = vdevice->lun;
4484 if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
4485 channel, id, (u64)lun, task_context, 30, &issue_reset))
4486 goto out;
4487 query_count++;
4488 termination_count +=
4489 le32_to_cpu(pScsiTmReply->TerminationCount);
4490 if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
4491 (pScsiTmReply->ResponseCode ==
4492 MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
4493 pScsiTmReply->ResponseCode ==
4494 MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
4495 continue;
4496 if (mptsas_issue_tm(ioc,
4497 MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
4498 channel, id, (u64)lun, 0, 30, &issue_reset))
4499 goto out;
4500 termination_count +=
4501 le32_to_cpu(pScsiTmReply->TerminationCount);
4502 }
4503
4504 out:
4505 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4506 "%s - exit, query_count = %d termination_count = %d\n",
4507 ioc->name, __func__, query_count, termination_count));
4508
4509 ioc->broadcast_aen_busy = 0;
4510 mpt_clear_taskmgmt_in_progress_flag(ioc);
4511 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
4512
4513 if (issue_reset) {
4514 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
4515 ioc->name, __func__);
4516 mpt_HardResetHandler(ioc, CAN_SLEEP);
4517 }
4518 mptsas_free_fw_event(ioc, fw_event);
4519}
3030 4520
3031/* 4521/*
3032 * mptsas_send_ir2_event - handle exposing hidden disk when 4522 * mptsas_send_ir2_event - handle exposing hidden disk when
@@ -3037,76 +4527,159 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
3037 * 4527 *
3038 */ 4528 */
3039static void 4529static void
3040mptsas_send_ir2_event(MPT_ADAPTER *ioc, PTR_MPI_EVENT_DATA_IR2 ir2_data) 4530mptsas_send_ir2_event(struct fw_event_work *fw_event)
3041{ 4531{
3042 struct mptsas_hotplug_event *ev; 4532 MPT_ADAPTER *ioc;
3043 4533 struct mptsas_hotplug_event hot_plug_info;
3044 if (ir2_data->ReasonCode != 4534 MPI_EVENT_DATA_IR2 *ir2_data;
3045 MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED) 4535 u8 reasonCode;
3046 return; 4536 RaidPhysDiskPage0_t phys_disk;
3047 4537
3048 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 4538 ioc = fw_event->ioc;
3049 if (!ev) 4539 ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
4540 reasonCode = ir2_data->ReasonCode;
4541
4542 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
4543 "ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
4544
4545 memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
4546 hot_plug_info.id = ir2_data->TargetID;
4547 hot_plug_info.channel = ir2_data->Bus;
4548 switch (reasonCode) {
4549 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
4550 hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
4551 break;
4552 case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
4553 hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
4554 hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
4555 break;
4556 case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
4557 hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
4558 mpt_raid_phys_disk_pg0(ioc,
4559 ir2_data->PhysDiskNum, &phys_disk);
4560 hot_plug_info.id = phys_disk.PhysDiskID;
4561 hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
4562 break;
4563 default:
4564 mptsas_free_fw_event(ioc, fw_event);
3050 return; 4565 return;
3051 4566 }
3052 INIT_WORK(&ev->work, mptsas_hotplug_work); 4567 mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
3053 ev->ioc = ioc; 4568}
3054 ev->id = ir2_data->TargetID;
3055 ev->channel = ir2_data->Bus;
3056 ev->event_type = MPTSAS_ADD_INACTIVE_VOLUME;
3057
3058 schedule_work(&ev->work);
3059};
3060 4569
3061static int 4570static int
3062mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) 4571mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
3063{ 4572{
3064 int rc=1; 4573 u32 event = le32_to_cpu(reply->Event);
3065 u8 event = le32_to_cpu(reply->Event) & 0xFF; 4574 int sz, event_data_sz;
4575 struct fw_event_work *fw_event;
4576 unsigned long delay;
3066 4577
3067 if (!ioc->sh) 4578 /* events turned off due to host reset or driver unloading */
3068 goto out; 4579 if (ioc->fw_events_off)
3069 4580 return 0;
3070 /*
3071 * sas_discovery_ignore_events
3072 *
3073 * This flag is to prevent anymore processing of
3074 * sas events once mptsas_remove function is called.
3075 */
3076 if (ioc->sas_discovery_ignore_events) {
3077 rc = mptscsih_event_process(ioc, reply);
3078 goto out;
3079 }
3080 4581
4582 delay = msecs_to_jiffies(1);
3081 switch (event) { 4583 switch (event) {
4584 case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
4585 {
4586 EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
4587 (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
4588 if (broadcast_event_data->Primitive !=
4589 MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
4590 return 0;
4591 if (ioc->broadcast_aen_busy)
4592 return 0;
4593 ioc->broadcast_aen_busy = 1;
4594 break;
4595 }
3082 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 4596 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
3083 mptsas_send_sas_event(ioc, 4597 {
3084 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data); 4598 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
4599 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
4600
4601 if (sas_event_data->ReasonCode ==
4602 MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
4603 mptsas_target_reset_queue(ioc, sas_event_data);
4604 return 0;
4605 }
3085 break; 4606 break;
3086 case MPI_EVENT_INTEGRATED_RAID: 4607 }
3087 mptsas_send_raid_event(ioc, 4608 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
3088 (EVENT_DATA_RAID *)reply->Data); 4609 {
4610 MpiEventDataSasExpanderStatusChange_t *expander_data =
4611 (MpiEventDataSasExpanderStatusChange_t *)reply->Data;
4612
4613 if (ioc->old_sas_discovery_protocal)
4614 return 0;
4615
4616 if (expander_data->ReasonCode ==
4617 MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
4618 ioc->device_missing_delay)
4619 delay = HZ * ioc->device_missing_delay;
3089 break; 4620 break;
4621 }
4622 case MPI_EVENT_SAS_DISCOVERY:
4623 {
4624 u32 discovery_status;
4625 EventDataSasDiscovery_t *discovery_data =
4626 (EventDataSasDiscovery_t *)reply->Data;
4627
4628 discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
4629 ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
4630 if (ioc->old_sas_discovery_protocal && !discovery_status)
4631 mptsas_queue_rescan(ioc);
4632 return 0;
4633 }
4634 case MPI_EVENT_INTEGRATED_RAID:
3090 case MPI_EVENT_PERSISTENT_TABLE_FULL: 4635 case MPI_EVENT_PERSISTENT_TABLE_FULL:
3091 INIT_WORK(&ioc->sas_persist_task,
3092 mptsas_persist_clear_table);
3093 schedule_work(&ioc->sas_persist_task);
3094 break;
3095 case MPI_EVENT_SAS_DISCOVERY:
3096 mptsas_send_discovery_event(ioc,
3097 (EVENT_DATA_SAS_DISCOVERY *)reply->Data);
3098 break;
3099 case MPI_EVENT_IR2: 4636 case MPI_EVENT_IR2:
3100 mptsas_send_ir2_event(ioc, 4637 case MPI_EVENT_SAS_PHY_LINK_STATUS:
3101 (PTR_MPI_EVENT_DATA_IR2)reply->Data); 4638 case MPI_EVENT_QUEUE_FULL:
3102 break; 4639 break;
3103 default: 4640 default:
3104 rc = mptscsih_event_process(ioc, reply); 4641 return 0;
3105 break;
3106 } 4642 }
3107 out:
3108 4643
3109 return rc; 4644 event_data_sz = ((reply->MsgLength * 4) -
4645 offsetof(EventNotificationReply_t, Data));
4646 sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
4647 fw_event = kzalloc(sz, GFP_ATOMIC);
4648 if (!fw_event) {
4649 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
4650 __func__, __LINE__);
4651 return 0;
4652 }
4653 memcpy(fw_event->event_data, reply->Data, event_data_sz);
4654 fw_event->event = event;
4655 fw_event->ioc = ioc;
4656 mptsas_add_fw_event(ioc, fw_event, delay);
4657 return 0;
4658}
4659
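
The pattern used above -- copy the variable-length event payload into a work item that ends in a trailing byte array, then defer it to a workqueue with a small delay -- is the heart of the new event handling. A stripped-down, self-contained sketch of the same idiom follows; the names (deferred_event, queue_deferred_event, wq) are generic placeholders, not the driver's own helpers.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct deferred_event {
	struct delayed_work work;
	u32 event;
	u8 data[1];		/* variable-length payload follows */
};

static void deferred_event_fn(struct work_struct *work)
{
	struct deferred_event *ev =
	    container_of(work, struct deferred_event, work.work);

	/* ... process ev->event / ev->data here ... */
	kfree(ev);
}

/* Copy 'len' bytes of payload and run the handler roughly 1 ms later. */
static int queue_deferred_event(struct workqueue_struct *wq,
    u32 event, const void *payload, int len)
{
	struct deferred_event *ev;

	ev = kzalloc(offsetof(struct deferred_event, data) + len, GFP_ATOMIC);
	if (!ev)
		return -ENOMEM;
	ev->event = event;
	memcpy(ev->data, payload, len);
	INIT_DELAYED_WORK(&ev->work, deferred_event_fn);
	queue_delayed_work(wq, &ev->work, msecs_to_jiffies(1));
	return 0;
}

Sizing the allocation with offsetof() plus the payload length is what lets a single struct carry event data of any size, which is why the patch replaces the fixed-size hotplug event allocations with this scheme.
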
4660/* Delete a volume when no longer listed in ioc pg2
4661 */
4662static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
4663{
4664 struct scsi_device *sdev;
4665 int i;
4666
4667 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
4668 if (!sdev)
4669 return;
4670 if (!ioc->raid_data.pIocPg2)
4671 goto out;
4672 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
4673 goto out;
4674 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
4675 if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
4676 goto release_sdev;
4677 out:
4678 printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
4679 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
4680 scsi_remove_device(sdev);
4681 release_sdev:
4682 scsi_device_put(sdev);
3110} 4683}

3111 4684
3112static int 4685static int
@@ -3128,6 +4701,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3128 return r; 4701 return r;
3129 4702
3130 ioc = pci_get_drvdata(pdev); 4703 ioc = pci_get_drvdata(pdev);
4704 mptsas_fw_event_off(ioc);
3131 ioc->DoneCtx = mptsasDoneCtx; 4705 ioc->DoneCtx = mptsasDoneCtx;
3132 ioc->TaskCtx = mptsasTaskCtx; 4706 ioc->TaskCtx = mptsasTaskCtx;
3133 ioc->InternalCtx = mptsasInternalCtx; 4707 ioc->InternalCtx = mptsasInternalCtx;
@@ -3211,17 +4785,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3211 * A slightly different algorithm is required for 4785 * A slightly different algorithm is required for
3212 * 64bit SGEs. 4786 * 64bit SGEs.
3213 */ 4787 */
3214 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 4788 scale = ioc->req_sz/ioc->SGE_size;
3215 if (sizeof(dma_addr_t) == sizeof(u64)) { 4789 if (ioc->sg_addr_size == sizeof(u64)) {
3216 numSGE = (scale - 1) * 4790 numSGE = (scale - 1) *
3217 (ioc->facts.MaxChainDepth-1) + scale + 4791 (ioc->facts.MaxChainDepth-1) + scale +
3218 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 4792 (ioc->req_sz - 60) / ioc->SGE_size;
3219 sizeof(u32));
3220 } else { 4793 } else {
3221 numSGE = 1 + (scale - 1) * 4794 numSGE = 1 + (scale - 1) *
3222 (ioc->facts.MaxChainDepth-1) + scale + 4795 (ioc->facts.MaxChainDepth-1) + scale +
3223 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 4796 (ioc->req_sz - 64) / ioc->SGE_size;
3224 sizeof(u32));
3225 } 4797 }
3226 4798
3227 if (numSGE < sh->sg_tablesize) { 4799 if (numSGE < sh->sg_tablesize) {
@@ -3251,9 +4823,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3251 4823
3252 /* Clear the TM flags 4824 /* Clear the TM flags
3253 */ 4825 */
3254 hd->tmPending = 0;
3255 hd->tmState = TM_STATE_NONE;
3256 hd->resetPending = 0;
3257 hd->abortSCpnt = NULL; 4826 hd->abortSCpnt = NULL;
3258 4827
3259 /* Clear the pointer used to store 4828 /* Clear the pointer used to store
@@ -3273,10 +4842,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3273 4842
3274 ioc->sas_data.ptClear = mpt_pt_clear; 4843 ioc->sas_data.ptClear = mpt_pt_clear;
3275 4844
3276 init_waitqueue_head(&hd->scandv_waitq);
3277 hd->scandv_wait_done = 0;
3278 hd->last_queue_full = 0; 4845 hd->last_queue_full = 0;
3279 INIT_LIST_HEAD(&hd->target_reset_list); 4846 INIT_LIST_HEAD(&hd->target_reset_list);
4847 INIT_LIST_HEAD(&ioc->sas_device_info_list);
4848 mutex_init(&ioc->sas_device_info_mutex);
4849
3280 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 4850 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
3281 4851
3282 if (ioc->sas_data.ptClear==1) { 4852 if (ioc->sas_data.ptClear==1) {
@@ -3291,8 +4861,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3291 goto out_mptsas_probe; 4861 goto out_mptsas_probe;
3292 } 4862 }
3293 4863
4864 /* older firmware doesn't support expander events */
4865 if ((ioc->facts.HeaderVersion >> 8) < 0xE)
4866 ioc->old_sas_discovery_protocal = 1;
3294 mptsas_scan_sas_topology(ioc); 4867 mptsas_scan_sas_topology(ioc);
3295 4868 mptsas_fw_event_on(ioc);
3296 return 0; 4869 return 0;
3297 4870
3298 out_mptsas_probe: 4871 out_mptsas_probe:
@@ -3301,12 +4874,25 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3301 return error; 4874 return error;
3302} 4875}
3303 4876
4877void
4878mptsas_shutdown(struct pci_dev *pdev)
4879{
4880 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
4881
4882 mptsas_fw_event_off(ioc);
4883 mptsas_cleanup_fw_event_q(ioc);
4884}
4885
3304static void __devexit mptsas_remove(struct pci_dev *pdev) 4886static void __devexit mptsas_remove(struct pci_dev *pdev)
3305{ 4887{
3306 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 4888 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
3307 struct mptsas_portinfo *p, *n; 4889 struct mptsas_portinfo *p, *n;
3308 int i; 4890 int i;
3309 4891
4892 mptsas_shutdown(pdev);
4893
4894 mptsas_del_device_components(ioc);
4895
3310 ioc->sas_discovery_ignore_events = 1; 4896 ioc->sas_discovery_ignore_events = 1;
3311 sas_remove_host(ioc->sh); 4897 sas_remove_host(ioc->sh);
3312 4898
@@ -3315,11 +4901,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
3315 list_del(&p->list); 4901 list_del(&p->list);
3316 for (i = 0 ; i < p->num_phys ; i++) 4902 for (i = 0 ; i < p->num_phys ; i++)
3317 mptsas_port_delete(ioc, p->phy_info[i].port_details); 4903 mptsas_port_delete(ioc, p->phy_info[i].port_details);
4904
3318 kfree(p->phy_info); 4905 kfree(p->phy_info);
3319 kfree(p); 4906 kfree(p);
3320 } 4907 }
3321 mutex_unlock(&ioc->sas_topology_mutex); 4908 mutex_unlock(&ioc->sas_topology_mutex);
3322 4909 ioc->hba_port_info = NULL;
3323 mptscsih_remove(pdev); 4910 mptscsih_remove(pdev);
3324} 4911}
3325 4912
@@ -3344,7 +4931,7 @@ static struct pci_driver mptsas_driver = {
3344 .id_table = mptsas_pci_table, 4931 .id_table = mptsas_pci_table,
3345 .probe = mptsas_probe, 4932 .probe = mptsas_probe,
3346 .remove = __devexit_p(mptsas_remove), 4933 .remove = __devexit_p(mptsas_remove),
3347 .shutdown = mptscsih_shutdown, 4934 .shutdown = mptsas_shutdown,
3348#ifdef CONFIG_PM 4935#ifdef CONFIG_PM
3349 .suspend = mptscsih_suspend, 4936 .suspend = mptscsih_suspend,
3350 .resume = mptscsih_resume, 4937 .resume = mptscsih_resume,
@@ -3364,10 +4951,12 @@ mptsas_init(void)
3364 return -ENODEV; 4951 return -ENODEV;
3365 4952
3366 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER); 4953 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
3367 mptsasTaskCtx = mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER); 4954 mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
3368 mptsasInternalCtx = 4955 mptsasInternalCtx =
3369 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER); 4956 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
3370 mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER); 4957 mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);
4958 mptsasDeviceResetCtx =
4959 mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
3371 4960
3372 mpt_event_register(mptsasDoneCtx, mptsas_event_process); 4961 mpt_event_register(mptsasDoneCtx, mptsas_event_process);
3373 mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset); 4962 mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
@@ -3392,6 +4981,7 @@ mptsas_exit(void)
3392 mpt_deregister(mptsasInternalCtx); 4981 mpt_deregister(mptsasInternalCtx);
3393 mpt_deregister(mptsasTaskCtx); 4982 mpt_deregister(mptsasTaskCtx);
3394 mpt_deregister(mptsasDoneCtx); 4983 mpt_deregister(mptsasDoneCtx);
4984 mpt_deregister(mptsasDeviceResetCtx);
3395} 4985}
3396 4986
3397module_init(mptsas_init); 4987module_init(mptsas_init);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 2b544e0877e6..953c2bfcf6aa 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -53,6 +53,7 @@ struct mptsas_target_reset_event {
53 struct list_head list; 53 struct list_head list;
54 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data; 54 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
55 u8 target_reset_issued; 55 u8 target_reset_issued;
56 unsigned long time_count;
56}; 57};
57 58
58enum mptsas_hotplug_action { 59enum mptsas_hotplug_action {
@@ -60,12 +61,37 @@ enum mptsas_hotplug_action {
60 MPTSAS_DEL_DEVICE, 61 MPTSAS_DEL_DEVICE,
61 MPTSAS_ADD_RAID, 62 MPTSAS_ADD_RAID,
62 MPTSAS_DEL_RAID, 63 MPTSAS_DEL_RAID,
64 MPTSAS_ADD_PHYSDISK,
65 MPTSAS_ADD_PHYSDISK_REPROBE,
66 MPTSAS_DEL_PHYSDISK,
67 MPTSAS_DEL_PHYSDISK_REPROBE,
63 MPTSAS_ADD_INACTIVE_VOLUME, 68 MPTSAS_ADD_INACTIVE_VOLUME,
64 MPTSAS_IGNORE_EVENT, 69 MPTSAS_IGNORE_EVENT,
65}; 70};
66 71
72struct mptsas_mapping{
73 u8 id;
74 u8 channel;
75};
76
77struct mptsas_device_info {
78 struct list_head list;
79 struct mptsas_mapping os; /* operating system mapping*/
80 struct mptsas_mapping fw; /* firmware mapping */
81 u64 sas_address;
82 u32 device_info; /* specific bits for devices */
83 u16 slot; /* enclosure slot id */
84 u64 enclosure_logical_id; /*enclosure address */
85 u8 is_logical_volume; /* is this logical volume */
86 /* this belongs to volume */
87 u8 is_hidden_raid_component;
88 /* this valid when is_hidden_raid_component set */
89 u8 volume_id;
90 /* cached data for a removed device */
91 u8 is_cached;
92};
93
67struct mptsas_hotplug_event { 94struct mptsas_hotplug_event {
68 struct work_struct work;
69 MPT_ADAPTER *ioc; 95 MPT_ADAPTER *ioc;
70 enum mptsas_hotplug_action event_type; 96 enum mptsas_hotplug_action event_type;
71 u64 sas_address; 97 u64 sas_address;
@@ -73,11 +99,18 @@ struct mptsas_hotplug_event {
73 u8 id; 99 u8 id;
74 u32 device_info; 100 u32 device_info;
75 u16 handle; 101 u16 handle;
76 u16 parent_handle;
77 u8 phy_id; 102 u8 phy_id;
78 u8 phys_disk_num_valid; /* hrc (hidden raid component) */
79 u8 phys_disk_num; /* hrc - unique index*/ 103 u8 phys_disk_num; /* hrc - unique index*/
80 u8 hidden_raid_component; /* hrc - don't expose*/ 104 struct scsi_device *sdev;
105};
106
107struct fw_event_work {
108 struct list_head list;
109 struct delayed_work work;
110 MPT_ADAPTER *ioc;
111 u32 event;
112 u8 retries;
113 u8 event_data[1];
81}; 114};
82 115
83struct mptsas_discovery_event { 116struct mptsas_discovery_event {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index e62c6bc4ad33..024e8305bcf2 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -80,7 +80,7 @@ MODULE_VERSION(my_VERSION);
80/* 80/*
81 * Other private/forward protos... 81 * Other private/forward protos...
82 */ 82 */
83static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); 83struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
84static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i); 84static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i);
85static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd); 85static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd);
86static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd); 86static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd);
@@ -92,18 +92,24 @@ static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
92 SCSIIORequest_t *pReq, int req_idx); 92 SCSIIORequest_t *pReq, int req_idx);
93static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx); 93static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
94static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); 94static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
95static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
96static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
97 95
98static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout); 96int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
97 int lun, int ctx2abort, ulong timeout);
99 98
100int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); 99int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
101int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); 100int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
102 101
102void
103mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
104static int mptscsih_get_completion_code(MPT_ADAPTER *ioc,
105 MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
103int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); 106int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
104static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); 107static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
105static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice); 108static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
106 109
110static int
111mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
112 SCSITaskMgmtReply_t *pScsiTmReply);
107void mptscsih_remove(struct pci_dev *); 113void mptscsih_remove(struct pci_dev *);
108void mptscsih_shutdown(struct pci_dev *); 114void mptscsih_shutdown(struct pci_dev *);
109#ifdef CONFIG_PM 115#ifdef CONFIG_PM
@@ -113,69 +119,6 @@ int mptscsih_resume(struct pci_dev *pdev);
113 119
114#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE 120#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
115 121
116/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
117/**
118 * mptscsih_add_sge - Place a simple SGE at address pAddr.
119 * @pAddr: virtual address for SGE
120 * @flagslength: SGE flags and data transfer length
121 * @dma_addr: Physical address
122 *
123 * This routine places a MPT request frame back on the MPT adapter's
124 * FreeQ.
125 */
126static inline void
127mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
128{
129 if (sizeof(dma_addr_t) == sizeof(u64)) {
130 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
131 u32 tmp = dma_addr & 0xFFFFFFFF;
132
133 pSge->FlagsLength = cpu_to_le32(flagslength);
134 pSge->Address.Low = cpu_to_le32(tmp);
135 tmp = (u32) ((u64)dma_addr >> 32);
136 pSge->Address.High = cpu_to_le32(tmp);
137
138 } else {
139 SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
140 pSge->FlagsLength = cpu_to_le32(flagslength);
141 pSge->Address = cpu_to_le32(dma_addr);
142 }
143} /* mptscsih_add_sge() */
144
145/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
146/**
147 * mptscsih_add_chain - Place a chain SGE at address pAddr.
148 * @pAddr: virtual address for SGE
149 * @next: nextChainOffset value (u32's)
150 * @length: length of next SGL segment
151 * @dma_addr: Physical address
152 *
153 * This routine places a MPT request frame back on the MPT adapter's
154 * FreeQ.
155 */
156static inline void
157mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
158{
159 if (sizeof(dma_addr_t) == sizeof(u64)) {
160 SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
161 u32 tmp = dma_addr & 0xFFFFFFFF;
162
163 pChain->Length = cpu_to_le16(length);
164 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
165
166 pChain->NextChainOffset = next;
167
168 pChain->Address.Low = cpu_to_le32(tmp);
169 tmp = (u32) ((u64)dma_addr >> 32);
170 pChain->Address.High = cpu_to_le32(tmp);
171 } else {
172 SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
173 pChain->Length = cpu_to_le16(length);
174 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
175 pChain->NextChainOffset = next;
176 pChain->Address = cpu_to_le32(dma_addr);
177 }
178} /* mptscsih_add_chain() */
179 122
180/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 123/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
181/* 124/*
@@ -281,10 +224,10 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
281 */ 224 */
282 225
283nextSGEset: 226nextSGEset:
284 numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) ); 227 numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size);
285 numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots; 228 numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
286 229
287 sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir; 230 sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir;
288 231
289 /* Get first (num - 1) SG elements 232 /* Get first (num - 1) SG elements
290 * Skip any SG entries with a length of 0 233 * Skip any SG entries with a length of 0
@@ -293,17 +236,19 @@ nextSGEset:
293 for (ii=0; ii < (numSgeThisFrame-1); ii++) { 236 for (ii=0; ii < (numSgeThisFrame-1); ii++) {
294 thisxfer = sg_dma_len(sg); 237 thisxfer = sg_dma_len(sg);
295 if (thisxfer == 0) { 238 if (thisxfer == 0) {
296 sg = sg_next(sg); /* Get next SG element from the OS */ 239 /* Get next SG element from the OS */
240 sg = sg_next(sg);
297 sg_done++; 241 sg_done++;
298 continue; 242 continue;
299 } 243 }
300 244
301 v2 = sg_dma_address(sg); 245 v2 = sg_dma_address(sg);
302 mptscsih_add_sge(psge, sgflags | thisxfer, v2); 246 ioc->add_sge(psge, sgflags | thisxfer, v2);
303 247
304 sg = sg_next(sg); /* Get next SG element from the OS */ 248 /* Get next SG element from the OS */
305 psge += (sizeof(u32) + sizeof(dma_addr_t)); 249 sg = sg_next(sg);
306 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); 250 psge += ioc->SGE_size;
251 sgeOffset += ioc->SGE_size;
307 sg_done++; 252 sg_done++;
308 } 253 }
309 254
@@ -320,12 +265,8 @@ nextSGEset:
320 thisxfer = sg_dma_len(sg); 265 thisxfer = sg_dma_len(sg);
321 266
322 v2 = sg_dma_address(sg); 267 v2 = sg_dma_address(sg);
323 mptscsih_add_sge(psge, sgflags | thisxfer, v2); 268 ioc->add_sge(psge, sgflags | thisxfer, v2);
324 /* 269 sgeOffset += ioc->SGE_size;
325 sg = sg_next(sg);
326 psge += (sizeof(u32) + sizeof(dma_addr_t));
327 */
328 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
329 sg_done++; 270 sg_done++;
330 271
331 if (chainSge) { 272 if (chainSge) {
@@ -334,7 +275,8 @@ nextSGEset:
334 * Update the chain element 275 * Update the chain element
335 * Offset and Length fields. 276 * Offset and Length fields.
336 */ 277 */
337 mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); 278 ioc->add_chain((char *)chainSge, 0, sgeOffset,
279 ioc->ChainBufferDMA + chain_dma_off);
338 } else { 280 } else {
339 /* The current buffer is the original MF 281 /* The current buffer is the original MF
340 * and there is no Chain buffer. 282 * and there is no Chain buffer.
@@ -367,7 +309,7 @@ nextSGEset:
367 * set properly). 309 * set properly).
368 */ 310 */
369 if (sg_done) { 311 if (sg_done) {
370 u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t))); 312 u32 *ptmp = (u32 *) (psge - ioc->SGE_size);
371 sgflags = le32_to_cpu(*ptmp); 313 sgflags = le32_to_cpu(*ptmp);
372 sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT; 314 sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
373 *ptmp = cpu_to_le32(sgflags); 315 *ptmp = cpu_to_le32(sgflags);
@@ -381,8 +323,9 @@ nextSGEset:
381 * Old chain element is now complete. 323 * Old chain element is now complete.
382 */ 324 */
383 u8 nextChain = (u8) (sgeOffset >> 2); 325 u8 nextChain = (u8) (sgeOffset >> 2);
384 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); 326 sgeOffset += ioc->SGE_size;
385 mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); 327 ioc->add_chain((char *)chainSge, nextChain, sgeOffset,
328 ioc->ChainBufferDMA + chain_dma_off);
386 } else { 329 } else {
387 /* The original MF buffer requires a chain buffer - 330 /* The original MF buffer requires a chain buffer -
388 * set the offset. 331 * set the offset.
@@ -592,14 +535,15 @@ mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pSc
592 } 535 }
593 536
594 scsi_print_command(sc); 537 scsi_print_command(sc);
595 printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d\n", 538 printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n",
596 ioc->name, pScsiReply->Bus, pScsiReply->TargetID); 539 ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
597 printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, " 540 printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
598 "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow, 541 "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
599 scsi_get_resid(sc)); 542 scsi_get_resid(sc));
600 printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, " 543 printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, "
601 "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag), 544 "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag),
602 le32_to_cpu(pScsiReply->TransferCount), sc->result); 545 le32_to_cpu(pScsiReply->TransferCount), sc->result);
546
603 printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), " 547 printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), "
604 "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n", 548 "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n",
605 ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus, 549 ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus,
@@ -654,16 +598,14 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
654 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); 598 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
655 req_idx_MR = (mr != NULL) ? 599 req_idx_MR = (mr != NULL) ?
656 le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx; 600 le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;
601
602	/* Special case: a message frame that was already freed is received
603	 * from the firmware. This happens while the IOC is resetting.
604	 * Return immediately; there is nothing more to do.
605 */
657 if ((req_idx != req_idx_MR) || 606 if ((req_idx != req_idx_MR) ||
658 (mf->u.frame.linkage.arg1 == 0xdeadbeaf)) { 607 (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf))
659 printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n",
660 ioc->name);
661 printk (MYIOC_s_ERR_FMT
662 "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n",
663 ioc->name, req_idx, req_idx_MR, mf, mr,
664 mptscsih_get_scsi_lookup(ioc, req_idx_MR));
665 return 0; 608 return 0;
666 }
667 609
668 sc = mptscsih_getclear_scsi_lookup(ioc, req_idx); 610 sc = mptscsih_getclear_scsi_lookup(ioc, req_idx);
669 if (sc == NULL) { 611 if (sc == NULL) {
@@ -810,12 +752,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
810 */ 752 */
811 753
812 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ 754 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
813 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
814 /* Linux handles an unsolicited DID_RESET better 755 /* Linux handles an unsolicited DID_RESET better
815 * than an unsolicited DID_ABORT. 756 * than an unsolicited DID_ABORT.
816 */ 757 */
817 sc->result = DID_RESET << 16; 758 sc->result = DID_RESET << 16;
818 759
760 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
761 if (ioc->bus_type == FC)
762 sc->result = DID_ERROR << 16;
763 else
764 sc->result = DID_RESET << 16;
819 break; 765 break;
820 766
821 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ 767 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
@@ -992,9 +938,9 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
992 scsi_dma_unmap(sc); 938 scsi_dma_unmap(sc);
993 sc->result = DID_RESET << 16; 939 sc->result = DID_RESET << 16;
994 sc->host_scribble = NULL; 940 sc->host_scribble = NULL;
995 sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT 941 dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
996 "completing cmds: fw_channel %d, fw_id %d, sc=%p," 942 "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
997 " mf = %p, idx=%x\n", ioc->name, channel, id, sc, mf, ii); 943 "idx=%x\n", ioc->name, channel, id, sc, mf, ii));
998 sc->scsi_done(sc); 944 sc->scsi_done(sc);
999 } 945 }
1000} 946}
@@ -1053,9 +999,11 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
1053 scsi_dma_unmap(sc); 999 scsi_dma_unmap(sc);
1054 sc->host_scribble = NULL; 1000 sc->host_scribble = NULL;
1055 sc->result = DID_NO_CONNECT << 16; 1001 sc->result = DID_NO_CONNECT << 16;
1056 sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT "completing cmds: fw_channel %d," 1002 dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device,
1057 "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, vdevice->vtarget->channel, 1003 MYIOC_s_FMT "completing cmds: fw_channel %d, "
1058 vdevice->vtarget->id, sc, mf, ii); 1004 "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
1005 vdevice->vtarget->channel, vdevice->vtarget->id,
1006 sc, mf, ii));
1059 sc->scsi_done(sc); 1007 sc->scsi_done(sc);
1060 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1008 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1061 } 1009 }
@@ -1346,7 +1294,6 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1346 MPT_FRAME_HDR *mf; 1294 MPT_FRAME_HDR *mf;
1347 SCSIIORequest_t *pScsiReq; 1295 SCSIIORequest_t *pScsiReq;
1348 VirtDevice *vdevice = SCpnt->device->hostdata; 1296 VirtDevice *vdevice = SCpnt->device->hostdata;
1349 int lun;
1350 u32 datalen; 1297 u32 datalen;
1351 u32 scsictl; 1298 u32 scsictl;
1352 u32 scsidir; 1299 u32 scsidir;
@@ -1357,13 +1304,12 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1357 1304
1358 hd = shost_priv(SCpnt->device->host); 1305 hd = shost_priv(SCpnt->device->host);
1359 ioc = hd->ioc; 1306 ioc = hd->ioc;
1360 lun = SCpnt->device->lun;
1361 SCpnt->scsi_done = done; 1307 SCpnt->scsi_done = done;
1362 1308
1363 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n", 1309 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
1364 ioc->name, SCpnt, done)); 1310 ioc->name, SCpnt, done));
1365 1311
1366 if (hd->resetPending) { 1312 if (ioc->taskmgmt_quiesce_io) {
1367 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n", 1313 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
1368 ioc->name, SCpnt)); 1314 ioc->name, SCpnt));
1369 return SCSI_MLQUEUE_HOST_BUSY; 1315 return SCSI_MLQUEUE_HOST_BUSY;
@@ -1422,7 +1368,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1422 pScsiReq->CDBLength = SCpnt->cmd_len; 1368 pScsiReq->CDBLength = SCpnt->cmd_len;
1423 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; 1369 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
1424 pScsiReq->Reserved = 0; 1370 pScsiReq->Reserved = 0;
1425 pScsiReq->MsgFlags = mpt_msg_flags(); 1371 pScsiReq->MsgFlags = mpt_msg_flags(ioc);
1426 int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN); 1372 int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN);
1427 pScsiReq->Control = cpu_to_le32(scsictl); 1373 pScsiReq->Control = cpu_to_le32(scsictl);
1428 1374
@@ -1448,7 +1394,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1448 */ 1394 */
1449 if (datalen == 0) { 1395 if (datalen == 0) {
1450 /* Add a NULL SGE */ 1396 /* Add a NULL SGE */
1451 mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0, 1397 ioc->add_sge((char *)&pScsiReq->SGL,
1398 MPT_SGE_FLAGS_SSIMPLE_READ | 0,
1452 (dma_addr_t) -1); 1399 (dma_addr_t) -1);
1453 } else { 1400 } else {
1454 /* Add a 32 or 64 bit SGE */ 1401 /* Add a 32 or 64 bit SGE */
@@ -1528,8 +1475,8 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
1528 1475
1529/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1476/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1530/** 1477/**
1531 * mptscsih_TMHandler - Generic handler for SCSI Task Management. 1478 * mptscsih_IssueTaskMgmt - Generic send Task Management function.
1532 * @hd: Pointer to MPT SCSI HOST structure 1479 * @hd: Pointer to MPT_SCSI_HOST structure
1533 * @type: Task Management type 1480 * @type: Task Management type
1534 * @channel: channel number for task management 1481 * @channel: channel number for task management
1535 * @id: Logical Target ID for reset (if appropriate) 1482 * @id: Logical Target ID for reset (if appropriate)
@@ -1537,145 +1484,68 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
1537 * @ctx2abort: Context for the task to be aborted (if appropriate) 1484 * @ctx2abort: Context for the task to be aborted (if appropriate)
1538 * @timeout: timeout for task management control 1485 * @timeout: timeout for task management control
1539 * 1486 *
1540 * Fall through to mpt_HardResetHandler if: not operational, too many 1487 * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
1541 * failed TM requests or handshake failure. 1488 * or a non-interrupt thread. In the former, must not call schedule().
1542 * 1489 *
1543 * Remark: Currently invoked from a non-interrupt thread (_bh). 1490 * Not all fields are meaningful for all task types.
1544 * 1491 *
1545 * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC 1492 * Returns 0 for SUCCESS, or FAILED.
1546 * will be active.
1547 * 1493 *
1548 * Returns 0 for SUCCESS, or %FAILED.
1549 **/ 1494 **/
1550int 1495int
1551mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout) 1496mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
1497 int ctx2abort, ulong timeout)
1552{ 1498{
1553 MPT_ADAPTER *ioc; 1499 MPT_FRAME_HDR *mf;
1554 int rc = -1; 1500 SCSITaskMgmt_t *pScsiTm;
1501 int ii;
1502 int retval;
1503 MPT_ADAPTER *ioc = hd->ioc;
1504 unsigned long timeleft;
1505 u8 issue_hard_reset;
1555 u32 ioc_raw_state; 1506 u32 ioc_raw_state;
1556 unsigned long flags; 1507 unsigned long time_count;
1557
1558 ioc = hd->ioc;
1559 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler Entered!\n", ioc->name));
1560
1561 // SJR - CHECKME - Can we avoid this here?
1562 // (mpt_HardResetHandler has this check...)
1563 spin_lock_irqsave(&ioc->diagLock, flags);
1564 if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) {
1565 spin_unlock_irqrestore(&ioc->diagLock, flags);
1566 return FAILED;
1567 }
1568 spin_unlock_irqrestore(&ioc->diagLock, flags);
1569
1570 /* Wait a fixed amount of time for the TM pending flag to be cleared.
1571 * If we time out and not bus reset, then we return a FAILED status
1572 * to the caller.
1573 * The call to mptscsih_tm_pending_wait() will set the pending flag
1574 * if we are
1575 * successful. Otherwise, reload the FW.
1576 */
1577 if (mptscsih_tm_pending_wait(hd) == FAILED) {
1578 if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
1579 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler abort: "
1580 "Timed out waiting for last TM (%d) to complete! \n",
1581 ioc->name, hd->tmPending));
1582 return FAILED;
1583 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1584 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler target "
1585 "reset: Timed out waiting for last TM (%d) "
1586 "to complete! \n", ioc->name,
1587 hd->tmPending));
1588 return FAILED;
1589 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
1590 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler bus reset: "
1591 "Timed out waiting for last TM (%d) to complete! \n",
1592 ioc->name, hd->tmPending));
1593 return FAILED;
1594 }
1595 } else {
1596 spin_lock_irqsave(&ioc->FreeQlock, flags);
1597 hd->tmPending |= (1 << type);
1598 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1599 }
1600 1508
1509 issue_hard_reset = 0;
1601 ioc_raw_state = mpt_GetIocState(ioc, 0); 1510 ioc_raw_state = mpt_GetIocState(ioc, 0);
1602 1511
1603 if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) { 1512 if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
1604 printk(MYIOC_s_WARN_FMT 1513 printk(MYIOC_s_WARN_FMT
1605 "TM Handler for type=%x: IOC Not operational (0x%x)!\n", 1514 "TaskMgmt type=%x: IOC Not operational (0x%x)!\n",
1606 ioc->name, type, ioc_raw_state); 1515 ioc->name, type, ioc_raw_state);
1607 printk(MYIOC_s_WARN_FMT " Issuing HardReset!!\n", ioc->name); 1516 printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
1517 ioc->name, __func__);
1608 if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) 1518 if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0)
1609 printk(MYIOC_s_WARN_FMT "TMHandler: HardReset " 1519 printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset "
1610 "FAILED!!\n", ioc->name); 1520 "FAILED!!\n", ioc->name);
1611 return FAILED; 1521 return 0;
1612 } 1522 }
1613 1523
1614 if (ioc_raw_state & MPI_DOORBELL_ACTIVE) { 1524 if (ioc_raw_state & MPI_DOORBELL_ACTIVE) {
1615 printk(MYIOC_s_WARN_FMT 1525 printk(MYIOC_s_WARN_FMT
1616 "TM Handler for type=%x: ioc_state: " 1526 "TaskMgmt type=%x: ioc_state: "
1617 "DOORBELL_ACTIVE (0x%x)!\n", 1527 "DOORBELL_ACTIVE (0x%x)!\n",
1618 ioc->name, type, ioc_raw_state); 1528 ioc->name, type, ioc_raw_state);
1619 return FAILED; 1529 return FAILED;
1620 } 1530 }
1621 1531
1622 /* Isse the Task Mgmt request. 1532 mutex_lock(&ioc->taskmgmt_cmds.mutex);
1623 */ 1533 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
1624 if (hd->hard_resets < -1) 1534 mf = NULL;
1625 hd->hard_resets++; 1535 retval = FAILED;
1626 1536 goto out;
1627 rc = mptscsih_IssueTaskMgmt(hd, type, channel, id, lun, 1537 }
1628 ctx2abort, timeout);
1629 if (rc)
1630 printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n",
1631 ioc->name);
1632 else
1633 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issue of TaskMgmt Successful!\n",
1634 ioc->name));
1635
1636 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1637 "TMHandler rc = %d!\n", ioc->name, rc));
1638
1639 return rc;
1640}
1641
1642
1643/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1644/**
1645 * mptscsih_IssueTaskMgmt - Generic send Task Management function.
1646 * @hd: Pointer to MPT_SCSI_HOST structure
1647 * @type: Task Management type
1648 * @channel: channel number for task management
1649 * @id: Logical Target ID for reset (if appropriate)
1650 * @lun: Logical Unit for reset (if appropriate)
1651 * @ctx2abort: Context for the task to be aborted (if appropriate)
1652 * @timeout: timeout for task management control
1653 *
1654 * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
1655 * or a non-interrupt thread. In the former, must not call schedule().
1656 *
1657 * Not all fields are meaningfull for all task types.
1658 *
1659 * Returns 0 for SUCCESS, or FAILED.
1660 *
1661 **/
1662static int
1663mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
1664{
1665 MPT_FRAME_HDR *mf;
1666 SCSITaskMgmt_t *pScsiTm;
1667 int ii;
1668 int retval;
1669 MPT_ADAPTER *ioc = hd->ioc;
1670 1538
1671 /* Return Fail to calling function if no message frames available. 1539 /* Return Fail to calling function if no message frames available.
1672 */ 1540 */
1673 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { 1541 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
1674 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n", 1542 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1675 ioc->name)); 1543 "TaskMgmt no msg frames!!\n", ioc->name));
1676 return FAILED; 1544 retval = FAILED;
1545 mpt_clear_taskmgmt_in_progress_flag(ioc);
1546 goto out;
1677 } 1547 }
1678 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", 1548 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
1679 ioc->name, mf)); 1549 ioc->name, mf));
1680 1550
1681 /* Format the Request 1551 /* Format the Request
@@ -1699,11 +1569,14 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
1699 1569
1700 pScsiTm->TaskMsgContext = ctx2abort; 1570 pScsiTm->TaskMsgContext = ctx2abort;
1701 1571
1702 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) " 1572 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) "
1703 "type=%d\n", ioc->name, ctx2abort, type)); 1573 "task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort,
1574 type, timeout));
1704 1575
1705 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm); 1576 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm);
1706 1577
1578 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
1579 time_count = jiffies;
1707 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 1580 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
1708 (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) 1581 (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
1709 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); 1582 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
@@ -1711,47 +1584,50 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
1711 retval = mpt_send_handshake_request(ioc->TaskCtx, ioc, 1584 retval = mpt_send_handshake_request(ioc->TaskCtx, ioc,
1712 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); 1585 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
1713 if (retval) { 1586 if (retval) {
1714 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!" 1587 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1715 " (hd %p, ioc %p, mf %p, rc=%d) \n", ioc->name, hd, 1588 "TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n",
1716 ioc, mf, retval)); 1589 ioc->name, mf, retval));
1717 goto fail_out; 1590 mpt_free_msg_frame(ioc, mf);
1591 mpt_clear_taskmgmt_in_progress_flag(ioc);
1592 goto out;
1718 } 1593 }
1719 } 1594 }
1720 1595
1721 if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) { 1596 timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
1722 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!" 1597 timeout*HZ);
1723 " (hd %p, ioc %p, mf %p) \n", ioc->name, hd, 1598 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
1724 ioc, mf)); 1599 retval = FAILED;
1725 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", 1600 dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
1726 ioc->name)); 1601 "TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf));
1727 retval = mpt_HardResetHandler(ioc, CAN_SLEEP); 1602 mpt_clear_taskmgmt_in_progress_flag(ioc);
1728 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n", 1603 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
1729 ioc->name, retval)); 1604 goto out;
1730 goto fail_out; 1605 issue_hard_reset = 1;
1606 goto out;
1731 } 1607 }
1732 1608
1733 /* 1609 retval = mptscsih_taskmgmt_reply(ioc, type,
1734 * Handle success case, see if theres a non-zero ioc_status. 1610 (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply);
1735 */
1736 if (hd->tm_iocstatus == MPI_IOCSTATUS_SUCCESS ||
1737 hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
1738 hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
1739 retval = 0;
1740 else
1741 retval = FAILED;
1742 1611
1743 return retval; 1612 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1613 "TaskMgmt completed (%d seconds)\n",
1614 ioc->name, jiffies_to_msecs(jiffies - time_count)/1000));
1744 1615
1745 fail_out: 1616 out:
1746 1617
1747 /* 1618 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
1748 * Free task management mf, and corresponding tm flags 1619 if (issue_hard_reset) {
1749 */ 1620 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
1750 mpt_free_msg_frame(ioc, mf); 1621 ioc->name, __func__);
1751 hd->tmPending = 0; 1622 retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
1752 hd->tmState = TM_STATE_NONE; 1623 mpt_free_msg_frame(ioc, mf);
1753 return FAILED; 1624 }
1625
1626 retval = (retval == 0) ? 0 : FAILED;
1627 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
1628 return retval;
1754} 1629}
1630EXPORT_SYMBOL(mptscsih_IssueTaskMgmt);
1755 1631
1756static int 1632static int
1757mptscsih_get_tm_timeout(MPT_ADAPTER *ioc) 1633mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
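(Usage sketch, not part of the commit.) With mptscsih_TMHandler gone, the SCSI error-handler entry points further down call mptscsih_IssueTaskMgmt() directly. A minimal illustration of such a call, mirroring the target-reset case added in mptscsih_dev_reset(); the helper name is hypothetical, and the lun/ctx2abort arguments only matter for ABORT_TASK:

    /* Hypothetical wrapper: issue a target reset via the new entry point. */
    static int example_target_reset(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
    {
            MPT_ADAPTER *ioc = hd->ioc;

            /* returns 0 on success, FAILED otherwise */
            return mptscsih_IssueTaskMgmt(hd,
                MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
                vdevice->vtarget->channel,
                vdevice->vtarget->id,
                0,                      /* lun: unused for a target reset */
                0,                      /* ctx2abort: unused for a target reset */
                mptscsih_get_tm_timeout(ioc));
    }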
@@ -1838,13 +1714,8 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1838 goto out; 1714 goto out;
1839 } 1715 }
1840 1716
1841 if (hd->resetPending) { 1717 if (ioc->timeouts < -1)
1842 retval = FAILED; 1718 ioc->timeouts++;
1843 goto out;
1844 }
1845
1846 if (hd->timeouts < -1)
1847 hd->timeouts++;
1848 1719
1849 if (mpt_fwfault_debug) 1720 if (mpt_fwfault_debug)
1850 mpt_halt_firmware(ioc); 1721 mpt_halt_firmware(ioc);
@@ -1861,22 +1732,30 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1861 1732
1862 hd->abortSCpnt = SCpnt; 1733 hd->abortSCpnt = SCpnt;
1863 1734
1864 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 1735 retval = mptscsih_IssueTaskMgmt(hd,
1865 vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun, 1736 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
1866 ctx2abort, mptscsih_get_tm_timeout(ioc)); 1737 vdevice->vtarget->channel,
1738 vdevice->vtarget->id, vdevice->lun,
1739 ctx2abort, mptscsih_get_tm_timeout(ioc));
1867 1740
1868 if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx && 1741 if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx &&
1869 SCpnt->serial_number == sn) 1742 SCpnt->serial_number == sn) {
1743 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1744 "task abort: command still in active list! (sc=%p)\n",
1745 ioc->name, SCpnt));
1870 retval = FAILED; 1746 retval = FAILED;
1747 } else {
1748 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1749 "task abort: command cleared from active list! (sc=%p)\n",
1750 ioc->name, SCpnt));
1751 retval = SUCCESS;
1752 }
1871 1753
1872 out: 1754 out:
1873 printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", 1755 printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
1874 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1756 ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
1875 1757
1876 if (retval == 0) 1758 return retval;
1877 return SUCCESS;
1878 else
1879 return FAILED;
1880} 1759}
1881 1760
1882/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1761/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1909,14 +1788,9 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1909 ioc->name, SCpnt); 1788 ioc->name, SCpnt);
1910 scsi_print_command(SCpnt); 1789 scsi_print_command(SCpnt);
1911 1790
1912 if (hd->resetPending) {
1913 retval = FAILED;
1914 goto out;
1915 }
1916
1917 vdevice = SCpnt->device->hostdata; 1791 vdevice = SCpnt->device->hostdata;
1918 if (!vdevice || !vdevice->vtarget) { 1792 if (!vdevice || !vdevice->vtarget) {
1919 retval = 0; 1793 retval = SUCCESS;
1920 goto out; 1794 goto out;
1921 } 1795 }
1922 1796
@@ -1927,9 +1801,11 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1927 goto out; 1801 goto out;
1928 } 1802 }
1929 1803
1930 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 1804 retval = mptscsih_IssueTaskMgmt(hd,
1931 vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0, 1805 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
1932 mptscsih_get_tm_timeout(ioc)); 1806 vdevice->vtarget->channel,
1807 vdevice->vtarget->id, 0, 0,
1808 mptscsih_get_tm_timeout(ioc));
1933 1809
1934 out: 1810 out:
1935 printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n", 1811 printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
@@ -1972,12 +1848,16 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
1972 ioc->name, SCpnt); 1848 ioc->name, SCpnt);
1973 scsi_print_command(SCpnt); 1849 scsi_print_command(SCpnt);
1974 1850
1975 if (hd->timeouts < -1) 1851 if (ioc->timeouts < -1)
1976 hd->timeouts++; 1852 ioc->timeouts++;
1977 1853
1978 vdevice = SCpnt->device->hostdata; 1854 vdevice = SCpnt->device->hostdata;
1979 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1855 if (!vdevice || !vdevice->vtarget)
1980 vdevice->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc)); 1856 return SUCCESS;
1857 retval = mptscsih_IssueTaskMgmt(hd,
1858 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1859 vdevice->vtarget->channel, 0, 0, 0,
1860 mptscsih_get_tm_timeout(ioc));
1981 1861
1982 printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n", 1862 printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
1983 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1863 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
@@ -2001,8 +1881,9 @@ int
2001mptscsih_host_reset(struct scsi_cmnd *SCpnt) 1881mptscsih_host_reset(struct scsi_cmnd *SCpnt)
2002{ 1882{
2003 MPT_SCSI_HOST * hd; 1883 MPT_SCSI_HOST * hd;
2004 int retval; 1884 int status = SUCCESS;
2005 MPT_ADAPTER *ioc; 1885 MPT_ADAPTER *ioc;
1886 int retval;
2006 1887
2007 /* If we can't locate the host to reset, then we failed. */ 1888 /* If we can't locate the host to reset, then we failed. */
2008 if ((hd = shost_priv(SCpnt->device->host)) == NULL){ 1889 if ((hd = shost_priv(SCpnt->device->host)) == NULL){
@@ -2021,86 +1902,71 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
2021 /* If our attempts to reset the host failed, then return a failed 1902 /* If our attempts to reset the host failed, then return a failed
2022 * status. The host will be taken off line by the SCSI mid-layer. 1903 * status. The host will be taken off line by the SCSI mid-layer.
2023 */ 1904 */
2024 if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) { 1905 retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
2025 retval = FAILED; 1906 if (retval < 0)
2026 } else { 1907 status = FAILED;
2027 /* Make sure TM pending is cleared and TM state is set to 1908 else
2028 * NONE. 1909 status = SUCCESS;
2029 */
2030 retval = 0;
2031 hd->tmPending = 0;
2032 hd->tmState = TM_STATE_NONE;
2033 }
2034 1910
2035 printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n", 1911 printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
2036 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1912 ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
2037 1913
2038 return retval; 1914 return status;
2039} 1915}
2040 1916
2041/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2042/**
2043 * mptscsih_tm_pending_wait - wait for pending task management request to complete
2044 * @hd: Pointer to MPT host structure.
2045 *
2046 * Returns {SUCCESS,FAILED}.
2047 */
2048static int 1917static int
2049mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd) 1918mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
1919 SCSITaskMgmtReply_t *pScsiTmReply)
2050{ 1920{
2051 unsigned long flags; 1921 u16 iocstatus;
2052 int loop_count = 4 * 10; /* Wait 10 seconds */ 1922 u32 termination_count;
2053 int status = FAILED; 1923 int retval;
2054 MPT_ADAPTER *ioc = hd->ioc;
2055 1924
2056 do { 1925 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
2057 spin_lock_irqsave(&ioc->FreeQlock, flags); 1926 retval = FAILED;
2058 if (hd->tmState == TM_STATE_NONE) { 1927 goto out;
2059 hd->tmState = TM_STATE_IN_PROGRESS; 1928 }
2060 hd->tmPending = 1;
2061 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2062 status = SUCCESS;
2063 break;
2064 }
2065 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2066 msleep(250);
2067 } while (--loop_count);
2068 1929
2069 return status; 1930 DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
2070}
2071 1931
2072/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1932 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2073/** 1933 termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
2074 * mptscsih_tm_wait_for_completion - wait for completion of TM task
2075 * @hd: Pointer to MPT host structure.
2076 * @timeout: timeout value
2077 *
2078 * Returns {SUCCESS,FAILED}.
2079 */
2080static int
2081mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
2082{
2083 unsigned long flags;
2084 int loop_count = 4 * timeout;
2085 int status = FAILED;
2086 MPT_ADAPTER *ioc = hd->ioc;
2087 1934
2088 do { 1935 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2089 spin_lock_irqsave(&ioc->FreeQlock, flags); 1936 "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n"
2090 if(hd->tmPending == 0) { 1937 "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n"
2091 status = SUCCESS; 1938 "\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus,
2092 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 1939 pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus),
2093 break; 1940 le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode,
2094 } 1941 termination_count));
2095 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2096 msleep(250);
2097 } while (--loop_count);
2098 1942
2099 return status; 1943 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
1944 pScsiTmReply->ResponseCode)
1945 mptscsih_taskmgmt_response_code(ioc,
1946 pScsiTmReply->ResponseCode);
1947
1948 if (iocstatus == MPI_IOCSTATUS_SUCCESS) {
1949 retval = 0;
1950 goto out;
1951 }
1952
1953 retval = FAILED;
1954 if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
1955 if (termination_count == 1)
1956 retval = 0;
1957 goto out;
1958 }
1959
1960 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
1961 iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
1962 retval = 0;
1963
1964 out:
1965 return retval;
2100} 1966}
2101 1967
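(Illustrative sketch, not part of the commit.) The old msleep()-polling helpers mptscsih_tm_pending_wait() and mptscsih_tm_wait_for_completion() are replaced by a completion-based handshake: the issuer arms ioc->taskmgmt_cmds and sleeps in wait_for_completion_timeout(), while the reply path records status flags and calls complete(). The real flag macros (INITIALIZE_MGMT_STATUS, MPT_MGMT_STATUS_*) live in mptbase.h, which is not part of this hunk; the fragment below models the same pattern with generic kernel primitives and made-up flag names:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct ex_mgmt_cmds {                   /* stand-in for ioc->taskmgmt_cmds */
            struct completion done;
            unsigned long     status;
    };

    #define EX_STATUS_PENDING       (1UL << 0)
    #define EX_STATUS_COMMAND_GOOD  (1UL << 1)

    /* Issuer: arm the state, send the frame, then sleep until the reply
     * callback (or the timeout) wakes us. */
    static int ex_issue_and_wait(struct ex_mgmt_cmds *cmds, unsigned long timeout)
    {
            init_completion(&cmds->done);
            cmds->status = EX_STATUS_PENDING;
            /* ... hand the task-management frame to the firmware here ... */
            wait_for_completion_timeout(&cmds->done, timeout * HZ);
            return (cmds->status & EX_STATUS_COMMAND_GOOD) ? 0 : -ETIME;
    }

    /* Reply side: record the result and wake the waiter, as
     * mptscsih_taskmgmt_complete() below does for the real driver. */
    static void ex_reply_seen(struct ex_mgmt_cmds *cmds)
    {
            cmds->status |= EX_STATUS_COMMAND_GOOD;
            if (cmds->status & EX_STATUS_PENDING) {
                    cmds->status &= ~EX_STATUS_PENDING;
                    complete(&cmds->done);
            }
    }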
2102/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1968/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2103static void 1969void
2104mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code) 1970mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
2105{ 1971{
2106 char *desc; 1972 char *desc;
@@ -2134,6 +2000,7 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
2134 printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n", 2000 printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
2135 ioc->name, response_code, desc); 2001 ioc->name, response_code, desc);
2136} 2002}
2003EXPORT_SYMBOL(mptscsih_taskmgmt_response_code);
2137 2004
2138/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2005/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2139/** 2006/**
@@ -2150,97 +2017,28 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
2150 * Returns 1 indicating alloc'd request frame ptr should be freed. 2017 * Returns 1 indicating alloc'd request frame ptr should be freed.
2151 **/ 2018 **/
2152int 2019int
2153mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) 2020mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
2021 MPT_FRAME_HDR *mr)
2154{ 2022{
2155 SCSITaskMgmtReply_t *pScsiTmReply; 2023 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2156 SCSITaskMgmt_t *pScsiTmReq; 2024 "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr));
2157 MPT_SCSI_HOST *hd;
2158 unsigned long flags;
2159 u16 iocstatus;
2160 u8 tmType;
2161 u32 termination_count;
2162
2163 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed (mf=%p,mr=%p)\n",
2164 ioc->name, mf, mr));
2165 if (!ioc->sh) {
2166 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
2167 "TaskMgmt Complete: NULL Scsi Host Ptr\n", ioc->name));
2168 return 1;
2169 }
2170
2171 if (mr == NULL) {
2172 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
2173 "ERROR! TaskMgmt Reply: NULL Request %p\n", ioc->name, mf));
2174 return 1;
2175 }
2176
2177 hd = shost_priv(ioc->sh);
2178 pScsiTmReply = (SCSITaskMgmtReply_t*)mr;
2179 pScsiTmReq = (SCSITaskMgmt_t*)mf;
2180 tmType = pScsiTmReq->TaskType;
2181 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2182 termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
2183 2025
2184 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 && 2026 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
2185 pScsiTmReply->ResponseCode)
2186 mptscsih_taskmgmt_response_code(ioc,
2187 pScsiTmReply->ResponseCode);
2188 DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
2189 2027
2190#ifdef CONFIG_FUSION_LOGGING 2028 if (!mr)
2191 if ((ioc->debug_level & MPT_DEBUG_REPLY) ||
2192 (ioc->debug_level & MPT_DEBUG_TM ))
2193 printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
2194 "iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
2195 "term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
2196 pScsiTmReply->TargetID, pScsiTmReq->TaskType,
2197 le16_to_cpu(pScsiTmReply->IOCStatus),
2198 le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
2199 le32_to_cpu(pScsiTmReply->TerminationCount));
2200#endif
2201 if (!iocstatus) {
2202 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT " TaskMgmt SUCCESS\n", ioc->name));
2203 hd->abortSCpnt = NULL;
2204 goto out; 2029 goto out;
2205 }
2206
2207 /* Error? (anything non-zero?) */
2208
2209 /* clear flags and continue.
2210 */
2211 switch (tmType) {
2212
2213 case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
2214 if (termination_count == 1)
2215 iocstatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED;
2216 hd->abortSCpnt = NULL;
2217 break;
2218
2219 case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS:
2220
2221 /* If an internal command is present
2222 * or the TM failed - reload the FW.
2223 * FC FW may respond FAILED to an ABORT
2224 */
2225 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED ||
2226 hd->cmdPtr)
2227 if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
2228 printk(MYIOC_s_WARN_FMT " Firmware Reload FAILED!!\n", ioc->name);
2229 break;
2230
2231 case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2232 default:
2233 break;
2234 }
2235 2030
2031 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
2032 memcpy(ioc->taskmgmt_cmds.reply, mr,
2033 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
2236 out: 2034 out:
2237 spin_lock_irqsave(&ioc->FreeQlock, flags); 2035 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
2238 hd->tmPending = 0; 2036 mpt_clear_taskmgmt_in_progress_flag(ioc);
2239 hd->tmState = TM_STATE_NONE; 2037 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
2240 hd->tm_iocstatus = iocstatus; 2038 complete(&ioc->taskmgmt_cmds.done);
2241 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 2039 return 1;
2242 2040 }
2243 return 1; 2041 return 0;
2244} 2042}
2245 2043
2246/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2044/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2290,8 +2088,10 @@ int
2290mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id) 2088mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
2291{ 2089{
2292 struct inactive_raid_component_info *component_info; 2090 struct inactive_raid_component_info *component_info;
2293 int i; 2091 int i, j;
2092 RaidPhysDiskPage1_t *phys_disk;
2294 int rc = 0; 2093 int rc = 0;
2094 int num_paths;
2295 2095
2296 if (!ioc->raid_data.pIocPg3) 2096 if (!ioc->raid_data.pIocPg3)
2297 goto out; 2097 goto out;
@@ -2303,6 +2103,45 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
2303 } 2103 }
2304 } 2104 }
2305 2105
2106 if (ioc->bus_type != SAS)
2107 goto out;
2108
2109 /*
2110 * Check if dual path
2111 */
2112 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
2113 num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
2114 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
2115 if (num_paths < 2)
2116 continue;
2117 phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
2118 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
2119 if (!phys_disk)
2120 continue;
2121 if ((mpt_raid_phys_disk_pg1(ioc,
2122 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
2123 phys_disk))) {
2124 kfree(phys_disk);
2125 continue;
2126 }
2127 for (j = 0; j < num_paths; j++) {
2128 if ((phys_disk->Path[j].Flags &
2129 MPI_RAID_PHYSDISK1_FLAG_INVALID))
2130 continue;
2131 if ((phys_disk->Path[j].Flags &
2132 MPI_RAID_PHYSDISK1_FLAG_BROKEN))
2133 continue;
2134 if ((id == phys_disk->Path[j].PhysDiskID) &&
2135 (channel == phys_disk->Path[j].PhysDiskBus)) {
2136 rc = 1;
2137 kfree(phys_disk);
2138 goto out;
2139 }
2140 }
2141 kfree(phys_disk);
2142 }
2143
2144
2306 /* 2145 /*
2307 * Check inactive list for matching phys disks 2146 * Check inactive list for matching phys disks
2308 */ 2147 */
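(Sizing sketch, not part of the commit.) The dual-path loops added to mptscsih_is_phys_disk() above and mptscsih_raid_id_to_num() below walk the per-disk path table that dual-ported SAS drives report. RaidPhysDiskPage1_t ends in a variable-length Path[] array, so the page buffer is sized as the fixed header plus num_paths path entries before mpt_raid_phys_disk_pg1() fills it in. A minimal illustration of just that allocation step (the helper name is hypothetical; the driver open-codes this in each loop):

    /* Allocate a RAID PhysDisk Page 1 buffer large enough for num_paths
     * RAID_PHYS_DISK1_PATH entries; the caller must kfree() it. */
    static RaidPhysDiskPage1_t *ex_alloc_phys_disk_pg1(int num_paths)
    {
            size_t sz = offsetof(RaidPhysDiskPage1_t, Path) +
                        num_paths * sizeof(RAID_PHYS_DISK1_PATH);

            return kzalloc(sz, GFP_KERNEL);
    }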
@@ -2327,8 +2166,10 @@ u8
2327mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id) 2166mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
2328{ 2167{
2329 struct inactive_raid_component_info *component_info; 2168 struct inactive_raid_component_info *component_info;
2330 int i; 2169 int i, j;
2170 RaidPhysDiskPage1_t *phys_disk;
2331 int rc = -ENXIO; 2171 int rc = -ENXIO;
2172 int num_paths;
2332 2173
2333 if (!ioc->raid_data.pIocPg3) 2174 if (!ioc->raid_data.pIocPg3)
2334 goto out; 2175 goto out;
@@ -2340,6 +2181,44 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
2340 } 2181 }
2341 } 2182 }
2342 2183
2184 if (ioc->bus_type != SAS)
2185 goto out;
2186
2187 /*
2188 * Check if dual path
2189 */
2190 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
2191 num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
2192 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
2193 if (num_paths < 2)
2194 continue;
2195 phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
2196 (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
2197 if (!phys_disk)
2198 continue;
2199 if ((mpt_raid_phys_disk_pg1(ioc,
2200 ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
2201 phys_disk))) {
2202 kfree(phys_disk);
2203 continue;
2204 }
2205 for (j = 0; j < num_paths; j++) {
2206 if ((phys_disk->Path[j].Flags &
2207 MPI_RAID_PHYSDISK1_FLAG_INVALID))
2208 continue;
2209 if ((phys_disk->Path[j].Flags &
2210 MPI_RAID_PHYSDISK1_FLAG_BROKEN))
2211 continue;
2212 if ((id == phys_disk->Path[j].PhysDiskID) &&
2213 (channel == phys_disk->Path[j].PhysDiskBus)) {
2214 rc = phys_disk->PhysDiskNum;
2215 kfree(phys_disk);
2216 goto out;
2217 }
2218 }
2219 kfree(phys_disk);
2220 }
2221
2343 /* 2222 /*
2344 * Check inactive list for matching phys disks 2223 * Check inactive list for matching phys disks
2345 */ 2224 */
@@ -2457,7 +2336,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
2457 sdev->ppr, sdev->inquiry_len)); 2336 sdev->ppr, sdev->inquiry_len));
2458 2337
2459 vdevice->configured_lun = 1; 2338 vdevice->configured_lun = 1;
2460 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
2461 2339
2462 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2340 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2463 "Queue depth=%d, tflags=%x\n", 2341 "Queue depth=%d, tflags=%x\n",
@@ -2469,6 +2347,7 @@ mptscsih_slave_configure(struct scsi_device *sdev)
2469 ioc->name, vtarget->negoFlags, vtarget->maxOffset, 2347 ioc->name, vtarget->negoFlags, vtarget->maxOffset,
2470 vtarget->minSyncFactor)); 2348 vtarget->minSyncFactor));
2471 2349
2350 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
2472 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2351 dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2473 "tagged %d, simple %d, ordered %d\n", 2352 "tagged %d, simple %d, ordered %d\n",
2474 ioc->name,sdev->tagged_supported, sdev->simple_tags, 2353 ioc->name,sdev->tagged_supported, sdev->simple_tags,
@@ -2542,15 +2421,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2542} 2421}
2543 2422
2544/** 2423/**
2545 * mptscsih_get_scsi_lookup 2424 * mptscsih_get_scsi_lookup - retrieves scmd entry
2546 * @ioc: Pointer to MPT_ADAPTER structure 2425 * @ioc: Pointer to MPT_ADAPTER structure
2547 * @i: index into the array 2426 * @i: index into the array
2548 * 2427 *
2549 * retrieves scmd entry from ScsiLookup[] array list
2550 *
2551 * Returns the scsi_cmd pointer 2428 * Returns the scsi_cmd pointer
2552 **/ 2429 */
2553static struct scsi_cmnd * 2430struct scsi_cmnd *
2554mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) 2431mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
2555{ 2432{
2556 unsigned long flags; 2433 unsigned long flags;
@@ -2562,15 +2439,15 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
2562 2439
2563 return scmd; 2440 return scmd;
2564} 2441}
2442EXPORT_SYMBOL(mptscsih_get_scsi_lookup);
2565 2443
2566/** 2444/**
2567 * mptscsih_getclear_scsi_lookup 2445 * mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list
2568 * @ioc: Pointer to MPT_ADAPTER structure 2446 * @ioc: Pointer to MPT_ADAPTER structure
2569 * @i: index into the array 2447 * @i: index into the array
2570 * 2448 *
2571 * retrieves and clears scmd entry from ScsiLookup[] array list
2572 *
2573 * Returns the scsi_cmd pointer 2449 * Returns the scsi_cmd pointer
2450 *
2574 **/ 2451 **/
2575static struct scsi_cmnd * 2452static struct scsi_cmnd *
2576mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) 2453mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
@@ -2635,94 +2512,33 @@ int
2635mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 2512mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2636{ 2513{
2637 MPT_SCSI_HOST *hd; 2514 MPT_SCSI_HOST *hd;
2638 unsigned long flags;
2639 2515
2640 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2641 ": IOC %s_reset routed to SCSI host driver!\n",
2642 ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
2643 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
2644
2645 /* If a FW reload request arrives after base installed but
2646 * before all scsi hosts have been attached, then an alt_ioc
2647 * may have a NULL sh pointer.
2648 */
2649 if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL) 2516 if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
2650 return 0; 2517 return 0;
2651 else
2652 hd = shost_priv(ioc->sh);
2653
2654 if (reset_phase == MPT_IOC_SETUP_RESET) {
2655 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Setup-Diag Reset\n", ioc->name));
2656
2657 /* Clean Up:
2658 * 1. Set Hard Reset Pending Flag
2659 * All new commands go to doneQ
2660 */
2661 hd->resetPending = 1;
2662
2663 } else if (reset_phase == MPT_IOC_PRE_RESET) {
2664 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Diag Reset\n", ioc->name));
2665 2518
2666 /* 2. Flush running commands 2519 hd = shost_priv(ioc->sh);
2667 * Clean ScsiLookup (and associated memory) 2520 switch (reset_phase) {
2668 * AND clean mytaskQ 2521 case MPT_IOC_SETUP_RESET:
2669 */ 2522 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2670 2523 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
2671 /* 2b. Reply to OS all known outstanding I/O commands. 2524 break;
2672 */ 2525 case MPT_IOC_PRE_RESET:
2526 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2527 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
2673 mptscsih_flush_running_cmds(hd); 2528 mptscsih_flush_running_cmds(hd);
2674 2529 break;
2675 /* 2c. If there was an internal command that 2530 case MPT_IOC_POST_RESET:
2676 * has not completed, configuration or io request, 2531 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2677 * free these resources. 2532 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
2678 */ 2533 if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
2679 if (hd->cmdPtr) { 2534 ioc->internal_cmds.status |=
2680 del_timer(&hd->timer); 2535 MPT_MGMT_STATUS_DID_IOCRESET;
2681 mpt_free_msg_frame(ioc, hd->cmdPtr); 2536 complete(&ioc->internal_cmds.done);
2682 }
2683
2684 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Reset complete.\n", ioc->name));
2685
2686 } else {
2687 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Diag Reset\n", ioc->name));
2688
2689 /* Once a FW reload begins, all new OS commands are
2690 * redirected to the doneQ w/ a reset status.
2691 * Init all control structures.
2692 */
2693
2694 /* 2. Chain Buffer initialization
2695 */
2696
2697 /* 4. Renegotiate to all devices, if SPI
2698 */
2699
2700 /* 5. Enable new commands to be posted
2701 */
2702 spin_lock_irqsave(&ioc->FreeQlock, flags);
2703 hd->tmPending = 0;
2704 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2705 hd->resetPending = 0;
2706 hd->tmState = TM_STATE_NONE;
2707
2708 /* 6. If there was an internal command,
2709 * wake this process up.
2710 */
2711 if (hd->cmdPtr) {
2712 /*
2713 * Wake up the original calling thread
2714 */
2715 hd->pLocal = &hd->localReply;
2716 hd->pLocal->completion = MPT_SCANDV_DID_RESET;
2717 hd->scandv_wait_done = 1;
2718 wake_up(&hd->scandv_waitq);
2719 hd->cmdPtr = NULL;
2720 } 2537 }
2721 2538 break;
2722 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Reset complete.\n", ioc->name)); 2539 default:
2723 2540 break;
2724 } 2541 }
2725
2726 return 1; /* currently means nothing really */ 2542 return 1; /* currently means nothing really */
2727} 2543}
2728 2544
@@ -2730,55 +2546,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2730int 2546int
2731mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) 2547mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2732{ 2548{
2733 MPT_SCSI_HOST *hd;
2734 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; 2549 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
2735 2550
2736 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", 2551 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2737 ioc->name, event)); 2552 "MPT event (=%02Xh) routed to SCSI host driver!\n",
2738 2553 ioc->name, event));
2739 if (ioc->sh == NULL ||
2740 ((hd = shost_priv(ioc->sh)) == NULL))
2741 return 1;
2742
2743 switch (event) {
2744 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
2745 /* FIXME! */
2746 break;
2747 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
2748 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
2749 if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1))
2750 hd->soft_resets++;
2751 break;
2752 case MPI_EVENT_LOGOUT: /* 09 */
2753 /* FIXME! */
2754 break;
2755
2756 case MPI_EVENT_RESCAN: /* 06 */
2757 break;
2758
2759 /*
2760 * CHECKME! Don't think we need to do
2761 * anything for these, but...
2762 */
2763 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
2764 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
2765 /*
2766 * CHECKME! Falling thru...
2767 */
2768 break;
2769
2770 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
2771 break;
2772 2554
2773 case MPI_EVENT_NONE: /* 00 */ 2555 if ((event == MPI_EVENT_IOC_BUS_RESET ||
2774 case MPI_EVENT_LOG_DATA: /* 01 */ 2556 event == MPI_EVENT_EXT_BUS_RESET) &&
2775 case MPI_EVENT_STATE_CHANGE: /* 02 */ 2557 (ioc->bus_type == SPI) && (ioc->soft_resets < -1))
2776 case MPI_EVENT_EVENT_CHANGE: /* 0A */ 2558 ioc->soft_resets++;
2777 default:
2778 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": Ignoring event (=%02Xh)\n",
2779 ioc->name, event));
2780 break;
2781 }
2782 2559
2783 return 1; /* currently means nothing really */ 2560 return 1; /* currently means nothing really */
2784} 2561}
@@ -2809,153 +2586,44 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2809 * Used ONLY for DV and other internal commands. 2586 * Used ONLY for DV and other internal commands.
2810 */ 2587 */
2811int 2588int
2812mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) 2589mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
2590 MPT_FRAME_HDR *reply)
2813{ 2591{
2814 MPT_SCSI_HOST *hd;
2815 SCSIIORequest_t *pReq; 2592 SCSIIORequest_t *pReq;
2816 int completionCode; 2593 SCSIIOReply_t *pReply;
2594 u8 cmd;
2817 u16 req_idx; 2595 u16 req_idx;
2596 u8 *sense_data;
2597 int sz;
2818 2598
2819 hd = shost_priv(ioc->sh); 2599 ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
2820 2600 ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD;
2821 if ((mf == NULL) || 2601 if (!reply)
2822 (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { 2602 goto out;
2823 printk(MYIOC_s_ERR_FMT
2824 "ScanDvComplete, %s req frame ptr! (=%p)\n",
2825 ioc->name, mf?"BAD":"NULL", (void *) mf);
2826 goto wakeup;
2827 }
2828
2829 del_timer(&hd->timer);
2830 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
2831 mptscsih_set_scsi_lookup(ioc, req_idx, NULL);
2832 pReq = (SCSIIORequest_t *) mf;
2833 2603
2834 if (mf != hd->cmdPtr) { 2604 pReply = (SCSIIOReply_t *) reply;
2835 printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n", 2605 pReq = (SCSIIORequest_t *) req;
2836 ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx); 2606 ioc->internal_cmds.completion_code =
2607 mptscsih_get_completion_code(ioc, req, reply);
2608 ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
2609 memcpy(ioc->internal_cmds.reply, reply,
2610 min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
2611 cmd = reply->u.hdr.Function;
2612 if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
2613 (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) &&
2614 (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
2615 req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
2616 sense_data = ((u8 *)ioc->sense_buf_pool +
2617 (req_idx * MPT_SENSE_BUFFER_ALLOC));
2618 sz = min_t(int, pReq->SenseBufferLength,
2619 MPT_SENSE_BUFFER_ALLOC);
2620 memcpy(ioc->internal_cmds.sense, sense_data, sz);
2837 } 2621 }
2838 hd->cmdPtr = NULL; 2622 out:
2839 2623 if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING))
2840 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n", 2624 return 0;
2841 ioc->name, mf, mr, req_idx)); 2625 ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
2842 2626 complete(&ioc->internal_cmds.done);
2843 hd->pLocal = &hd->localReply;
2844 hd->pLocal->scsiStatus = 0;
2845
2846 /* If target struct exists, clear sense valid flag.
2847 */
2848 if (mr == NULL) {
2849 completionCode = MPT_SCANDV_GOOD;
2850 } else {
2851 SCSIIOReply_t *pReply;
2852 u16 status;
2853 u8 scsi_status;
2854
2855 pReply = (SCSIIOReply_t *) mr;
2856
2857 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2858 scsi_status = pReply->SCSIStatus;
2859
2860
2861 switch(status) {
2862
2863 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
2864 completionCode = MPT_SCANDV_SELECTION_TIMEOUT;
2865 break;
2866
2867 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
2868 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
2869 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
2870 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
2871 completionCode = MPT_SCANDV_DID_RESET;
2872 break;
2873
2874 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
2875 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
2876 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
2877 if (pReply->Function == MPI_FUNCTION_CONFIG) {
2878 ConfigReply_t *pr = (ConfigReply_t *)mr;
2879 completionCode = MPT_SCANDV_GOOD;
2880 hd->pLocal->header.PageVersion = pr->Header.PageVersion;
2881 hd->pLocal->header.PageLength = pr->Header.PageLength;
2882 hd->pLocal->header.PageNumber = pr->Header.PageNumber;
2883 hd->pLocal->header.PageType = pr->Header.PageType;
2884
2885 } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
2886 /* If the RAID Volume request is successful,
2887 * return GOOD, else indicate that
2888 * some type of error occurred.
2889 */
2890 MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr;
2891 if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS)
2892 completionCode = MPT_SCANDV_GOOD;
2893 else
2894 completionCode = MPT_SCANDV_SOME_ERROR;
2895 memcpy(hd->pLocal->sense, pr, sizeof(hd->pLocal->sense));
2896
2897 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
2898 u8 *sense_data;
2899 int sz;
2900
2901 /* save sense data in global structure
2902 */
2903 completionCode = MPT_SCANDV_SENSE;
2904 hd->pLocal->scsiStatus = scsi_status;
2905 sense_data = ((u8 *)ioc->sense_buf_pool +
2906 (req_idx * MPT_SENSE_BUFFER_ALLOC));
2907
2908 sz = min_t(int, pReq->SenseBufferLength,
2909 SCSI_STD_SENSE_BYTES);
2910 memcpy(hd->pLocal->sense, sense_data, sz);
2911
2912 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Check Condition, sense ptr %p\n",
2913 ioc->name, sense_data));
2914 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
2915 if (pReq->CDB[0] == INQUIRY)
2916 completionCode = MPT_SCANDV_ISSUE_SENSE;
2917 else
2918 completionCode = MPT_SCANDV_DID_RESET;
2919 }
2920 else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
2921 completionCode = MPT_SCANDV_DID_RESET;
2922 else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2923 completionCode = MPT_SCANDV_DID_RESET;
2924 else {
2925 completionCode = MPT_SCANDV_GOOD;
2926 hd->pLocal->scsiStatus = scsi_status;
2927 }
2928 break;
2929
2930 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
2931 if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2932 completionCode = MPT_SCANDV_DID_RESET;
2933 else
2934 completionCode = MPT_SCANDV_SOME_ERROR;
2935 break;
2936
2937 default:
2938 completionCode = MPT_SCANDV_SOME_ERROR;
2939 break;
2940
2941 } /* switch(status) */
2942
2943 } /* end of address reply case */
2944
2945 hd->pLocal->completion = completionCode;
2946
2947 /* MF and RF are freed in mpt_interrupt
2948 */
2949wakeup:
2950 /* Free Chain buffers (will never chain) in scan or dv */
2951 //mptscsih_freeChainBuffers(ioc, req_idx);
2952
2953 /*
2954 * Wake up the original calling thread
2955 */
2956 hd->scandv_wait_done = 1;
2957 wake_up(&hd->scandv_waitq);
2958
2959 return 1; 2627 return 1;
2960} 2628}
2961 2629
@@ -3004,6 +2672,95 @@ mptscsih_timer_expired(unsigned long data)
3004 return; 2672 return;
3005} 2673}
3006 2674
2675/**
2676 * mptscsih_get_completion_code - map an internal command reply to an MPT_SCANDV_* code
2677 * @ioc: Pointer to MPT_ADAPTER structure
2678 * @req: Pointer to the original request frame
2679 * @reply: Pointer to the reply frame returned by the IOC
2680 *
2681 **/
2682static int
2683mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
2684 MPT_FRAME_HDR *reply)
2685{
2686 SCSIIOReply_t *pReply;
2687 MpiRaidActionReply_t *pr;
2688 u8 scsi_status;
2689 u16 status;
2690 int completion_code;
2691
2692 pReply = (SCSIIOReply_t *)reply;
2693 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2694 scsi_status = pReply->SCSIStatus;
2695
2696 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2697 "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
2698 "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
2699 scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
2700
2701 switch (status) {
2702
2703 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
2704 completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
2705 break;
2706
2707 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
2708 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
2709 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
2710 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
2711 completion_code = MPT_SCANDV_DID_RESET;
2712 break;
2713
2714 case MPI_IOCSTATUS_BUSY:
2715 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2716 completion_code = MPT_SCANDV_BUSY;
2717 break;
2718
2719 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
2720 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
2721 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
2722 if (pReply->Function == MPI_FUNCTION_CONFIG) {
2723 completion_code = MPT_SCANDV_GOOD;
2724 } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
2725 pr = (MpiRaidActionReply_t *)reply;
2726 if (le16_to_cpu(pr->ActionStatus) ==
2727 MPI_RAID_ACTION_ASTATUS_SUCCESS)
2728 completion_code = MPT_SCANDV_GOOD;
2729 else
2730 completion_code = MPT_SCANDV_SOME_ERROR;
2731 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
2732 completion_code = MPT_SCANDV_SENSE;
2733 else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
2734 if (req->u.scsireq.CDB[0] == INQUIRY)
2735 completion_code = MPT_SCANDV_ISSUE_SENSE;
2736 else
2737 completion_code = MPT_SCANDV_DID_RESET;
2738 } else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
2739 completion_code = MPT_SCANDV_DID_RESET;
2740 else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2741 completion_code = MPT_SCANDV_DID_RESET;
2742 else if (scsi_status == MPI_SCSI_STATUS_BUSY)
2743 completion_code = MPT_SCANDV_BUSY;
2744 else
2745 completion_code = MPT_SCANDV_GOOD;
2746 break;
2747
2748 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
2749 if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
2750 completion_code = MPT_SCANDV_DID_RESET;
2751 else
2752 completion_code = MPT_SCANDV_SOME_ERROR;
2753 break;
2754 default:
2755 completion_code = MPT_SCANDV_SOME_ERROR;
2756 break;
2757
2758 } /* switch(status) */
2759
2760 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2761 " completionCode set to %08xh\n", ioc->name, completion_code));
2762 return completion_code;
2763}
3007 2764
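(Illustrative sketch, not part of the commit.) mptscsih_get_completion_code() folds the reply's IOCStatus/SCSIState into the MPT_SCANDV_* values that mptscsih_do_cmd() (reworked further down in this diff) now returns, including the MPT_SCANDV_BUSY value added to mptscsih.h at the end of the patch. How a caller reacts to those codes is up to the caller; the retry policy below is purely an example, not taken from the driver:

    #include <linux/delay.h>

    /* Hypothetical caller: retry a command a few times while the IOC or
     * device reports busy, give up on any other non-good completion. */
    static int ex_do_cmd_with_retry(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
    {
            int rc, tries;

            for (tries = 0; tries < 3; tries++) {
                    rc = mptscsih_do_cmd(hd, io);
                    if (rc == MPT_SCANDV_GOOD)
                            return 0;
                    if (rc == MPT_SCANDV_BUSY) {
                            msleep(1000);   /* back off, then retry */
                            continue;
                    }
                    break;  /* SENSE, DID_RESET, SOME_ERROR, -errno: stop */
            }
            return rc;
    }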
3008/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2765/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3009/** 2766/**
@@ -3030,22 +2787,27 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3030{ 2787{
3031 MPT_FRAME_HDR *mf; 2788 MPT_FRAME_HDR *mf;
3032 SCSIIORequest_t *pScsiReq; 2789 SCSIIORequest_t *pScsiReq;
3033 SCSIIORequest_t ReqCopy;
3034 int my_idx, ii, dir; 2790 int my_idx, ii, dir;
3035 int rc, cmdTimeout; 2791 int timeout;
3036 int in_isr;
3037 char cmdLen; 2792 char cmdLen;
3038 char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; 2793 char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
3039 char cmd = io->cmd; 2794 u8 cmd = io->cmd;
3040 MPT_ADAPTER *ioc = hd->ioc; 2795 MPT_ADAPTER *ioc = hd->ioc;
2796 int ret = 0;
2797 unsigned long timeleft;
2798 unsigned long flags;
3041 2799
3042 in_isr = in_interrupt(); 2800 /* don't send internal command during diag reset */
3043 if (in_isr) { 2801 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3044 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n", 2802 if (ioc->ioc_reset_in_progress) {
3045 ioc->name)); 2803 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3046 return -EPERM; 2804 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2805 "%s: busy with host reset\n", ioc->name, __func__));
2806 return MPT_SCANDV_BUSY;
3047 } 2807 }
2808 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3048 2809
2810 mutex_lock(&ioc->internal_cmds.mutex);
3049 2811
3050 /* Set command specific information 2812 /* Set command specific information
3051 */ 2813 */
@@ -3055,13 +2817,13 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3055 dir = MPI_SCSIIO_CONTROL_READ; 2817 dir = MPI_SCSIIO_CONTROL_READ;
3056 CDB[0] = cmd; 2818 CDB[0] = cmd;
3057 CDB[4] = io->size; 2819 CDB[4] = io->size;
3058 cmdTimeout = 10; 2820 timeout = 10;
3059 break; 2821 break;
3060 2822
3061 case TEST_UNIT_READY: 2823 case TEST_UNIT_READY:
3062 cmdLen = 6; 2824 cmdLen = 6;
3063 dir = MPI_SCSIIO_CONTROL_READ; 2825 dir = MPI_SCSIIO_CONTROL_READ;
3064 cmdTimeout = 10; 2826 timeout = 10;
3065 break; 2827 break;
3066 2828
3067 case START_STOP: 2829 case START_STOP:
@@ -3069,7 +2831,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3069 dir = MPI_SCSIIO_CONTROL_READ; 2831 dir = MPI_SCSIIO_CONTROL_READ;
3070 CDB[0] = cmd; 2832 CDB[0] = cmd;
3071 CDB[4] = 1; /*Spin up the disk */ 2833 CDB[4] = 1; /*Spin up the disk */
3072 cmdTimeout = 15; 2834 timeout = 15;
3073 break; 2835 break;
3074 2836
3075 case REQUEST_SENSE: 2837 case REQUEST_SENSE:
@@ -3077,7 +2839,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3077 CDB[0] = cmd; 2839 CDB[0] = cmd;
3078 CDB[4] = io->size; 2840 CDB[4] = io->size;
3079 dir = MPI_SCSIIO_CONTROL_READ; 2841 dir = MPI_SCSIIO_CONTROL_READ;
3080 cmdTimeout = 10; 2842 timeout = 10;
3081 break; 2843 break;
3082 2844
3083 case READ_BUFFER: 2845 case READ_BUFFER:
@@ -3096,7 +2858,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3096 CDB[6] = (io->size >> 16) & 0xFF; 2858 CDB[6] = (io->size >> 16) & 0xFF;
3097 CDB[7] = (io->size >> 8) & 0xFF; 2859 CDB[7] = (io->size >> 8) & 0xFF;
3098 CDB[8] = io->size & 0xFF; 2860 CDB[8] = io->size & 0xFF;
3099 cmdTimeout = 10; 2861 timeout = 10;
3100 break; 2862 break;
3101 2863
3102 case WRITE_BUFFER: 2864 case WRITE_BUFFER:
@@ -3111,21 +2873,21 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3111 CDB[6] = (io->size >> 16) & 0xFF; 2873 CDB[6] = (io->size >> 16) & 0xFF;
3112 CDB[7] = (io->size >> 8) & 0xFF; 2874 CDB[7] = (io->size >> 8) & 0xFF;
3113 CDB[8] = io->size & 0xFF; 2875 CDB[8] = io->size & 0xFF;
3114 cmdTimeout = 10; 2876 timeout = 10;
3115 break; 2877 break;
3116 2878
3117 case RESERVE: 2879 case RESERVE:
3118 cmdLen = 6; 2880 cmdLen = 6;
3119 dir = MPI_SCSIIO_CONTROL_READ; 2881 dir = MPI_SCSIIO_CONTROL_READ;
3120 CDB[0] = cmd; 2882 CDB[0] = cmd;
3121 cmdTimeout = 10; 2883 timeout = 10;
3122 break; 2884 break;
3123 2885
3124 case RELEASE: 2886 case RELEASE:
3125 cmdLen = 6; 2887 cmdLen = 6;
3126 dir = MPI_SCSIIO_CONTROL_READ; 2888 dir = MPI_SCSIIO_CONTROL_READ;
3127 CDB[0] = cmd; 2889 CDB[0] = cmd;
3128 cmdTimeout = 10; 2890 timeout = 10;
3129 break; 2891 break;
3130 2892
3131 case SYNCHRONIZE_CACHE: 2893 case SYNCHRONIZE_CACHE:
@@ -3133,20 +2895,23 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3133 dir = MPI_SCSIIO_CONTROL_READ; 2895 dir = MPI_SCSIIO_CONTROL_READ;
3134 CDB[0] = cmd; 2896 CDB[0] = cmd;
3135// CDB[1] = 0x02; /* set immediate bit */ 2897// CDB[1] = 0x02; /* set immediate bit */
3136 cmdTimeout = 10; 2898 timeout = 10;
3137 break; 2899 break;
3138 2900
3139 default: 2901 default:
3140 /* Error Case */ 2902 /* Error Case */
3141 return -EFAULT; 2903 ret = -EFAULT;
2904 goto out;
3142 } 2905 }
3143 2906
3144 /* Get and Populate a free Frame 2907 /* Get and Populate a free Frame
2908 * MsgContext set in mpt_get_msg_frame call
3145 */ 2909 */
3146 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 2910 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
3147 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n", 2911 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
3148 ioc->name)); 2912 ioc->name, __func__));
3149 return -EBUSY; 2913 ret = MPT_SCANDV_BUSY;
2914 goto out;
3150 } 2915 }
3151 2916
3152 pScsiReq = (SCSIIORequest_t *) mf; 2917 pScsiReq = (SCSIIORequest_t *) mf;
@@ -3172,7 +2937,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3172 2937
3173 pScsiReq->Reserved = 0; 2938 pScsiReq->Reserved = 0;
3174 2939
3175 pScsiReq->MsgFlags = mpt_msg_flags(); 2940 pScsiReq->MsgFlags = mpt_msg_flags(ioc);
3176 /* MsgContext set in mpt_get_msg_fram call */ 2941 /* MsgContext set in mpt_get_msg_fram call */
3177 2942
3178 int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN); 2943 int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
@@ -3184,74 +2949,58 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
3184 2949
3185 if (cmd == REQUEST_SENSE) { 2950 if (cmd == REQUEST_SENSE) {
3186 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED); 2951 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
3187 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 0x%2x\n", 2952 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3188 ioc->name, cmd)); 2953 "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
3189 } 2954 }
3190 2955
3191 for (ii=0; ii < 16; ii++) 2956 for (ii = 0; ii < 16; ii++)
3192 pScsiReq->CDB[ii] = CDB[ii]; 2957 pScsiReq->CDB[ii] = CDB[ii];
3193 2958
3194 pScsiReq->DataLength = cpu_to_le32(io->size); 2959 pScsiReq->DataLength = cpu_to_le32(io->size);
3195 pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma 2960 pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
3196 + (my_idx * MPT_SENSE_BUFFER_ALLOC)); 2961 + (my_idx * MPT_SENSE_BUFFER_ALLOC));
3197 2962
3198 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n", 2963 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3199 ioc->name, cmd, io->channel, io->id, io->lun)); 2964 "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
2965 ioc->name, __func__, cmd, io->channel, io->id, io->lun));
3200 2966
3201 if (dir == MPI_SCSIIO_CONTROL_READ) { 2967 if (dir == MPI_SCSIIO_CONTROL_READ)
3202 mpt_add_sge((char *) &pScsiReq->SGL, 2968 ioc->add_sge((char *) &pScsiReq->SGL,
3203 MPT_SGE_FLAGS_SSIMPLE_READ | io->size, 2969 MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
3204 io->data_dma); 2970 else
3205 } else { 2971 ioc->add_sge((char *) &pScsiReq->SGL,
3206 mpt_add_sge((char *) &pScsiReq->SGL, 2972 MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
3207 MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size,
3208 io->data_dma);
3209 }
3210
3211 /* The ISR will free the request frame, but we need
3212 * the information to initialize the target. Duplicate.
3213 */
3214 memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t));
3215
3216 /* Issue this command after:
3217 * finish init
3218 * add timer
3219 * Wait until the reply has been received
3220 * ScsiScanDvCtx callback function will
3221 * set hd->pLocal;
3222 * set scandv_wait_done and call wake_up
3223 */
3224 hd->pLocal = NULL;
3225 hd->timer.expires = jiffies + HZ*cmdTimeout;
3226 hd->scandv_wait_done = 0;
3227
3228 /* Save cmd pointer, for resource free if timeout or
3229 * FW reload occurs
3230 */
3231 hd->cmdPtr = mf;
3232 2973
3233 add_timer(&hd->timer); 2974 INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
3234 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); 2975 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
3235 wait_event(hd->scandv_waitq, hd->scandv_wait_done); 2976 timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
3236 2977 timeout*HZ);
3237 if (hd->pLocal) { 2978 if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
3238 rc = hd->pLocal->completion; 2979 ret = MPT_SCANDV_DID_RESET;
3239 hd->pLocal->skip = 0; 2980 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3240 2981 "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
3241 /* Always set fatal error codes in some cases. 2982 cmd));
3242 */ 2983 if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
3243 if (rc == MPT_SCANDV_SELECTION_TIMEOUT) 2984 mpt_free_msg_frame(ioc, mf);
3244 rc = -ENXIO; 2985 goto out;
3245 else if (rc == MPT_SCANDV_SOME_ERROR) 2986 }
3246 rc = -rc; 2987 if (!timeleft) {
3247 } else { 2988 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
3248 rc = -EFAULT; 2989 ioc->name, __func__);
3249 /* This should never happen. */ 2990 mpt_HardResetHandler(ioc, CAN_SLEEP);
3250 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n", 2991 mpt_free_msg_frame(ioc, mf);
3251 ioc->name)); 2992 }
2993 goto out;
3252 } 2994 }
3253 2995
3254 return rc; 2996 ret = ioc->internal_cmds.completion_code;
2997 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
2998 ioc->name, __func__, ret));
2999
3000 out:
3001 CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
3002 mutex_unlock(&ioc->internal_cmds.mutex);
3003 return ret;
3255} 3004}
3256 3005
3257/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3006/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -3491,6 +3240,7 @@ struct device_attribute *mptscsih_host_attrs[] = {
3491 &dev_attr_debug_level, 3240 &dev_attr_debug_level,
3492 NULL, 3241 NULL,
3493}; 3242};
3243
3494EXPORT_SYMBOL(mptscsih_host_attrs); 3244EXPORT_SYMBOL(mptscsih_host_attrs);
3495 3245
3496EXPORT_SYMBOL(mptscsih_remove); 3246EXPORT_SYMBOL(mptscsih_remove);
@@ -3516,6 +3266,5 @@ EXPORT_SYMBOL(mptscsih_event_process);
3516EXPORT_SYMBOL(mptscsih_ioc_reset); 3266EXPORT_SYMBOL(mptscsih_ioc_reset);
3517EXPORT_SYMBOL(mptscsih_change_queue_depth); 3267EXPORT_SYMBOL(mptscsih_change_queue_depth);
3518EXPORT_SYMBOL(mptscsih_timer_expired); 3268EXPORT_SYMBOL(mptscsih_timer_expired);
3519EXPORT_SYMBOL(mptscsih_TMHandler);
3520 3269
3521/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3270/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 319aa3033371..eb3f677528ac 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -60,6 +60,7 @@
60#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008) 60#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008)
61#define MPT_SCANDV_ISSUE_SENSE (0x00000010) 61#define MPT_SCANDV_ISSUE_SENSE (0x00000010)
62#define MPT_SCANDV_FALLBACK (0x00000020) 62#define MPT_SCANDV_FALLBACK (0x00000020)
63#define MPT_SCANDV_BUSY (0x00000040)
63 64
64#define MPT_SCANDV_MAX_RETRIES (10) 65#define MPT_SCANDV_MAX_RETRIES (10)
65 66
@@ -89,6 +90,7 @@
89 90
90#endif 91#endif
91 92
93
92typedef struct _internal_cmd { 94typedef struct _internal_cmd {
93 char *data; /* data pointer */ 95 char *data; /* data pointer */
94 dma_addr_t data_dma; /* data dma address */ 96 dma_addr_t data_dma; /* data dma address */
@@ -112,6 +114,8 @@ extern int mptscsih_resume(struct pci_dev *pdev);
112extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func); 114extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
113extern const char * mptscsih_info(struct Scsi_Host *SChost); 115extern const char * mptscsih_info(struct Scsi_Host *SChost);
114extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)); 116extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
117extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
118 u8 id, int lun, int ctx2abort, ulong timeout);
115extern void mptscsih_slave_destroy(struct scsi_device *device); 119extern void mptscsih_slave_destroy(struct scsi_device *device);
116extern int mptscsih_slave_configure(struct scsi_device *device); 120extern int mptscsih_slave_configure(struct scsi_device *device);
117extern int mptscsih_abort(struct scsi_cmnd * SCpnt); 121extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -126,7 +130,8 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
126extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); 130extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
127extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth); 131extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
128extern void mptscsih_timer_expired(unsigned long data); 132extern void mptscsih_timer_expired(unsigned long data);
129extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
130extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); 133extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
131extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); 134extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
132extern struct device_attribute *mptscsih_host_attrs[]; 135extern struct device_attribute *mptscsih_host_attrs[];
136extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
137extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 61620144e49c..c5b808fd55ba 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
300 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | 300 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
301 (IOCPage4Ptr->Header.PageLength + ii) * 4; 301 (IOCPage4Ptr->Header.PageLength + ii) * 4;
302 302
303 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma); 303 ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
304 304
305 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT 305 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
306 "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n", 306 "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget)
614 spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0; 614 spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
615} 615}
616 616
617static int 617int
618mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) 618mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
619{ 619{
620 MPT_ADAPTER *ioc = hd->ioc;
620 MpiRaidActionRequest_t *pReq; 621 MpiRaidActionRequest_t *pReq;
621 MPT_FRAME_HDR *mf; 622 MPT_FRAME_HDR *mf;
622 MPT_ADAPTER *ioc = hd->ioc; 623 int ret;
624 unsigned long timeleft;
625
626 mutex_lock(&ioc->internal_cmds.mutex);
623 627
624 /* Get and Populate a free Frame 628 /* Get and Populate a free Frame
625 */ 629 */
626 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 630 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
627 ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n", 631 dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
628 ioc->name)); 632 "%s: no msg frames!\n", ioc->name, __func__));
629 return -EAGAIN; 633 ret = -EAGAIN;
634 goto out;
630 } 635 }
631 pReq = (MpiRaidActionRequest_t *)mf; 636 pReq = (MpiRaidActionRequest_t *)mf;
632 if (quiesce) 637 if (quiesce)
@@ -643,29 +648,36 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
643 pReq->Reserved2 = 0; 648 pReq->Reserved2 = 0;
644 pReq->ActionDataWord = 0; /* Reserved for this action */ 649 pReq->ActionDataWord = 0; /* Reserved for this action */
645 650
646 mpt_add_sge((char *)&pReq->ActionDataSGE, 651 ioc->add_sge((char *)&pReq->ActionDataSGE,
647 MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); 652 MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
648 653
649 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", 654 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
650 ioc->name, pReq->Action, channel, id)); 655 ioc->name, pReq->Action, channel, id));
651 656
652 hd->pLocal = NULL; 657 INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
653 hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
654 hd->scandv_wait_done = 0;
655
656 /* Save cmd pointer, for resource free if timeout or
657 * FW reload occurs
658 */
659 hd->cmdPtr = mf;
660
661 add_timer(&hd->timer);
662 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); 658 mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
663 wait_event(hd->scandv_waitq, hd->scandv_wait_done); 659 timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
660 if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
661 ret = -ETIME;
662 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
663 ioc->name, __func__));
664 if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
665 goto out;
666 if (!timeleft) {
667 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
668 ioc->name, __func__);
669 mpt_HardResetHandler(ioc, CAN_SLEEP);
670 mpt_free_msg_frame(ioc, mf);
671 }
672 goto out;
673 }
664 674
665 if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0)) 675 ret = ioc->internal_cmds.completion_code;
666 return -1;
667 676
668 return 0; 677 out:
678 CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
679 mutex_unlock(&ioc->internal_cmds.mutex);
680 return ret;
669} 681}
670 682
671static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd, 683static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1423,17 +1435,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1423 * A slightly different algorithm is required for 1435 * A slightly different algorithm is required for
1424 * 64bit SGEs. 1436 * 64bit SGEs.
1425 */ 1437 */
1426 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 1438 scale = ioc->req_sz/ioc->SGE_size;
1427 if (sizeof(dma_addr_t) == sizeof(u64)) { 1439 if (ioc->sg_addr_size == sizeof(u64)) {
1428 numSGE = (scale - 1) * 1440 numSGE = (scale - 1) *
1429 (ioc->facts.MaxChainDepth-1) + scale + 1441 (ioc->facts.MaxChainDepth-1) + scale +
1430 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 1442 (ioc->req_sz - 60) / ioc->SGE_size;
1431 sizeof(u32));
1432 } else { 1443 } else {
1433 numSGE = 1 + (scale - 1) * 1444 numSGE = 1 + (scale - 1) *
1434 (ioc->facts.MaxChainDepth-1) + scale + 1445 (ioc->facts.MaxChainDepth-1) + scale +
1435 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 1446 (ioc->req_sz - 64) / ioc->SGE_size;
1436 sizeof(u32));
1437 } 1447 }
1438 1448
1439 if (numSGE < sh->sg_tablesize) { 1449 if (numSGE < sh->sg_tablesize) {
@@ -1464,9 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1464 1474
1465 /* Clear the TM flags 1475 /* Clear the TM flags
1466 */ 1476 */
1467 hd->tmPending = 0;
1468 hd->tmState = TM_STATE_NONE;
1469 hd->resetPending = 0;
1470 hd->abortSCpnt = NULL; 1477 hd->abortSCpnt = NULL;
1471 1478
1472 /* Clear the pointer used to store 1479 /* Clear the pointer used to store
@@ -1493,8 +1500,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1493 mpt_saf_te)); 1500 mpt_saf_te));
1494 ioc->spi_data.noQas = 0; 1501 ioc->spi_data.noQas = 0;
1495 1502
1496 init_waitqueue_head(&hd->scandv_waitq);
1497 hd->scandv_wait_done = 0;
1498 hd->last_queue_full = 0; 1503 hd->last_queue_full = 0;
1499 hd->spi_pending = 0; 1504 hd->spi_pending = 0;
1500 1505
@@ -1514,7 +1519,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1514 * issue internal bus reset 1519 * issue internal bus reset
1515 */ 1520 */
1516 if (ioc->spi_data.bus_reset) 1521 if (ioc->spi_data.bus_reset)
1517 mptscsih_TMHandler(hd, 1522 mptscsih_IssueTaskMgmt(hd,
1518 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1523 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1519 0, 0, 0, 0, 5); 1524 0, 0, 0, 0, 5);
1520 1525
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 214a92d1ef75..f3c4a3b910bb 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2264,6 +2264,17 @@ config BNX2
2264 To compile this driver as a module, choose M here: the module 2264 To compile this driver as a module, choose M here: the module
2265 will be called bnx2. This is recommended. 2265 will be called bnx2. This is recommended.
2266 2266
2267config CNIC
2268 tristate "Broadcom CNIC support"
2269 depends on BNX2
2270 depends on UIO
2271 help
2272	  This driver supports offload features of Broadcom NetXtreme II
2273 gigabit Ethernet cards.
2274
2275 To compile this driver as a module, choose M here: the module
2276 will be called cnic. This is recommended.
2277
2267config SPIDER_NET 2278config SPIDER_NET
2268 tristate "Spider Gigabit Ethernet driver" 2279 tristate "Spider Gigabit Ethernet driver"
2269 depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) 2280 depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a1c25cb4669f..db30ebd7b262 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
73obj-$(CONFIG_FEALNX) += fealnx.o 73obj-$(CONFIG_FEALNX) += fealnx.o
74obj-$(CONFIG_TIGON3) += tg3.o 74obj-$(CONFIG_TIGON3) += tg3.o
75obj-$(CONFIG_BNX2) += bnx2.o 75obj-$(CONFIG_BNX2) += bnx2.o
76obj-$(CONFIG_CNIC) += cnic.o
76obj-$(CONFIG_BNX2X) += bnx2x.o 77obj-$(CONFIG_BNX2X) += bnx2x.o
77bnx2x-objs := bnx2x_main.o bnx2x_link.o 78bnx2x-objs := bnx2x_main.o bnx2x_link.o
78spidernet-y += spider_net.o spider_net_ethtool.o 79spidernet-y += spider_net.o spider_net_ethtool.o
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b0cb29d4cc01..3f5fcb0156a1 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -49,6 +49,10 @@
49#include <linux/firmware.h> 49#include <linux/firmware.h>
50#include <linux/log2.h> 50#include <linux/log2.h>
51 51
52#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
53#define BCM_CNIC 1
54#include "cnic_if.h"
55#endif
52#include "bnx2.h" 56#include "bnx2.h"
53#include "bnx2_fw.h" 57#include "bnx2_fw.h"
54 58
@@ -315,6 +319,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
315 spin_unlock_bh(&bp->indirect_lock); 319 spin_unlock_bh(&bp->indirect_lock);
316} 320}
317 321
322#ifdef BCM_CNIC
323static int
324bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
325{
326 struct bnx2 *bp = netdev_priv(dev);
327 struct drv_ctl_io *io = &info->data.io;
328
329 switch (info->cmd) {
330 case DRV_CTL_IO_WR_CMD:
331 bnx2_reg_wr_ind(bp, io->offset, io->data);
332 break;
333 case DRV_CTL_IO_RD_CMD:
334 io->data = bnx2_reg_rd_ind(bp, io->offset);
335 break;
336 case DRV_CTL_CTX_WR_CMD:
337 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
338 break;
339 default:
340 return -EINVAL;
341 }
342 return 0;
343}
344
345static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
346{
347 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
348 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
349 int sb_id;
350
351 if (bp->flags & BNX2_FLAG_USING_MSIX) {
352 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
353 bnapi->cnic_present = 0;
354 sb_id = bp->irq_nvecs;
355 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
356 } else {
357 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
358 bnapi->cnic_tag = bnapi->last_status_idx;
359 bnapi->cnic_present = 1;
360 sb_id = 0;
361 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
362 }
363
364 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
365 cp->irq_arr[0].status_blk = (void *)
366 ((unsigned long) bnapi->status_blk.msi +
367 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
368 cp->irq_arr[0].status_blk_num = sb_id;
369 cp->num_irq = 1;
370}
371
372static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
373 void *data)
374{
375 struct bnx2 *bp = netdev_priv(dev);
376 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
377
378 if (ops == NULL)
379 return -EINVAL;
380
381 if (cp->drv_state & CNIC_DRV_STATE_REGD)
382 return -EBUSY;
383
384 bp->cnic_data = data;
385 rcu_assign_pointer(bp->cnic_ops, ops);
386
387 cp->num_irq = 0;
388 cp->drv_state = CNIC_DRV_STATE_REGD;
389
390 bnx2_setup_cnic_irq_info(bp);
391
392 return 0;
393}
394
395static int bnx2_unregister_cnic(struct net_device *dev)
396{
397 struct bnx2 *bp = netdev_priv(dev);
398 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
399 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
400
401 cp->drv_state = 0;
402 bnapi->cnic_present = 0;
403 rcu_assign_pointer(bp->cnic_ops, NULL);
404 synchronize_rcu();
405 return 0;
406}
407
408struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
409{
410 struct bnx2 *bp = netdev_priv(dev);
411 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
412
413 cp->drv_owner = THIS_MODULE;
414 cp->chip_id = bp->chip_id;
415 cp->pdev = bp->pdev;
416 cp->io_base = bp->regview;
417 cp->drv_ctl = bnx2_drv_ctl;
418 cp->drv_register_cnic = bnx2_register_cnic;
419 cp->drv_unregister_cnic = bnx2_unregister_cnic;
420
421 return cp;
422}
423EXPORT_SYMBOL(bnx2_cnic_probe);
424
425static void
426bnx2_cnic_stop(struct bnx2 *bp)
427{
428 struct cnic_ops *c_ops;
429 struct cnic_ctl_info info;
430
431 rcu_read_lock();
432 c_ops = rcu_dereference(bp->cnic_ops);
433 if (c_ops) {
434 info.cmd = CNIC_CTL_STOP_CMD;
435 c_ops->cnic_ctl(bp->cnic_data, &info);
436 }
437 rcu_read_unlock();
438}
439
440static void
441bnx2_cnic_start(struct bnx2 *bp)
442{
443 struct cnic_ops *c_ops;
444 struct cnic_ctl_info info;
445
446 rcu_read_lock();
447 c_ops = rcu_dereference(bp->cnic_ops);
448 if (c_ops) {
449 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
450 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
451
452 bnapi->cnic_tag = bnapi->last_status_idx;
453 }
454 info.cmd = CNIC_CTL_START_CMD;
455 c_ops->cnic_ctl(bp->cnic_data, &info);
456 }
457 rcu_read_unlock();
458}
459
460#else
461
462static void
463bnx2_cnic_stop(struct bnx2 *bp)
464{
465}
466
467static void
468bnx2_cnic_start(struct bnx2 *bp)
469{
470}
471
472#endif
473
318static int 474static int
319bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) 475bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
320{ 476{
@@ -488,6 +644,7 @@ bnx2_napi_enable(struct bnx2 *bp)
488static void 644static void
489bnx2_netif_stop(struct bnx2 *bp) 645bnx2_netif_stop(struct bnx2 *bp)
490{ 646{
647 bnx2_cnic_stop(bp);
491 bnx2_disable_int_sync(bp); 648 bnx2_disable_int_sync(bp);
492 if (netif_running(bp->dev)) { 649 if (netif_running(bp->dev)) {
493 bnx2_napi_disable(bp); 650 bnx2_napi_disable(bp);
@@ -504,6 +661,7 @@ bnx2_netif_start(struct bnx2 *bp)
504 netif_tx_wake_all_queues(bp->dev); 661 netif_tx_wake_all_queues(bp->dev);
505 bnx2_napi_enable(bp); 662 bnx2_napi_enable(bp);
506 bnx2_enable_int(bp); 663 bnx2_enable_int(bp);
664 bnx2_cnic_start(bp);
507 } 665 }
508 } 666 }
509} 667}
@@ -3164,6 +3322,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
3164 if (bnx2_has_fast_work(bnapi)) 3322 if (bnx2_has_fast_work(bnapi))
3165 return 1; 3323 return 1;
3166 3324
3325#ifdef BCM_CNIC
3326 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3327 return 1;
3328#endif
3329
3167 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != 3330 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3168 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) 3331 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3169 return 1; 3332 return 1;
@@ -3193,6 +3356,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
3193 bp->idle_chk_status_idx = bnapi->last_status_idx; 3356 bp->idle_chk_status_idx = bnapi->last_status_idx;
3194} 3357}
3195 3358
3359#ifdef BCM_CNIC
3360static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3361{
3362 struct cnic_ops *c_ops;
3363
3364 if (!bnapi->cnic_present)
3365 return;
3366
3367 rcu_read_lock();
3368 c_ops = rcu_dereference(bp->cnic_ops);
3369 if (c_ops)
3370 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3371 bnapi->status_blk.msi);
3372 rcu_read_unlock();
3373}
3374#endif
3375
3196static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) 3376static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3197{ 3377{
3198 struct status_block *sblk = bnapi->status_blk.msi; 3378 struct status_block *sblk = bnapi->status_blk.msi;
@@ -3267,6 +3447,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3267 3447
3268 work_done = bnx2_poll_work(bp, bnapi, work_done, budget); 3448 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3269 3449
3450#ifdef BCM_CNIC
3451 bnx2_poll_cnic(bp, bnapi);
3452#endif
3453
3270 /* bnapi->last_status_idx is used below to tell the hw how 3454 /* bnapi->last_status_idx is used below to tell the hw how
3271 * much work has been processed, so we must read it before 3455 * much work has been processed, so we must read it before
3272 * checking for more work. 3456 * checking for more work.
@@ -4632,8 +4816,11 @@ bnx2_init_chip(struct bnx2 *bp)
4632 val = REG_RD(bp, BNX2_MQ_CONFIG); 4816 val = REG_RD(bp, BNX2_MQ_CONFIG);
4633 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4817 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4634 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 4818 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4635 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1) 4819 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4636 val |= BNX2_MQ_CONFIG_HALT_DIS; 4820 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4821 if (CHIP_REV(bp) == CHIP_REV_Ax)
4822 val |= BNX2_MQ_CONFIG_HALT_DIS;
4823 }
4637 4824
4638 REG_WR(bp, BNX2_MQ_CONFIG, val); 4825 REG_WR(bp, BNX2_MQ_CONFIG, val);
4639 4826
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7471 INIT_WORK(&bp->reset_task, bnx2_reset_task); 7658 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7472 7659
7473 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 7660 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7474 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS); 7661 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7475 dev->mem_end = dev->mem_start + mem_len; 7662 dev->mem_end = dev->mem_start + mem_len;
7476 dev->irq = pdev->irq; 7663 dev->irq = pdev->irq;
7477 7664
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5b570e17c839..a1ff739bc9b5 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,6 +361,9 @@ struct l2_fhdr {
361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) 361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28)
362 362
363#define BNX2_L2CTX_HOST_BDIDX 0x00000004 363#define BNX2_L2CTX_HOST_BDIDX 0x00000004
364#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16
365#define BNX2_L2CTX_STATUSB_NUM(sb_id) \
366 (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
364#define BNX2_L2CTX_HOST_BSEQ 0x00000008 367#define BNX2_L2CTX_HOST_BSEQ 0x00000008
365#define BNX2_L2CTX_NX_BSEQ 0x0000000c 368#define BNX2_L2CTX_NX_BSEQ 0x0000000c
366#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 369#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
5900#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) 5903#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
5901 5904
5902#define BNX2_RXP_SCRATCH 0x000e0000 5905#define BNX2_RXP_SCRATCH 0x000e0000
5906#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024
5903#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038 5907#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038
5904#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c 5908#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c
5905#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128 5909#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128
@@ -6678,6 +6682,11 @@ struct bnx2_napi {
6678 u32 last_status_idx; 6682 u32 last_status_idx;
6679 u32 int_num; 6683 u32 int_num;
6680 6684
6685#ifdef BCM_CNIC
6686 u32 cnic_tag;
6687 int cnic_present;
6688#endif
6689
6681 struct bnx2_rx_ring_info rx_ring; 6690 struct bnx2_rx_ring_info rx_ring;
6682 struct bnx2_tx_ring_info tx_ring; 6691 struct bnx2_tx_ring_info tx_ring;
6683}; 6692};
@@ -6727,6 +6736,11 @@ struct bnx2 {
6727 int tx_ring_size; 6736 int tx_ring_size;
6728 u32 tx_wake_thresh; 6737 u32 tx_wake_thresh;
6729 6738
6739#ifdef BCM_CNIC
6740 struct cnic_ops *cnic_ops;
6741 void *cnic_data;
6742#endif
6743
6730 /* End of fields used in the performance code paths. */ 6744 /* End of fields used in the performance code paths. */
6731 6745
6732 unsigned int current_interval; 6746 unsigned int current_interval;
@@ -6885,6 +6899,10 @@ struct bnx2 {
6885 6899
6886 u32 idle_chk_status_idx; 6900 u32 idle_chk_status_idx;
6887 6901
6902#ifdef BCM_CNIC
6903 struct cnic_eth_dev cnic_eth_dev;
6904#endif
6905
6888 const struct firmware *mips_firmware; 6906 const struct firmware *mips_firmware;
6889 const struct firmware *rv2p_firmware; 6907 const struct firmware *rv2p_firmware;
6890}; 6908};
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644
index 000000000000..8d740376bbd2
--- /dev/null
+++ b/drivers/net/cnic.c
@@ -0,0 +1,2711 @@
1/* cnic.c: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
10 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
11 */
12
13#include <linux/module.h>
14
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/netdevice.h>
22#include <linux/uio_driver.h>
23#include <linux/in.h>
24#include <linux/dma-mapping.h>
25#include <linux/delay.h>
26#include <linux/ethtool.h>
27#include <linux/if_vlan.h>
28#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
29#define BCM_VLAN 1
30#endif
31#include <net/ip.h>
32#include <net/tcp.h>
33#include <net/route.h>
34#include <net/ipv6.h>
35#include <net/ip6_route.h>
36#include <scsi/iscsi_if.h>
37
38#include "cnic_if.h"
39#include "bnx2.h"
40#include "cnic.h"
41#include "cnic_defs.h"
42
43#define DRV_MODULE_NAME "cnic"
44#define PFX DRV_MODULE_NAME ": "
45
46static char version[] __devinitdata =
47 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
48
49MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
50 "Chen (zongxi@broadcom.com");
51MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
52MODULE_LICENSE("GPL");
53MODULE_VERSION(CNIC_MODULE_VERSION);
54
55static LIST_HEAD(cnic_dev_list);
56static DEFINE_RWLOCK(cnic_dev_lock);
57static DEFINE_MUTEX(cnic_lock);
58
59static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
60
61static int cnic_service_bnx2(void *, void *);
62static int cnic_ctl(void *, struct cnic_ctl_info *);
63
64static struct cnic_ops cnic_bnx2_ops = {
65 .cnic_owner = THIS_MODULE,
66 .cnic_handler = cnic_service_bnx2,
67 .cnic_ctl = cnic_ctl,
68};
69
70static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
71static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
72static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
73static int cnic_cm_set_pg(struct cnic_sock *);
74
75static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
76{
77 struct cnic_dev *dev = uinfo->priv;
78 struct cnic_local *cp = dev->cnic_priv;
79
80 if (!capable(CAP_NET_ADMIN))
81 return -EPERM;
82
83 if (cp->uio_dev != -1)
84 return -EBUSY;
85
86 cp->uio_dev = iminor(inode);
87
88 cnic_shutdown_bnx2_rx_ring(dev);
89
90 cnic_init_bnx2_tx_ring(dev);
91 cnic_init_bnx2_rx_ring(dev);
92
93 return 0;
94}
95
96static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
97{
98 struct cnic_dev *dev = uinfo->priv;
99 struct cnic_local *cp = dev->cnic_priv;
100
101 cp->uio_dev = -1;
102 return 0;
103}
104
105static inline void cnic_hold(struct cnic_dev *dev)
106{
107 atomic_inc(&dev->ref_count);
108}
109
110static inline void cnic_put(struct cnic_dev *dev)
111{
112 atomic_dec(&dev->ref_count);
113}
114
115static inline void csk_hold(struct cnic_sock *csk)
116{
117 atomic_inc(&csk->ref_count);
118}
119
120static inline void csk_put(struct cnic_sock *csk)
121{
122 atomic_dec(&csk->ref_count);
123}
124
125static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
126{
127 struct cnic_dev *cdev;
128
129 read_lock(&cnic_dev_lock);
130 list_for_each_entry(cdev, &cnic_dev_list, list) {
131 if (netdev == cdev->netdev) {
132 cnic_hold(cdev);
133 read_unlock(&cnic_dev_lock);
134 return cdev;
135 }
136 }
137 read_unlock(&cnic_dev_lock);
138 return NULL;
139}
140
141static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
142{
143 struct cnic_local *cp = dev->cnic_priv;
144 struct cnic_eth_dev *ethdev = cp->ethdev;
145 struct drv_ctl_info info;
146 struct drv_ctl_io *io = &info.data.io;
147
148 info.cmd = DRV_CTL_CTX_WR_CMD;
149 io->cid_addr = cid_addr;
150 io->offset = off;
151 io->data = val;
152 ethdev->drv_ctl(dev->netdev, &info);
153}
154
155static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
156{
157 struct cnic_local *cp = dev->cnic_priv;
158 struct cnic_eth_dev *ethdev = cp->ethdev;
159 struct drv_ctl_info info;
160 struct drv_ctl_io *io = &info.data.io;
161
162 info.cmd = DRV_CTL_IO_WR_CMD;
163 io->offset = off;
164 io->data = val;
165 ethdev->drv_ctl(dev->netdev, &info);
166}
167
168static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
169{
170 struct cnic_local *cp = dev->cnic_priv;
171 struct cnic_eth_dev *ethdev = cp->ethdev;
172 struct drv_ctl_info info;
173 struct drv_ctl_io *io = &info.data.io;
174
175 info.cmd = DRV_CTL_IO_RD_CMD;
176 io->offset = off;
177 ethdev->drv_ctl(dev->netdev, &info);
178 return io->data;
179}
180
181static int cnic_in_use(struct cnic_sock *csk)
182{
183 return test_bit(SK_F_INUSE, &csk->flags);
184}
185
186static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
187{
188 struct cnic_local *cp = dev->cnic_priv;
189 struct cnic_eth_dev *ethdev = cp->ethdev;
190 struct drv_ctl_info info;
191
192 info.cmd = DRV_CTL_COMPLETION_CMD;
193 info.data.comp.comp_count = count;
194 ethdev->drv_ctl(dev->netdev, &info);
195}
196
197static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
198 struct cnic_sock *csk)
199{
200 struct iscsi_path path_req;
201 char *buf = NULL;
202 u16 len = 0;
203 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
204 struct cnic_ulp_ops *ulp_ops;
205
206 if (cp->uio_dev == -1)
207 return -ENODEV;
208
209 if (csk) {
210 len = sizeof(path_req);
211 buf = (char *) &path_req;
212 memset(&path_req, 0, len);
213
214 msg_type = ISCSI_KEVENT_PATH_REQ;
215 path_req.handle = (u64) csk->l5_cid;
216 if (test_bit(SK_F_IPV6, &csk->flags)) {
217 memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
218 sizeof(struct in6_addr));
219 path_req.ip_addr_len = 16;
220 } else {
221 memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
222 sizeof(struct in_addr));
223 path_req.ip_addr_len = 4;
224 }
225 path_req.vlan_id = csk->vlan_id;
226 path_req.pmtu = csk->mtu;
227 }
228
229 rcu_read_lock();
230 ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
231 if (ulp_ops)
232 ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
233 rcu_read_unlock();
234 return 0;
235}
236
237static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
238 char *buf, u16 len)
239{
240 int rc = -EINVAL;
241
242 switch (msg_type) {
243 case ISCSI_UEVENT_PATH_UPDATE: {
244 struct cnic_local *cp;
245 u32 l5_cid;
246 struct cnic_sock *csk;
247 struct iscsi_path *path_resp;
248
249 if (len < sizeof(*path_resp))
250 break;
251
252 path_resp = (struct iscsi_path *) buf;
253 cp = dev->cnic_priv;
254 l5_cid = (u32) path_resp->handle;
255 if (l5_cid >= MAX_CM_SK_TBL_SZ)
256 break;
257
258 csk = &cp->csk_tbl[l5_cid];
259 csk_hold(csk);
260 if (cnic_in_use(csk)) {
261 memcpy(csk->ha, path_resp->mac_addr, 6);
262 if (test_bit(SK_F_IPV6, &csk->flags))
263 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
264 sizeof(struct in6_addr));
265 else
266 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
267 sizeof(struct in_addr));
268 if (is_valid_ether_addr(csk->ha))
269 cnic_cm_set_pg(csk);
270 }
271 csk_put(csk);
272 rc = 0;
273 }
274 }
275
276 return rc;
277}
278
279static int cnic_offld_prep(struct cnic_sock *csk)
280{
281 if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
282 return 0;
283
284 if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
285 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
286 return 0;
287 }
288
289 return 1;
290}
291
292static int cnic_close_prep(struct cnic_sock *csk)
293{
294 clear_bit(SK_F_CONNECT_START, &csk->flags);
295 smp_mb__after_clear_bit();
296
297 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
298 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
299 msleep(1);
300
301 return 1;
302 }
303 return 0;
304}
305
306static int cnic_abort_prep(struct cnic_sock *csk)
307{
308 clear_bit(SK_F_CONNECT_START, &csk->flags);
309 smp_mb__after_clear_bit();
310
311 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
312 msleep(1);
313
314 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
315 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
316 return 1;
317 }
318
319 return 0;
320}
321
322int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
323{
324 struct cnic_dev *dev;
325
326 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
327 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
328 ulp_type);
329 return -EINVAL;
330 }
331 mutex_lock(&cnic_lock);
332 if (cnic_ulp_tbl[ulp_type]) {
333 printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
334 "been registered\n", ulp_type);
335 mutex_unlock(&cnic_lock);
336 return -EBUSY;
337 }
338
339 read_lock(&cnic_dev_lock);
340 list_for_each_entry(dev, &cnic_dev_list, list) {
341 struct cnic_local *cp = dev->cnic_priv;
342
343 clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
344 }
345 read_unlock(&cnic_dev_lock);
346
347 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
348 mutex_unlock(&cnic_lock);
349
350 /* Prevent race conditions with netdev_event */
351 rtnl_lock();
352 read_lock(&cnic_dev_lock);
353 list_for_each_entry(dev, &cnic_dev_list, list) {
354 struct cnic_local *cp = dev->cnic_priv;
355
356 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
357 ulp_ops->cnic_init(dev);
358 }
359 read_unlock(&cnic_dev_lock);
360 rtnl_unlock();
361
362 return 0;
363}
364
365int cnic_unregister_driver(int ulp_type)
366{
367 struct cnic_dev *dev;
368
369 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
370 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
371 ulp_type);
372 return -EINVAL;
373 }
374 mutex_lock(&cnic_lock);
375 if (!cnic_ulp_tbl[ulp_type]) {
376 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
377 "been registered\n", ulp_type);
378 goto out_unlock;
379 }
380 read_lock(&cnic_dev_lock);
381 list_for_each_entry(dev, &cnic_dev_list, list) {
382 struct cnic_local *cp = dev->cnic_priv;
383
384 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
385 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
386 "still has devices registered\n", ulp_type);
387 read_unlock(&cnic_dev_lock);
388 goto out_unlock;
389 }
390 }
391 read_unlock(&cnic_dev_lock);
392
393 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
394
395 mutex_unlock(&cnic_lock);
396 synchronize_rcu();
397 return 0;
398
399out_unlock:
400 mutex_unlock(&cnic_lock);
401 return -EINVAL;
402}
403
404static int cnic_start_hw(struct cnic_dev *);
405static void cnic_stop_hw(struct cnic_dev *);
406
407static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
408 void *ulp_ctx)
409{
410 struct cnic_local *cp = dev->cnic_priv;
411 struct cnic_ulp_ops *ulp_ops;
412
413 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
414 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
415 ulp_type);
416 return -EINVAL;
417 }
418 mutex_lock(&cnic_lock);
419 if (cnic_ulp_tbl[ulp_type] == NULL) {
420 printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
421 "has not been registered\n", ulp_type);
422 mutex_unlock(&cnic_lock);
423 return -EAGAIN;
424 }
425 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
426 printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
427 "been registered to this device\n", ulp_type);
428 mutex_unlock(&cnic_lock);
429 return -EBUSY;
430 }
431
432 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
433 cp->ulp_handle[ulp_type] = ulp_ctx;
434 ulp_ops = cnic_ulp_tbl[ulp_type];
435 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
436 cnic_hold(dev);
437
438 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
439 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
440 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
441
442 mutex_unlock(&cnic_lock);
443
444 return 0;
445
446}
447EXPORT_SYMBOL(cnic_register_driver);
448
449static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
450{
451 struct cnic_local *cp = dev->cnic_priv;
452
453 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
454 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
455 ulp_type);
456 return -EINVAL;
457 }
458 mutex_lock(&cnic_lock);
459 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
460 rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
461 cnic_put(dev);
462 } else {
463 printk(KERN_ERR PFX "cnic_unregister_device: device not "
464 "registered to this ulp type %d\n", ulp_type);
465 mutex_unlock(&cnic_lock);
466 return -EINVAL;
467 }
468 mutex_unlock(&cnic_lock);
469
470 synchronize_rcu();
471
472 return 0;
473}
474EXPORT_SYMBOL(cnic_unregister_driver);
475
476static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
477{
478 id_tbl->start = start_id;
479 id_tbl->max = size;
480 id_tbl->next = 0;
481 spin_lock_init(&id_tbl->lock);
482 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
483 if (!id_tbl->table)
484 return -ENOMEM;
485
486 return 0;
487}
488
489static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
490{
491 kfree(id_tbl->table);
492 id_tbl->table = NULL;
493}
494
495static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
496{
497 int ret = -1;
498
499 id -= id_tbl->start;
500 if (id >= id_tbl->max)
501 return ret;
502
503 spin_lock(&id_tbl->lock);
504 if (!test_bit(id, id_tbl->table)) {
505 set_bit(id, id_tbl->table);
506 ret = 0;
507 }
508 spin_unlock(&id_tbl->lock);
509 return ret;
510}
511
512/* Returns -1 if not successful */
513static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
514{
515 u32 id;
516
517 spin_lock(&id_tbl->lock);
518 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
519 if (id >= id_tbl->max) {
520 id = -1;
521 if (id_tbl->next != 0) {
522 id = find_first_zero_bit(id_tbl->table, id_tbl->next);
523 if (id >= id_tbl->next)
524 id = -1;
525 }
526 }
527
528 if (id < id_tbl->max) {
529 set_bit(id, id_tbl->table);
530 id_tbl->next = (id + 1) & (id_tbl->max - 1);
531 id += id_tbl->start;
532 }
533
534 spin_unlock(&id_tbl->lock);
535
536 return id;
537}
538
539static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
540{
541 if (id == -1)
542 return;
543
544 id -= id_tbl->start;
545 if (id >= id_tbl->max)
546 return;
547
548 clear_bit(id, id_tbl->table);
549}
550
551static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
552{
553 int i;
554
555 if (!dma->pg_arr)
556 return;
557
558 for (i = 0; i < dma->num_pages; i++) {
559 if (dma->pg_arr[i]) {
560 pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
561 dma->pg_arr[i], dma->pg_map_arr[i]);
562 dma->pg_arr[i] = NULL;
563 }
564 }
565 if (dma->pgtbl) {
566 pci_free_consistent(dev->pcidev, dma->pgtbl_size,
567 dma->pgtbl, dma->pgtbl_map);
568 dma->pgtbl = NULL;
569 }
570 kfree(dma->pg_arr);
571 dma->pg_arr = NULL;
572 dma->num_pages = 0;
573}
574
575static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
576{
577 int i;
578 u32 *page_table = dma->pgtbl;
579
580 for (i = 0; i < dma->num_pages; i++) {
581 /* Each entry needs to be in big endian format. */
582 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
583 page_table++;
584 *page_table = (u32) dma->pg_map_arr[i];
585 page_table++;
586 }
587}
588
589static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
590 int pages, int use_pg_tbl)
591{
592 int i, size;
593 struct cnic_local *cp = dev->cnic_priv;
594
595 size = pages * (sizeof(void *) + sizeof(dma_addr_t));
596 dma->pg_arr = kzalloc(size, GFP_ATOMIC);
597 if (dma->pg_arr == NULL)
598 return -ENOMEM;
599
600 dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
601 dma->num_pages = pages;
602
603 for (i = 0; i < pages; i++) {
604 dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
605 BCM_PAGE_SIZE,
606 &dma->pg_map_arr[i]);
607 if (dma->pg_arr[i] == NULL)
608 goto error;
609 }
610 if (!use_pg_tbl)
611 return 0;
612
613 dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
614 ~(BCM_PAGE_SIZE - 1);
615 dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
616 &dma->pgtbl_map);
617 if (dma->pgtbl == NULL)
618 goto error;
619
620 cp->setup_pgtbl(dev, dma);
621
622 return 0;
623
624error:
625 cnic_free_dma(dev, dma);
626 return -ENOMEM;
627}
628
629static void cnic_free_resc(struct cnic_dev *dev)
630{
631 struct cnic_local *cp = dev->cnic_priv;
632 int i = 0;
633
634 if (cp->cnic_uinfo) {
635 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
636 while (cp->uio_dev != -1 && i < 15) {
637 msleep(100);
638 i++;
639 }
640 uio_unregister_device(cp->cnic_uinfo);
641 kfree(cp->cnic_uinfo);
642 cp->cnic_uinfo = NULL;
643 }
644
645 if (cp->l2_buf) {
646 pci_free_consistent(dev->pcidev, cp->l2_buf_size,
647 cp->l2_buf, cp->l2_buf_map);
648 cp->l2_buf = NULL;
649 }
650
651 if (cp->l2_ring) {
652 pci_free_consistent(dev->pcidev, cp->l2_ring_size,
653 cp->l2_ring, cp->l2_ring_map);
654 cp->l2_ring = NULL;
655 }
656
657 for (i = 0; i < cp->ctx_blks; i++) {
658 if (cp->ctx_arr[i].ctx) {
659 pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
660 cp->ctx_arr[i].ctx,
661 cp->ctx_arr[i].mapping);
662 cp->ctx_arr[i].ctx = NULL;
663 }
664 }
665 kfree(cp->ctx_arr);
666 cp->ctx_arr = NULL;
667 cp->ctx_blks = 0;
668
669 cnic_free_dma(dev, &cp->gbl_buf_info);
670 cnic_free_dma(dev, &cp->conn_buf_info);
671 cnic_free_dma(dev, &cp->kwq_info);
672 cnic_free_dma(dev, &cp->kcq_info);
673 kfree(cp->iscsi_tbl);
674 cp->iscsi_tbl = NULL;
675 kfree(cp->ctx_tbl);
676 cp->ctx_tbl = NULL;
677
678 cnic_free_id_tbl(&cp->cid_tbl);
679}
680
681static int cnic_alloc_context(struct cnic_dev *dev)
682{
683 struct cnic_local *cp = dev->cnic_priv;
684
685 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
686 int i, k, arr_size;
687
688 cp->ctx_blk_size = BCM_PAGE_SIZE;
689 cp->cids_per_blk = BCM_PAGE_SIZE / 128;
690 arr_size = BNX2_MAX_CID / cp->cids_per_blk *
691 sizeof(struct cnic_ctx);
692 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
693 if (cp->ctx_arr == NULL)
694 return -ENOMEM;
695
696 k = 0;
697 for (i = 0; i < 2; i++) {
698 u32 j, reg, off, lo, hi;
699
700 if (i == 0)
701 off = BNX2_PG_CTX_MAP;
702 else
703 off = BNX2_ISCSI_CTX_MAP;
704
705 reg = cnic_reg_rd_ind(dev, off);
706 lo = reg >> 16;
707 hi = reg & 0xffff;
708 for (j = lo; j < hi; j += cp->cids_per_blk, k++)
709 cp->ctx_arr[k].cid = j;
710 }
711
712 cp->ctx_blks = k;
713 if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
714 cp->ctx_blks = 0;
715 return -ENOMEM;
716 }
717
718 for (i = 0; i < cp->ctx_blks; i++) {
719 cp->ctx_arr[i].ctx =
720 pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
721 &cp->ctx_arr[i].mapping);
722 if (cp->ctx_arr[i].ctx == NULL)
723 return -ENOMEM;
724 }
725 }
726 return 0;
727}
728
729static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
730{
731 struct cnic_local *cp = dev->cnic_priv;
732 struct uio_info *uinfo;
733 int ret;
734
735 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
736 if (ret)
737 goto error;
738 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
739
740 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
741 if (ret)
742 goto error;
743 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
744
745 ret = cnic_alloc_context(dev);
746 if (ret)
747 goto error;
748
749 cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
750 cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
751 &cp->l2_ring_map);
752 if (!cp->l2_ring)
753 goto error;
754
755 cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
756 cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
757 cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
758 &cp->l2_buf_map);
759 if (!cp->l2_buf)
760 goto error;
761
762 uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
763 if (!uinfo)
764 goto error;
765
766 uinfo->mem[0].addr = dev->netdev->base_addr;
767 uinfo->mem[0].internal_addr = dev->regview;
768 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
769 uinfo->mem[0].memtype = UIO_MEM_PHYS;
770
771 uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
772 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
773 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
774 else
775 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
776 uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
777
778 uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
779 uinfo->mem[2].size = cp->l2_ring_size;
780 uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
781
782 uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
783 uinfo->mem[3].size = cp->l2_buf_size;
784 uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
785
786 uinfo->name = "bnx2_cnic";
787 uinfo->version = CNIC_MODULE_VERSION;
788 uinfo->irq = UIO_IRQ_CUSTOM;
789
790 uinfo->open = cnic_uio_open;
791 uinfo->release = cnic_uio_close;
792
793 uinfo->priv = dev;
794
795 ret = uio_register_device(&dev->pcidev->dev, uinfo);
796 if (ret) {
797 kfree(uinfo);
798 goto error;
799 }
800
801 cp->cnic_uinfo = uinfo;
802
803 return 0;
804
805error:
806 cnic_free_resc(dev);
807 return ret;
808}
809
810static inline u32 cnic_kwq_avail(struct cnic_local *cp)
811{
812 return cp->max_kwq_idx -
813 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
814}
815
816static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
817 u32 num_wqes)
818{
819 struct cnic_local *cp = dev->cnic_priv;
820 struct kwqe *prod_qe;
821 u16 prod, sw_prod, i;
822
823 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
824 return -EAGAIN; /* bnx2 is down */
825
826 spin_lock_bh(&cp->cnic_ulp_lock);
827 if (num_wqes > cnic_kwq_avail(cp) &&
828 !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
829 spin_unlock_bh(&cp->cnic_ulp_lock);
830 return -EAGAIN;
831 }
832
833 cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
834
835 prod = cp->kwq_prod_idx;
836 sw_prod = prod & MAX_KWQ_IDX;
837 for (i = 0; i < num_wqes; i++) {
838 prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
839 memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
840 prod++;
841 sw_prod = prod & MAX_KWQ_IDX;
842 }
843 cp->kwq_prod_idx = prod;
844
845 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
846
847 spin_unlock_bh(&cp->cnic_ulp_lock);
848 return 0;
849}
850
851static void service_kcqes(struct cnic_dev *dev, int num_cqes)
852{
853 struct cnic_local *cp = dev->cnic_priv;
854 int i, j;
855
856 i = 0;
857 j = 1;
858 while (num_cqes) {
859 struct cnic_ulp_ops *ulp_ops;
860 int ulp_type;
861 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
862 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
863
864 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
865 cnic_kwq_completion(dev, 1);
866
867 while (j < num_cqes) {
868 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
869
870 if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
871 break;
872
873 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
874 cnic_kwq_completion(dev, 1);
875 j++;
876 }
877
878 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
879 ulp_type = CNIC_ULP_RDMA;
880 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
881 ulp_type = CNIC_ULP_ISCSI;
882 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
883 ulp_type = CNIC_ULP_L4;
884 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
885 goto end;
886 else {
887 printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
888 dev->netdev->name, kcqe_op_flag);
889 goto end;
890 }
891
892 rcu_read_lock();
893 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
894 if (likely(ulp_ops)) {
895 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
896 cp->completed_kcq + i, j);
897 }
898 rcu_read_unlock();
899end:
900 num_cqes -= j;
901 i += j;
902 j = 1;
903 }
904 return;
905}
906
907static u16 cnic_bnx2_next_idx(u16 idx)
908{
909 return idx + 1;
910}
911
912static u16 cnic_bnx2_hw_idx(u16 idx)
913{
914 return idx;
915}
916
917static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
918{
919 struct cnic_local *cp = dev->cnic_priv;
920 u16 i, ri, last;
921 struct kcqe *kcqe;
922 int kcqe_cnt = 0, last_cnt = 0;
923
924 i = ri = last = *sw_prod;
925 ri &= MAX_KCQ_IDX;
926
927 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
928 kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
929 cp->completed_kcq[kcqe_cnt++] = kcqe;
930 i = cp->next_idx(i);
931 ri = i & MAX_KCQ_IDX;
932 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
933 last_cnt = kcqe_cnt;
934 last = i;
935 }
936 }
937
938 *sw_prod = last;
939 return last_cnt;
940}
941
942static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
943{
944 u16 rx_cons = *cp->rx_cons_ptr;
945 u16 tx_cons = *cp->tx_cons_ptr;
946
947 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
948 cp->tx_cons = tx_cons;
949 cp->rx_cons = rx_cons;
950 uio_event_notify(cp->cnic_uinfo);
951 }
952}
953
954static int cnic_service_bnx2(void *data, void *status_blk)
955{
956 struct cnic_dev *dev = data;
957 struct status_block *sblk = status_blk;
958 struct cnic_local *cp = dev->cnic_priv;
959 u32 status_idx = sblk->status_idx;
960 u16 hw_prod, sw_prod;
961 int kcqe_cnt;
962
963 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
964 return status_idx;
965
966 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
967
968 hw_prod = sblk->status_completion_producer_index;
969 sw_prod = cp->kcq_prod_idx;
970 while (sw_prod != hw_prod) {
971 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
972 if (kcqe_cnt == 0)
973 goto done;
974
975 service_kcqes(dev, kcqe_cnt);
976
977 /* Tell compiler that status_blk fields can change. */
978 barrier();
979 if (status_idx != sblk->status_idx) {
980 status_idx = sblk->status_idx;
981 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
982 hw_prod = sblk->status_completion_producer_index;
983 } else
984 break;
985 }
986
987done:
988 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
989
990 cp->kcq_prod_idx = sw_prod;
991
992 cnic_chk_bnx2_pkt_rings(cp);
993 return status_idx;
994}
995
996static void cnic_service_bnx2_msix(unsigned long data)
997{
998 struct cnic_dev *dev = (struct cnic_dev *) data;
999 struct cnic_local *cp = dev->cnic_priv;
1000 struct status_block_msix *status_blk = cp->bnx2_status_blk;
1001 u32 status_idx = status_blk->status_idx;
1002 u16 hw_prod, sw_prod;
1003 int kcqe_cnt;
1004
1005 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
1006
1007 hw_prod = status_blk->status_completion_producer_index;
1008 sw_prod = cp->kcq_prod_idx;
1009 while (sw_prod != hw_prod) {
1010 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
1011 if (kcqe_cnt == 0)
1012 goto done;
1013
1014 service_kcqes(dev, kcqe_cnt);
1015
1016 /* Tell compiler that status_blk fields can change. */
1017 barrier();
1018 if (status_idx != status_blk->status_idx) {
1019 status_idx = status_blk->status_idx;
1020 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
1021 hw_prod = status_blk->status_completion_producer_index;
1022 } else
1023 break;
1024 }
1025
1026done:
1027 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
1028 cp->kcq_prod_idx = sw_prod;
1029
1030 cnic_chk_bnx2_pkt_rings(cp);
1031
1032 cp->last_status_idx = status_idx;
1033 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
1034 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
1035}
1036
1037static irqreturn_t cnic_irq(int irq, void *dev_instance)
1038{
1039 struct cnic_dev *dev = dev_instance;
1040 struct cnic_local *cp = dev->cnic_priv;
1041 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
1042
1043 if (cp->ack_int)
1044 cp->ack_int(dev);
1045
1046 prefetch(cp->status_blk);
1047 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
1048
1049 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
1050 tasklet_schedule(&cp->cnic_irq_task);
1051
1052 return IRQ_HANDLED;
1053}
1054
1055static void cnic_ulp_stop(struct cnic_dev *dev)
1056{
1057 struct cnic_local *cp = dev->cnic_priv;
1058 int if_type;
1059
1060 rcu_read_lock();
1061 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1062 struct cnic_ulp_ops *ulp_ops;
1063
1064 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1065 if (!ulp_ops)
1066 continue;
1067
1068 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1069 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
1070 }
1071 rcu_read_unlock();
1072}
1073
1074static void cnic_ulp_start(struct cnic_dev *dev)
1075{
1076 struct cnic_local *cp = dev->cnic_priv;
1077 int if_type;
1078
1079 rcu_read_lock();
1080 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1081 struct cnic_ulp_ops *ulp_ops;
1082
1083 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1084 if (!ulp_ops || !ulp_ops->cnic_start)
1085 continue;
1086
1087 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1088 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
1089 }
1090 rcu_read_unlock();
1091}
1092
1093static int cnic_ctl(void *data, struct cnic_ctl_info *info)
1094{
1095 struct cnic_dev *dev = data;
1096
1097 switch (info->cmd) {
1098 case CNIC_CTL_STOP_CMD:
1099 cnic_hold(dev);
1100 mutex_lock(&cnic_lock);
1101
1102 cnic_ulp_stop(dev);
1103 cnic_stop_hw(dev);
1104
1105 mutex_unlock(&cnic_lock);
1106 cnic_put(dev);
1107 break;
1108 case CNIC_CTL_START_CMD:
1109 cnic_hold(dev);
1110 mutex_lock(&cnic_lock);
1111
1112 if (!cnic_start_hw(dev))
1113 cnic_ulp_start(dev);
1114
1115 mutex_unlock(&cnic_lock);
1116 cnic_put(dev);
1117 break;
1118 default:
1119 return -EINVAL;
1120 }
1121 return 0;
1122}
1123
1124static void cnic_ulp_init(struct cnic_dev *dev)
1125{
1126 int i;
1127 struct cnic_local *cp = dev->cnic_priv;
1128
1129 rcu_read_lock();
1130 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1131 struct cnic_ulp_ops *ulp_ops;
1132
1133 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1134 if (!ulp_ops || !ulp_ops->cnic_init)
1135 continue;
1136
1137 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1138 ulp_ops->cnic_init(dev);
1139
1140 }
1141 rcu_read_unlock();
1142}
1143
1144static void cnic_ulp_exit(struct cnic_dev *dev)
1145{
1146 int i;
1147 struct cnic_local *cp = dev->cnic_priv;
1148
1149 rcu_read_lock();
1150 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1151 struct cnic_ulp_ops *ulp_ops;
1152
1153 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1154 if (!ulp_ops || !ulp_ops->cnic_exit)
1155 continue;
1156
1157 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1158 ulp_ops->cnic_exit(dev);
1159
1160 }
1161 rcu_read_unlock();
1162}
1163
1164static int cnic_cm_offload_pg(struct cnic_sock *csk)
1165{
1166 struct cnic_dev *dev = csk->dev;
1167 struct l4_kwq_offload_pg *l4kwqe;
1168 struct kwqe *wqes[1];
1169
1170 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
1171 memset(l4kwqe, 0, sizeof(*l4kwqe));
1172 wqes[0] = (struct kwqe *) l4kwqe;
1173
1174 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
1175 l4kwqe->flags =
1176 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
1177 l4kwqe->l2hdr_nbytes = ETH_HLEN;
1178
1179 l4kwqe->da0 = csk->ha[0];
1180 l4kwqe->da1 = csk->ha[1];
1181 l4kwqe->da2 = csk->ha[2];
1182 l4kwqe->da3 = csk->ha[3];
1183 l4kwqe->da4 = csk->ha[4];
1184 l4kwqe->da5 = csk->ha[5];
1185
1186 l4kwqe->sa0 = dev->mac_addr[0];
1187 l4kwqe->sa1 = dev->mac_addr[1];
1188 l4kwqe->sa2 = dev->mac_addr[2];
1189 l4kwqe->sa3 = dev->mac_addr[3];
1190 l4kwqe->sa4 = dev->mac_addr[4];
1191 l4kwqe->sa5 = dev->mac_addr[5];
1192
1193 l4kwqe->etype = ETH_P_IP;
1194 l4kwqe->ipid_count = DEF_IPID_COUNT;
1195 l4kwqe->host_opaque = csk->l5_cid;
1196
1197 if (csk->vlan_id) {
1198 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
1199 l4kwqe->vlan_tag = csk->vlan_id;
1200 l4kwqe->l2hdr_nbytes += 4;
1201 }
1202
1203 return dev->submit_kwqes(dev, wqes, 1);
1204}
1205
1206static int cnic_cm_update_pg(struct cnic_sock *csk)
1207{
1208 struct cnic_dev *dev = csk->dev;
1209 struct l4_kwq_update_pg *l4kwqe;
1210 struct kwqe *wqes[1];
1211
1212 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
1213 memset(l4kwqe, 0, sizeof(*l4kwqe));
1214 wqes[0] = (struct kwqe *) l4kwqe;
1215
1216 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
1217 l4kwqe->flags =
1218 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
1219 l4kwqe->pg_cid = csk->pg_cid;
1220
1221 l4kwqe->da0 = csk->ha[0];
1222 l4kwqe->da1 = csk->ha[1];
1223 l4kwqe->da2 = csk->ha[2];
1224 l4kwqe->da3 = csk->ha[3];
1225 l4kwqe->da4 = csk->ha[4];
1226 l4kwqe->da5 = csk->ha[5];
1227
1228 l4kwqe->pg_host_opaque = csk->l5_cid;
1229 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
1230
1231 return dev->submit_kwqes(dev, wqes, 1);
1232}
1233
1234static int cnic_cm_upload_pg(struct cnic_sock *csk)
1235{
1236 struct cnic_dev *dev = csk->dev;
1237 struct l4_kwq_upload *l4kwqe;
1238 struct kwqe *wqes[1];
1239
1240 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
1241 memset(l4kwqe, 0, sizeof(*l4kwqe));
1242 wqes[0] = (struct kwqe *) l4kwqe;
1243
1244 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
1245 l4kwqe->flags =
1246 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
1247 l4kwqe->cid = csk->pg_cid;
1248
1249 return dev->submit_kwqes(dev, wqes, 1);
1250}
1251
1252static int cnic_cm_conn_req(struct cnic_sock *csk)
1253{
1254 struct cnic_dev *dev = csk->dev;
1255 struct l4_kwq_connect_req1 *l4kwqe1;
1256 struct l4_kwq_connect_req2 *l4kwqe2;
1257 struct l4_kwq_connect_req3 *l4kwqe3;
1258 struct kwqe *wqes[3];
1259 u8 tcp_flags = 0;
1260 int num_wqes = 2;
1261
1262 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
1263 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
1264 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
1265 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
1266 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
1267 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
1268
1269 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
1270 l4kwqe3->flags =
1271 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
1272 l4kwqe3->ka_timeout = csk->ka_timeout;
1273 l4kwqe3->ka_interval = csk->ka_interval;
1274 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
1275 l4kwqe3->tos = csk->tos;
1276 l4kwqe3->ttl = csk->ttl;
1277 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
1278 l4kwqe3->pmtu = csk->mtu;
1279 l4kwqe3->rcv_buf = csk->rcv_buf;
1280 l4kwqe3->snd_buf = csk->snd_buf;
1281 l4kwqe3->seed = csk->seed;
1282
1283 wqes[0] = (struct kwqe *) l4kwqe1;
1284 if (test_bit(SK_F_IPV6, &csk->flags)) {
1285 wqes[1] = (struct kwqe *) l4kwqe2;
1286 wqes[2] = (struct kwqe *) l4kwqe3;
1287 num_wqes = 3;
1288
1289 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
1290 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
1291 l4kwqe2->flags =
1292 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
1293 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
1294 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
1295 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
1296 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
1297 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
1298 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
1299 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
1300 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
1301 sizeof(struct tcphdr);
1302 } else {
1303 wqes[1] = (struct kwqe *) l4kwqe3;
1304 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
1305 sizeof(struct tcphdr);
1306 }
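
	/*
	 * An IPv4 connect needs only CONNECT1 + CONNECT3 (two kwqes);
	 * IPv6 adds CONNECT2 for the upper 96 bits of each address, hence
	 * num_wqes = 3.  Rough MSS numbers, assuming a typical 1500-byte
	 * path MTU and no TCP options: 1500 - 20 - 20 = 1460 for IPv4 and
	 * 1500 - 40 - 20 = 1440 for IPv6 (iphdr = 20, ipv6hdr = 40,
	 * tcphdr = 20 bytes).
	 */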
1307
1308 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
1309 l4kwqe1->flags =
1310 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
1311 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
1312 l4kwqe1->cid = csk->cid;
1313 l4kwqe1->pg_cid = csk->pg_cid;
1314 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
1315 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
1316 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
1317 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
1318 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
1319 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
1320 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
1321 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
1322 if (csk->tcp_flags & SK_TCP_NAGLE)
1323 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
1324 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
1325 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
1326 if (csk->tcp_flags & SK_TCP_SACK)
1327 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
1328 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
1329 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
1330
1331 l4kwqe1->tcp_flags = tcp_flags;
1332
1333 return dev->submit_kwqes(dev, wqes, num_wqes);
1334}
1335
1336static int cnic_cm_close_req(struct cnic_sock *csk)
1337{
1338 struct cnic_dev *dev = csk->dev;
1339 struct l4_kwq_close_req *l4kwqe;
1340 struct kwqe *wqes[1];
1341
1342 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
1343 memset(l4kwqe, 0, sizeof(*l4kwqe));
1344 wqes[0] = (struct kwqe *) l4kwqe;
1345
1346 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
1347 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
1348 l4kwqe->cid = csk->cid;
1349
1350 return dev->submit_kwqes(dev, wqes, 1);
1351}
1352
1353static int cnic_cm_abort_req(struct cnic_sock *csk)
1354{
1355 struct cnic_dev *dev = csk->dev;
1356 struct l4_kwq_reset_req *l4kwqe;
1357 struct kwqe *wqes[1];
1358
1359 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
1360 memset(l4kwqe, 0, sizeof(*l4kwqe));
1361 wqes[0] = (struct kwqe *) l4kwqe;
1362
1363 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
1364 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
1365 l4kwqe->cid = csk->cid;
1366
1367 return dev->submit_kwqes(dev, wqes, 1);
1368}
1369
1370static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
1371 u32 l5_cid, struct cnic_sock **csk, void *context)
1372{
1373 struct cnic_local *cp = dev->cnic_priv;
1374 struct cnic_sock *csk1;
1375
1376 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1377 return -EINVAL;
1378
1379 csk1 = &cp->csk_tbl[l5_cid];
1380 if (atomic_read(&csk1->ref_count))
1381 return -EAGAIN;
1382
1383 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
1384 return -EBUSY;
1385
1386 csk1->dev = dev;
1387 csk1->cid = cid;
1388 csk1->l5_cid = l5_cid;
1389 csk1->ulp_type = ulp_type;
1390 csk1->context = context;
1391
1392 csk1->ka_timeout = DEF_KA_TIMEOUT;
1393 csk1->ka_interval = DEF_KA_INTERVAL;
1394 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
1395 csk1->tos = DEF_TOS;
1396 csk1->ttl = DEF_TTL;
1397 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
1398 csk1->rcv_buf = DEF_RCV_BUF;
1399 csk1->snd_buf = DEF_SND_BUF;
1400 csk1->seed = DEF_SEED;
1401
1402 *csk = csk1;
1403 return 0;
1404}
1405
1406static void cnic_cm_cleanup(struct cnic_sock *csk)
1407{
1408 if (csk->src_port) {
1409 struct cnic_dev *dev = csk->dev;
1410 struct cnic_local *cp = dev->cnic_priv;
1411
1412 cnic_free_id(&cp->csk_port_tbl, csk->src_port);
1413 csk->src_port = 0;
1414 }
1415}
1416
1417static void cnic_close_conn(struct cnic_sock *csk)
1418{
1419 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
1420 cnic_cm_upload_pg(csk);
1421 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1422 }
1423 cnic_cm_cleanup(csk);
1424}
1425
1426static int cnic_cm_destroy(struct cnic_sock *csk)
1427{
1428 if (!cnic_in_use(csk))
1429 return -EINVAL;
1430
1431 csk_hold(csk);
1432 clear_bit(SK_F_INUSE, &csk->flags);
1433 smp_mb__after_clear_bit();
1434 while (atomic_read(&csk->ref_count) != 1)
1435 msleep(1);
1436 cnic_cm_cleanup(csk);
1437
1438 csk->flags = 0;
1439 csk_put(csk);
1440 return 0;
1441}
1442
1443static inline u16 cnic_get_vlan(struct net_device *dev,
1444 struct net_device **vlan_dev)
1445{
1446 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1447 *vlan_dev = vlan_dev_real_dev(dev);
1448 return vlan_dev_vlan_id(dev);
1449 }
1450 *vlan_dev = dev;
1451 return 0;
1452}
1453
1454static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
1455 struct dst_entry **dst)
1456{
1457 struct flowi fl;
1458 int err;
1459 struct rtable *rt;
1460
1461 memset(&fl, 0, sizeof(fl));
1462 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
1463
1464 err = ip_route_output_key(&init_net, &rt, &fl);
1465 if (!err)
1466 *dst = &rt->u.dst;
1467 return err;
1468}
1469
1470static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
1471 struct dst_entry **dst)
1472{
1473#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1474 struct flowi fl;
1475
1476 memset(&fl, 0, sizeof(fl));
1477 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
1478 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
1479 fl.oif = dst_addr->sin6_scope_id;
1480
1481 *dst = ip6_route_output(&init_net, NULL, &fl);
1482 if (*dst)
1483 return 0;
1484#endif
1485
1486 return -ENETUNREACH;
1487}
1488
1489static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
1490 int ulp_type)
1491{
1492 struct cnic_dev *dev = NULL;
1493 struct dst_entry *dst;
1494 struct net_device *netdev = NULL;
1495 int err = -ENETUNREACH;
1496
1497 if (dst_addr->sin_family == AF_INET)
1498 err = cnic_get_v4_route(dst_addr, &dst);
1499 else if (dst_addr->sin_family == AF_INET6) {
1500 struct sockaddr_in6 *dst_addr6 =
1501 (struct sockaddr_in6 *) dst_addr;
1502
1503 err = cnic_get_v6_route(dst_addr6, &dst);
1504 } else
1505 return NULL;
1506
1507 if (err)
1508 return NULL;
1509
1510 if (!dst->dev)
1511 goto done;
1512
1513 cnic_get_vlan(dst->dev, &netdev);
1514
1515 dev = cnic_from_netdev(netdev);
1516
1517done:
1518 dst_release(dst);
1519 if (dev)
1520 cnic_put(dev);
1521 return dev;
1522}
1523
1524static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1525{
1526 struct cnic_dev *dev = csk->dev;
1527 struct cnic_local *cp = dev->cnic_priv;
1528
1529 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
1530}
1531
1532static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1533{
1534 struct cnic_dev *dev = csk->dev;
1535 struct cnic_local *cp = dev->cnic_priv;
1536 int is_v6, err, rc = -ENETUNREACH;
1537 struct dst_entry *dst;
1538 struct net_device *realdev;
1539 u32 local_port;
1540
1541 if (saddr->local.v6.sin6_family == AF_INET6 &&
1542 saddr->remote.v6.sin6_family == AF_INET6)
1543 is_v6 = 1;
1544 else if (saddr->local.v4.sin_family == AF_INET &&
1545 saddr->remote.v4.sin_family == AF_INET)
1546 is_v6 = 0;
1547 else
1548 return -EINVAL;
1549
1550 clear_bit(SK_F_IPV6, &csk->flags);
1551
1552 if (is_v6) {
1553#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1554 set_bit(SK_F_IPV6, &csk->flags);
1555 err = cnic_get_v6_route(&saddr->remote.v6, &dst);
1556 if (err)
1557 return err;
1558
1559 if (!dst || dst->error || !dst->dev)
1560 goto err_out;
1561
1562 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
1563 sizeof(struct in6_addr));
1564 csk->dst_port = saddr->remote.v6.sin6_port;
1565 local_port = saddr->local.v6.sin6_port;
1566#else
1567 return rc;
1568#endif
1569
1570 } else {
1571 err = cnic_get_v4_route(&saddr->remote.v4, &dst);
1572 if (err)
1573 return err;
1574
1575 if (!dst || dst->error || !dst->dev)
1576 goto err_out;
1577
1578 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
1579 csk->dst_port = saddr->remote.v4.sin_port;
1580 local_port = saddr->local.v4.sin_port;
1581 }
1582
1583 csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
1584 if (realdev != dev->netdev)
1585 goto err_out;
1586
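	/*
	 * A caller-supplied source port is honored only if it falls inside
	 * the driver-managed range [CNIC_LOCAL_PORT_MIN,
	 * CNIC_LOCAL_PORT_MAX) and is still free in csk_port_tbl;
	 * otherwise a fresh port is allocated from that table.
	 */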
1587 if (local_port >= CNIC_LOCAL_PORT_MIN &&
1588 local_port < CNIC_LOCAL_PORT_MAX) {
1589 if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
1590 local_port = 0;
1591 } else
1592 local_port = 0;
1593
1594 if (!local_port) {
1595 local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
1596 if (local_port == -1) {
1597 rc = -ENOMEM;
1598 goto err_out;
1599 }
1600 }
1601 csk->src_port = local_port;
1602
1603 csk->mtu = dst_mtu(dst);
1604 rc = 0;
1605
1606err_out:
1607 dst_release(dst);
1608 return rc;
1609}
1610
1611static void cnic_init_csk_state(struct cnic_sock *csk)
1612{
1613 csk->state = 0;
1614 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1615 clear_bit(SK_F_CLOSING, &csk->flags);
1616}
1617
1618static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1619{
1620 int err = 0;
1621
1622 if (!cnic_in_use(csk))
1623 return -EINVAL;
1624
1625 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
1626 return -EINVAL;
1627
1628 cnic_init_csk_state(csk);
1629
1630 err = cnic_get_route(csk, saddr);
1631 if (err)
1632 goto err_out;
1633
1634 err = cnic_resolve_addr(csk, saddr);
1635 if (!err)
1636 return 0;
1637
1638err_out:
1639 clear_bit(SK_F_CONNECT_START, &csk->flags);
1640 return err;
1641}
1642
1643static int cnic_cm_abort(struct cnic_sock *csk)
1644{
1645 struct cnic_local *cp = csk->dev->cnic_priv;
1646 u32 opcode;
1647
1648 if (!cnic_in_use(csk))
1649 return -EINVAL;
1650
1651 if (cnic_abort_prep(csk))
1652 return cnic_cm_abort_req(csk);
1653
1654 /* Getting here means that we haven't started connect, or
1655 * connect was not successful.
1656 */
1657
1658 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
1659 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1660 opcode = csk->state;
1661 else
1662 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
1663 cp->close_conn(csk, opcode);
1664
1665 return 0;
1666}
1667
1668static int cnic_cm_close(struct cnic_sock *csk)
1669{
1670 if (!cnic_in_use(csk))
1671 return -EINVAL;
1672
1673 if (cnic_close_prep(csk)) {
1674 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
1675 return cnic_cm_close_req(csk);
1676 }
1677 return 0;
1678}
1679
1680static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
1681 u8 opcode)
1682{
1683 struct cnic_ulp_ops *ulp_ops;
1684 int ulp_type = csk->ulp_type;
1685
1686 rcu_read_lock();
1687 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1688 if (ulp_ops) {
1689 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
1690 ulp_ops->cm_connect_complete(csk);
1691 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
1692 ulp_ops->cm_close_complete(csk);
1693 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
1694 ulp_ops->cm_remote_abort(csk);
1695 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
1696 ulp_ops->cm_abort_complete(csk);
1697 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
1698 ulp_ops->cm_remote_close(csk);
1699 }
1700 rcu_read_unlock();
1701}
1702
1703static int cnic_cm_set_pg(struct cnic_sock *csk)
1704{
1705 if (cnic_offld_prep(csk)) {
1706 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1707 cnic_cm_update_pg(csk);
1708 else
1709 cnic_cm_offload_pg(csk);
1710 }
1711 return 0;
1712}
1713
1714static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
1715{
1716 struct cnic_local *cp = dev->cnic_priv;
1717 u32 l5_cid = kcqe->pg_host_opaque;
1718 u8 opcode = kcqe->op_code;
1719 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
1720
1721 csk_hold(csk);
1722 if (!cnic_in_use(csk))
1723 goto done;
1724
1725 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1726 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1727 goto done;
1728 }
1729 csk->pg_cid = kcqe->pg_cid;
1730 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1731 cnic_cm_conn_req(csk);
1732
1733done:
1734 csk_put(csk);
1735}
1736
1737static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
1738{
1739 struct cnic_local *cp = dev->cnic_priv;
1740 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
1741 u8 opcode = l4kcqe->op_code;
1742 u32 l5_cid;
1743 struct cnic_sock *csk;
1744
1745 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
1746 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1747 cnic_cm_process_offld_pg(dev, l4kcqe);
1748 return;
1749 }
1750
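	/*
	 * Ramrod completions (op_code >= L5CM_RAMROD_CMD_ID_BASE, i.e.
	 * bit 7 set) report the connection in the cid field; all other
	 * L4 completions carry it in conn_id.
	 */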
1751 l5_cid = l4kcqe->conn_id;
1752 if (opcode & 0x80)
1753 l5_cid = l4kcqe->cid;
1754 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1755 return;
1756
1757 csk = &cp->csk_tbl[l5_cid];
1758 csk_hold(csk);
1759
1760 if (!cnic_in_use(csk)) {
1761 csk_put(csk);
1762 return;
1763 }
1764
1765 switch (opcode) {
1766 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
1767 if (l4kcqe->status == 0)
1768 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
1769
1770 smp_mb__before_clear_bit();
1771 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1772 cnic_cm_upcall(cp, csk, opcode);
1773 break;
1774
1775 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
1776 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
1777 csk->state = opcode;
1778 /* fall through */
1779 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
1780 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
1781 cp->close_conn(csk, opcode);
1782 break;
1783
1784 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
1785 cnic_cm_upcall(cp, csk, opcode);
1786 break;
1787 }
1788 csk_put(csk);
1789}
1790
1791static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
1792{
1793 struct cnic_dev *dev = data;
1794 int i;
1795
1796 for (i = 0; i < num; i++)
1797 cnic_cm_process_kcqe(dev, kcqe[i]);
1798}
1799
1800static struct cnic_ulp_ops cm_ulp_ops = {
1801 .indicate_kcqes = cnic_cm_indicate_kcqe,
1802};
1803
1804static void cnic_cm_free_mem(struct cnic_dev *dev)
1805{
1806 struct cnic_local *cp = dev->cnic_priv;
1807
1808 kfree(cp->csk_tbl);
1809 cp->csk_tbl = NULL;
1810 cnic_free_id_tbl(&cp->csk_port_tbl);
1811}
1812
1813static int cnic_cm_alloc_mem(struct cnic_dev *dev)
1814{
1815 struct cnic_local *cp = dev->cnic_priv;
1816
1817 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
1818 GFP_KERNEL);
1819 if (!cp->csk_tbl)
1820 return -ENOMEM;
1821
1822 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
1823 CNIC_LOCAL_PORT_MIN)) {
1824 cnic_cm_free_mem(dev);
1825 return -ENOMEM;
1826 }
1827 return 0;
1828}
1829
1830static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
1831{
1832 if ((opcode == csk->state) ||
1833 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
1834 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
1835 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
1836 return 1;
1837 }
1838 return 0;
1839}
1840
1841static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
1842{
1843 struct cnic_dev *dev = csk->dev;
1844 struct cnic_local *cp = dev->cnic_priv;
1845
1846 clear_bit(SK_F_CONNECT_START, &csk->flags);
1847 if (cnic_ready_to_close(csk, opcode)) {
1848 cnic_close_conn(csk);
1849 cnic_cm_upcall(cp, csk, opcode);
1850 }
1851}
1852
1853static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
1854{
1855}
1856
1857static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
1858{
1859 u32 seed;
1860
1861 get_random_bytes(&seed, 4);
1862 cnic_ctx_wr(dev, 45, 0, seed);
1863 return 0;
1864}
1865
1866static int cnic_cm_open(struct cnic_dev *dev)
1867{
1868 struct cnic_local *cp = dev->cnic_priv;
1869 int err;
1870
1871 err = cnic_cm_alloc_mem(dev);
1872 if (err)
1873 return err;
1874
1875 err = cp->start_cm(dev);
1876
1877 if (err)
1878 goto err_out;
1879
1880 dev->cm_create = cnic_cm_create;
1881 dev->cm_destroy = cnic_cm_destroy;
1882 dev->cm_connect = cnic_cm_connect;
1883 dev->cm_abort = cnic_cm_abort;
1884 dev->cm_close = cnic_cm_close;
1885 dev->cm_select_dev = cnic_cm_select_dev;
1886
1887 cp->ulp_handle[CNIC_ULP_L4] = dev;
1888 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
1889 return 0;
1890
1891err_out:
1892 cnic_cm_free_mem(dev);
1893 return err;
1894}
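
/*
 * Rough ULP-side sketch (not part of this patch) of how an offload driver
 * such as bnx2i might use the cm_* entry points wired up above.  It assumes
 * CNIC_ULP_ISCSI from cnic_if.h and leaves cid/l5_cid/ctx selection to the
 * caller; the connect result is delivered later via the cm_connect_complete()
 * upcall.
 */
static int __maybe_unused example_cnic_connect(struct cnic_dev *dev, u32 cid,
					       u32 l5_cid, void *ctx,
					       __be32 daddr, __be16 dport)
{
	struct cnic_sockaddr saddr = { };
	struct cnic_sock *csk;
	int err;

	err = dev->cm_create(dev, CNIC_ULP_ISCSI, cid, l5_cid, &csk, ctx);
	if (err)
		return err;

	saddr.local.v4.sin_family = AF_INET;	/* local port 0: let cnic pick one */
	saddr.remote.v4.sin_family = AF_INET;
	saddr.remote.v4.sin_addr.s_addr = daddr;	/* network byte order */
	saddr.remote.v4.sin_port = dport;

	err = dev->cm_connect(csk, &saddr);
	if (err)
		dev->cm_destroy(csk);
	return err;
}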
1895
1896static int cnic_cm_shutdown(struct cnic_dev *dev)
1897{
1898 struct cnic_local *cp = dev->cnic_priv;
1899 int i;
1900
1901 cp->stop_cm(dev);
1902
1903 if (!cp->csk_tbl)
1904 return 0;
1905
1906 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
1907 struct cnic_sock *csk = &cp->csk_tbl[i];
1908
1909 clear_bit(SK_F_INUSE, &csk->flags);
1910 cnic_cm_cleanup(csk);
1911 }
1912 cnic_cm_free_mem(dev);
1913
1914 return 0;
1915}
1916
1917static void cnic_init_context(struct cnic_dev *dev, u32 cid)
1918{
1919 struct cnic_local *cp = dev->cnic_priv;
1920 u32 cid_addr;
1921 int i;
1922
1923 if (CHIP_NUM(cp) == CHIP_NUM_5709)
1924 return;
1925
1926 cid_addr = GET_CID_ADDR(cid);
1927
1928 for (i = 0; i < CTX_SIZE; i += 4)
1929 cnic_ctx_wr(dev, cid_addr, i, 0);
1930}
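
/*
 * 5706/5708 keep the connection contexts in on-chip memory, so they are
 * cleared word-by-word above; the 5709 instead holds contexts in host
 * memory pages, which cnic_setup_5709_context() below programs into the
 * chip's context page table.
 */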
1931
1932static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
1933{
1934 struct cnic_local *cp = dev->cnic_priv;
1935 int ret = 0, i;
1936 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
1937
1938 if (CHIP_NUM(cp) != CHIP_NUM_5709)
1939 return 0;
1940
1941 for (i = 0; i < cp->ctx_blks; i++) {
1942 int j;
1943 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
1944 u32 val;
1945
1946 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
1947
1948 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1949 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
1950 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1951 (u64) cp->ctx_arr[i].mapping >> 32);
1952 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
1953 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1954 for (j = 0; j < 10; j++) {
1955
1956 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1957 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1958 break;
1959 udelay(5);
1960 }
1961 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1962 ret = -EBUSY;
1963 break;
1964 }
1965 }
1966 return ret;
1967}
1968
1969static void cnic_free_irq(struct cnic_dev *dev)
1970{
1971 struct cnic_local *cp = dev->cnic_priv;
1972 struct cnic_eth_dev *ethdev = cp->ethdev;
1973
1974 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1975 cp->disable_int_sync(dev);
1976 tasklet_disable(&cp->cnic_irq_task);
1977 free_irq(ethdev->irq_arr[0].vector, dev);
1978 }
1979}
1980
1981static int cnic_init_bnx2_irq(struct cnic_dev *dev)
1982{
1983 struct cnic_local *cp = dev->cnic_priv;
1984 struct cnic_eth_dev *ethdev = cp->ethdev;
1985
1986 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1987 int err, i = 0;
1988 int sblk_num = cp->status_blk_num;
1989 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
1990 BNX2_HC_SB_CONFIG_1;
1991
1992 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
1993
1994 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
1995 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
1996 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
1997
1998 cp->bnx2_status_blk = cp->status_blk;
1999 cp->last_status_idx = cp->bnx2_status_blk->status_idx;
2000 tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
2001 (unsigned long) dev);
2002 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
2003 "cnic", dev);
2004 if (err) {
2005 tasklet_disable(&cp->cnic_irq_task);
2006 return err;
2007 }
2008 while (cp->bnx2_status_blk->status_completion_producer_index &&
2009 i < 10) {
2010 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
2011 1 << (11 + sblk_num));
2012 udelay(10);
2013 i++;
2014 barrier();
2015 }
2016 if (cp->bnx2_status_blk->status_completion_producer_index) {
2017 cnic_free_irq(dev);
2018 goto failed;
2019 }
2020
2021 } else {
2022 struct status_block *sblk = cp->status_blk;
2023 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
2024 int i = 0;
2025
2026 while (sblk->status_completion_producer_index && i < 10) {
2027 CNIC_WR(dev, BNX2_HC_COMMAND,
2028 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2029 udelay(10);
2030 i++;
2031 barrier();
2032 }
2033 if (sblk->status_completion_producer_index)
2034 goto failed;
2035
2036 }
2037 return 0;
2038
2039failed:
2040	printk(KERN_ERR PFX "%s: KCQ index not resetting to 0.\n",
2041	       dev->netdev->name);
2042 return -EBUSY;
2043}
2044
2045static void cnic_enable_bnx2_int(struct cnic_dev *dev)
2046{
2047 struct cnic_local *cp = dev->cnic_priv;
2048 struct cnic_eth_dev *ethdev = cp->ethdev;
2049
2050 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2051 return;
2052
2053 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2054 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2055}
2056
2057static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
2058{
2059 struct cnic_local *cp = dev->cnic_priv;
2060 struct cnic_eth_dev *ethdev = cp->ethdev;
2061
2062 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2063 return;
2064
2065 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2066 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2067 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
2068 synchronize_irq(ethdev->irq_arr[0].vector);
2069}
2070
2071static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
2072{
2073 struct cnic_local *cp = dev->cnic_priv;
2074 struct cnic_eth_dev *ethdev = cp->ethdev;
2075 u32 cid_addr, tx_cid, sb_id;
2076 u32 val, offset0, offset1, offset2, offset3;
2077 int i;
2078 struct tx_bd *txbd;
2079 dma_addr_t buf_map;
2080 struct status_block *s_blk = cp->status_blk;
2081
2082 sb_id = cp->status_blk_num;
2083 tx_cid = 20;
2084 cnic_init_context(dev, tx_cid);
2085 cnic_init_context(dev, tx_cid + 1);
2086 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
2087 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2088 struct status_block_msix *sblk = cp->status_blk;
2089
2090 tx_cid = TX_TSS_CID + sb_id - 1;
2091 cnic_init_context(dev, tx_cid);
2092 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
2093 (TX_TSS_CID << 7));
2094 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
2095 }
2096 cp->tx_cons = *cp->tx_cons_ptr;
2097
2098 cid_addr = GET_CID_ADDR(tx_cid);
2099 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
2100 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
2101
2102 for (i = 0; i < PHY_CTX_SIZE; i += 4)
2103 cnic_ctx_wr(dev, cid_addr2, i, 0);
2104
2105 offset0 = BNX2_L2CTX_TYPE_XI;
2106 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
2107 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
2108 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
2109 } else {
2110 offset0 = BNX2_L2CTX_TYPE;
2111 offset1 = BNX2_L2CTX_CMD_TYPE;
2112 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
2113 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
2114 }
2115 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
2116 cnic_ctx_wr(dev, cid_addr, offset0, val);
2117
2118 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
2119 cnic_ctx_wr(dev, cid_addr, offset1, val);
2120
2121 txbd = (struct tx_bd *) cp->l2_ring;
2122
2123 buf_map = cp->l2_buf_map;
2124 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
2125 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
2126 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2127 }
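	/*
	 * The BD left over after the loop is the last one in the page; it
	 * is pointed back at the ring's own base address so the single
	 * page forms a self-chained BD ring, following the usual bnx2
	 * chain layout.
	 */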
2128 val = (u64) cp->l2_ring_map >> 32;
2129 cnic_ctx_wr(dev, cid_addr, offset2, val);
2130 txbd->tx_bd_haddr_hi = val;
2131
2132 val = (u64) cp->l2_ring_map & 0xffffffff;
2133 cnic_ctx_wr(dev, cid_addr, offset3, val);
2134 txbd->tx_bd_haddr_lo = val;
2135}
2136
2137static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
2138{
2139 struct cnic_local *cp = dev->cnic_priv;
2140 struct cnic_eth_dev *ethdev = cp->ethdev;
2141 u32 cid_addr, sb_id, val, coal_reg, coal_val;
2142 int i;
2143 struct rx_bd *rxbd;
2144 struct status_block *s_blk = cp->status_blk;
2145
2146 sb_id = cp->status_blk_num;
2147 cnic_init_context(dev, 2);
2148 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
2149 coal_reg = BNX2_HC_COMMAND;
2150 coal_val = CNIC_RD(dev, coal_reg);
2151 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2152 struct status_block_msix *sblk = cp->status_blk;
2153
2154 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
2155 coal_reg = BNX2_HC_COALESCE_NOW;
2156 coal_val = 1 << (11 + sb_id);
2157 }
2158 i = 0;
2159	while (*cp->rx_cons_ptr == 0 && i < 10) {
2160 CNIC_WR(dev, coal_reg, coal_val);
2161 udelay(10);
2162 i++;
2163 barrier();
2164 }
2165 cp->rx_cons = *cp->rx_cons_ptr;
2166
2167 cid_addr = GET_CID_ADDR(2);
2168 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
2169 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
2170 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
2171
2172 if (sb_id == 0)
2173 val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
2174 else
2175 val = BNX2_L2CTX_STATUSB_NUM(sb_id);
2176 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
2177
2178 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
2179 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
2180 dma_addr_t buf_map;
2181 int n = (i % cp->l2_rx_ring_size) + 1;
2182
2183 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
2184 rxbd->rx_bd_len = cp->l2_single_buf_size;
2185 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
2186 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
2187 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2188 }
2189 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
2190 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
2191 rxbd->rx_bd_haddr_hi = val;
2192
2193 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
2194 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
2195 rxbd->rx_bd_haddr_lo = val;
2196
2197 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
2198 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
2199}
2200
2201static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
2202{
2203 struct kwqe *wqes[1], l2kwqe;
2204
2205 memset(&l2kwqe, 0, sizeof(l2kwqe));
2206 wqes[0] = &l2kwqe;
2207 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
2208 (L2_KWQE_OPCODE_VALUE_FLUSH <<
2209 KWQE_OPCODE_SHIFT) | 2;
2210 dev->submit_kwqes(dev, wqes, 1);
2211}
2212
2213static void cnic_set_bnx2_mac(struct cnic_dev *dev)
2214{
2215 struct cnic_local *cp = dev->cnic_priv;
2216 u32 val;
2217
2218 val = cp->func << 2;
2219
2220 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
2221
2222 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2223 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
2224 dev->mac_addr[0] = (u8) (val >> 8);
2225 dev->mac_addr[1] = (u8) val;
2226
2227 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
2228
2229 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2230 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
2231 dev->mac_addr[2] = (u8) (val >> 24);
2232 dev->mac_addr[3] = (u8) (val >> 16);
2233 dev->mac_addr[4] = (u8) (val >> 8);
2234 dev->mac_addr[5] = (u8) val;
2235
2236 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
2237
2238 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
2239 if (CHIP_NUM(cp) != CHIP_NUM_5709)
2240 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
2241
2242 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
2243 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
2244 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
2245}
2246
2247static int cnic_start_bnx2_hw(struct cnic_dev *dev)
2248{
2249 struct cnic_local *cp = dev->cnic_priv;
2250 struct cnic_eth_dev *ethdev = cp->ethdev;
2251 struct status_block *sblk = cp->status_blk;
2252 u32 val;
2253 int err;
2254
2255 cnic_set_bnx2_mac(dev);
2256
2257 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
2258 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
2259 if (BCM_PAGE_BITS > 12)
2260 val |= (12 - 8) << 4;
2261 else
2262 val |= (BCM_PAGE_BITS - 8) << 4;
2263
2264 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
2265
2266 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
2267 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
2268 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
2269
2270 err = cnic_setup_5709_context(dev, 1);
2271 if (err)
2272 return err;
2273
2274 cnic_init_context(dev, KWQ_CID);
2275 cnic_init_context(dev, KCQ_CID);
2276
2277 cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
2278 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
2279
2280 cp->max_kwq_idx = MAX_KWQ_IDX;
2281 cp->kwq_prod_idx = 0;
2282 cp->kwq_con_idx = 0;
2283 cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
2284
2285 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
2286 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
2287 else
2288 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
2289
2290 /* Initialize the kernel work queue context. */
2291 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2292 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2293 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
2294
2295 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
2296 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2297
2298 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
2299 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2300
2301 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
2302 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2303
2304 val = (u32) cp->kwq_info.pgtbl_map;
2305 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2306
2307 cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
2308 cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
2309
2310 cp->kcq_prod_idx = 0;
2311
2312	/* Initialize the kernel completion queue context. */
2313 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2314 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2315 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
2316
2317 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
2318 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2319
2320 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
2321 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2322
2323 val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
2324 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2325
2326 val = (u32) cp->kcq_info.pgtbl_map;
2327 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2328
2329 cp->int_num = 0;
2330 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2331 u32 sb_id = cp->status_blk_num;
2332 u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
2333
2334 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
2335 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2336 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2337 }
2338
2339	/* Enable Command Scheduler notification when we write to the
2340	 * host producer index of the kernel contexts. */
2341 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
2342
2343 /* Enable Command Scheduler notification when we write to either
2344 * the Send Queue or Receive Queue producer indexes of the kernel
2345 * bypass contexts. */
2346 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
2347 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
2348
2349	/* Notify COM when the driver posts an application buffer. */
2350 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
2351
2352	/* Set the CP and COM doorbells.  These two processors poll the
2353	 * doorbell for a non-zero value before running.  This must be done
2354	 * after setting up the kernel queue contexts. */
2355 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
2356 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
2357
2358 cnic_init_bnx2_tx_ring(dev);
2359 cnic_init_bnx2_rx_ring(dev);
2360
2361 err = cnic_init_bnx2_irq(dev);
2362 if (err) {
2363 printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
2364 dev->netdev->name);
2365 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2366 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2367 return err;
2368 }
2369
2370 return 0;
2371}
2372
2373static int cnic_start_hw(struct cnic_dev *dev)
2374{
2375 struct cnic_local *cp = dev->cnic_priv;
2376 struct cnic_eth_dev *ethdev = cp->ethdev;
2377 int err;
2378
2379 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
2380 return -EALREADY;
2381
2382 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
2383 if (err) {
2384 printk(KERN_ERR PFX "%s: register_cnic failed\n",
2385 dev->netdev->name);
2386 goto err2;
2387 }
2388
2389 dev->regview = ethdev->io_base;
2390 cp->chip_id = ethdev->chip_id;
2391 pci_dev_get(dev->pcidev);
2392 cp->func = PCI_FUNC(dev->pcidev->devfn);
2393 cp->status_blk = ethdev->irq_arr[0].status_blk;
2394 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
2395
2396 err = cp->alloc_resc(dev);
2397 if (err) {
2398 printk(KERN_ERR PFX "%s: allocate resource failure\n",
2399 dev->netdev->name);
2400 goto err1;
2401 }
2402
2403 err = cp->start_hw(dev);
2404 if (err)
2405 goto err1;
2406
2407 err = cnic_cm_open(dev);
2408 if (err)
2409 goto err1;
2410
2411 set_bit(CNIC_F_CNIC_UP, &dev->flags);
2412
2413 cp->enable_int(dev);
2414
2415 return 0;
2416
2417err1:
2418 ethdev->drv_unregister_cnic(dev->netdev);
2419 cp->free_resc(dev);
2420 pci_dev_put(dev->pcidev);
2421err2:
2422 return err;
2423}
2424
2425static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
2426{
2427 struct cnic_local *cp = dev->cnic_priv;
2428 struct cnic_eth_dev *ethdev = cp->ethdev;
2429
2430 cnic_disable_bnx2_int_sync(dev);
2431
2432 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2433 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2434
2435 cnic_init_context(dev, KWQ_CID);
2436 cnic_init_context(dev, KCQ_CID);
2437
2438 cnic_setup_5709_context(dev, 0);
2439 cnic_free_irq(dev);
2440
2441 ethdev->drv_unregister_cnic(dev->netdev);
2442
2443 cnic_free_resc(dev);
2444}
2445
2446static void cnic_stop_hw(struct cnic_dev *dev)
2447{
2448 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2449 struct cnic_local *cp = dev->cnic_priv;
2450
2451 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
2452 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
2453 synchronize_rcu();
2454 cnic_cm_shutdown(dev);
2455 cp->stop_hw(dev);
2456 pci_dev_put(dev->pcidev);
2457 }
2458}
2459
2460static void cnic_free_dev(struct cnic_dev *dev)
2461{
2462 int i = 0;
2463
2464 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
2465 msleep(100);
2466 i++;
2467 }
2468 if (atomic_read(&dev->ref_count) != 0)
2469 printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
2470 " to zero.\n", dev->netdev->name);
2471
2472 printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
2473 dev_put(dev->netdev);
2474 kfree(dev);
2475}
2476
2477static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
2478 struct pci_dev *pdev)
2479{
2480 struct cnic_dev *cdev;
2481 struct cnic_local *cp;
2482 int alloc_size;
2483
2484 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
2485
2486	cdev = kzalloc(alloc_size, GFP_KERNEL);
2487 if (cdev == NULL) {
2488 printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
2489 dev->name);
2490 return NULL;
2491 }
2492
2493 cdev->netdev = dev;
2494 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
2495 cdev->register_device = cnic_register_device;
2496 cdev->unregister_device = cnic_unregister_device;
2497 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
2498
2499 cp = cdev->cnic_priv;
2500 cp->dev = cdev;
2501 cp->uio_dev = -1;
2502 cp->l2_single_buf_size = 0x400;
2503 cp->l2_rx_ring_size = 3;
2504
2505 spin_lock_init(&cp->cnic_ulp_lock);
2506
2507 printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
2508
2509 return cdev;
2510}
2511
2512static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
2513{
2514 struct pci_dev *pdev;
2515 struct cnic_dev *cdev;
2516 struct cnic_local *cp;
2517 struct cnic_eth_dev *ethdev = NULL;
2518 struct cnic_eth_dev *(*probe)(void *) = NULL;
2519
2520 probe = __symbol_get("bnx2_cnic_probe");
2521 if (probe) {
2522 ethdev = (*probe)(dev);
2523 symbol_put_addr(probe);
2524 }
2525 if (!ethdev)
2526 return NULL;
2527
2528 pdev = ethdev->pdev;
2529 if (!pdev)
2530 return NULL;
2531
2532 dev_hold(dev);
2533 pci_dev_get(pdev);
2534 if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
2535 pdev->device == PCI_DEVICE_ID_NX2_5709S) {
2536 u8 rev;
2537
2538 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
2539 if (rev < 0x10) {
2540 pci_dev_put(pdev);
2541 goto cnic_err;
2542 }
2543 }
2544 pci_dev_put(pdev);
2545
2546 cdev = cnic_alloc_dev(dev, pdev);
2547 if (cdev == NULL)
2548 goto cnic_err;
2549
2550 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
2551 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
2552
2553 cp = cdev->cnic_priv;
2554 cp->ethdev = ethdev;
2555 cdev->pcidev = pdev;
2556
2557 cp->cnic_ops = &cnic_bnx2_ops;
2558 cp->start_hw = cnic_start_bnx2_hw;
2559 cp->stop_hw = cnic_stop_bnx2_hw;
2560 cp->setup_pgtbl = cnic_setup_page_tbl;
2561 cp->alloc_resc = cnic_alloc_bnx2_resc;
2562 cp->free_resc = cnic_free_resc;
2563 cp->start_cm = cnic_cm_init_bnx2_hw;
2564 cp->stop_cm = cnic_cm_stop_bnx2_hw;
2565 cp->enable_int = cnic_enable_bnx2_int;
2566 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
2567 cp->close_conn = cnic_close_bnx2_conn;
2568 cp->next_idx = cnic_bnx2_next_idx;
2569 cp->hw_idx = cnic_bnx2_hw_idx;
2570 return cdev;
2571
2572cnic_err:
2573 dev_put(dev);
2574 return NULL;
2575}
2576
2577static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2578{
2579 struct ethtool_drvinfo drvinfo;
2580 struct cnic_dev *cdev = NULL;
2581
2582 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
2583 memset(&drvinfo, 0, sizeof(drvinfo));
2584 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
2585
2586 if (!strcmp(drvinfo.driver, "bnx2"))
2587 cdev = init_bnx2_cnic(dev);
2588 if (cdev) {
2589 write_lock(&cnic_dev_lock);
2590 list_add(&cdev->list, &cnic_dev_list);
2591 write_unlock(&cnic_dev_lock);
2592 }
2593 }
2594 return cdev;
2595}
2596
2597/*
2598 * netdev event handler
2599 */
2600static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2601 void *ptr)
2602{
2603 struct net_device *netdev = ptr;
2604 struct cnic_dev *dev;
2605 int if_type;
2606 int new_dev = 0;
2607
2608 dev = cnic_from_netdev(netdev);
2609
2610 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
2611 /* Check for the hot-plug device */
2612 dev = is_cnic_dev(netdev);
2613 if (dev) {
2614 new_dev = 1;
2615 cnic_hold(dev);
2616 }
2617 }
2618 if (dev) {
2619 struct cnic_local *cp = dev->cnic_priv;
2620
2621 if (new_dev)
2622 cnic_ulp_init(dev);
2623 else if (event == NETDEV_UNREGISTER)
2624 cnic_ulp_exit(dev);
2625 else if (event == NETDEV_UP) {
2626 mutex_lock(&cnic_lock);
2627 if (!cnic_start_hw(dev))
2628 cnic_ulp_start(dev);
2629 mutex_unlock(&cnic_lock);
2630 }
2631
2632 rcu_read_lock();
2633 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2634 struct cnic_ulp_ops *ulp_ops;
2635 void *ctx;
2636
2637 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
2638 if (!ulp_ops || !ulp_ops->indicate_netevent)
2639 continue;
2640
2641 ctx = cp->ulp_handle[if_type];
2642
2643 ulp_ops->indicate_netevent(ctx, event);
2644 }
2645 rcu_read_unlock();
2646
2647 if (event == NETDEV_GOING_DOWN) {
2648 mutex_lock(&cnic_lock);
2649 cnic_ulp_stop(dev);
2650 cnic_stop_hw(dev);
2651 mutex_unlock(&cnic_lock);
2652 } else if (event == NETDEV_UNREGISTER) {
2653 write_lock(&cnic_dev_lock);
2654 list_del_init(&dev->list);
2655 write_unlock(&cnic_dev_lock);
2656
2657 cnic_put(dev);
2658 cnic_free_dev(dev);
2659 goto done;
2660 }
2661 cnic_put(dev);
2662 }
2663done:
2664 return NOTIFY_DONE;
2665}
2666
2667static struct notifier_block cnic_netdev_notifier = {
2668 .notifier_call = cnic_netdev_event
2669};
2670
2671static void cnic_release(void)
2672{
2673 struct cnic_dev *dev;
2674
2675 while (!list_empty(&cnic_dev_list)) {
2676 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
2677 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2678 cnic_ulp_stop(dev);
2679 cnic_stop_hw(dev);
2680 }
2681
2682 cnic_ulp_exit(dev);
2683 list_del_init(&dev->list);
2684 cnic_free_dev(dev);
2685 }
2686}
2687
2688static int __init cnic_init(void)
2689{
2690 int rc = 0;
2691
2692 printk(KERN_INFO "%s", version);
2693
2694 rc = register_netdevice_notifier(&cnic_netdev_notifier);
2695 if (rc) {
2696 cnic_release();
2697 return rc;
2698 }
2699
2700 return 0;
2701}
2702
2703static void __exit cnic_exit(void)
2704{
2705 unregister_netdevice_notifier(&cnic_netdev_notifier);
2706 cnic_release();
2707 return;
2708}
2709
2710module_init(cnic_init);
2711module_exit(cnic_exit);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
new file mode 100644
index 000000000000..5192d4a9df5a
--- /dev/null
+++ b/drivers/net/cnic.h
@@ -0,0 +1,299 @@
1/* cnic.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_H
13#define CNIC_H
14
15#define KWQ_PAGE_CNT 4
16#define KCQ_PAGE_CNT 16
17
18#define KWQ_CID 24
19#define KCQ_CID 25
20
21/*
22 * krnlq_context definition
23 */
24#define L5_KRNLQ_FLAGS 0x00000000
25#define L5_KRNLQ_SIZE 0x00000000
26#define L5_KRNLQ_TYPE 0x00000000
27#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
28#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
29#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
30#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
31#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
32#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
33#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
34#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
35#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
36#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
37#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
38#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
39#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
40#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
41#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
42#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
43#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
44#define KRNLQ_TYPE_TYPE (0xf<<28)
45#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
46#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
47
48#define L5_KRNLQ_HOST_QIDX 0x00000004
49#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
50#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
51#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
52#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
53#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
54#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
55#define L5_KRNLQ_NX_PG_QIDX 0x00000018
56#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
57#define L5_KRNLQ_QIDX_INCR 0x0000001c
58#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
59#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
60
61#define BNX2_PG_CTX_MAP 0x1a0034
62#define BNX2_ISCSI_CTX_MAP 0x1a0074
63
64struct cnic_redirect_entry {
65 struct dst_entry *old_dst;
66 struct dst_entry *new_dst;
67};
68
69#define MAX_COMPLETED_KCQE 64
70
71#define MAX_CNIC_L5_CONTEXT 256
72
73#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
74
75#define MAX_ISCSI_TBL_SZ 256
76
77#define CNIC_LOCAL_PORT_MIN 60000
78#define CNIC_LOCAL_PORT_MAX 61000
79#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
80
81#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
82#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
83#define MAX_KWQE_CNT (KWQE_CNT - 1)
84#define MAX_KCQE_CNT (KCQE_CNT - 1)
85
86#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
87#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
88
89#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
90#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
91
92#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
93#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
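
/*
 * Worked example, assuming 4 KiB pages (BCM_PAGE_BITS = 12) and the
 * 32-byte queue entries implied by the 5-bit shift: KWQE_CNT = 128 and
 * MAX_KWQE_CNT = 127, so ring index 300 maps to KWQ_PG(300) =
 * (300 & ~127) >> 7 = 2 and KWQ_IDX(300) = 300 & 127 = 44, i.e. page 2,
 * entry 44 (2 * 128 + 44 = 300).  The KCQ macros work the same way.
 */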
94
95#define BNX2X_NEXT_KCQE(x) ((((x) & (MAX_KCQE_CNT - 1)) ==		\
96		(MAX_KCQE_CNT - 1)) ?					\
97		(x) + 2 : (x) + 1)
98
99#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
100#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
101#define BNX2X_KWQ_DATA(cp, x) \
102 &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
103
104#define DEF_IPID_COUNT 0xc001
105
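/*
 * Default per-connection TCP parameters; cnic_cm_create() in cnic.c
 * copies the DEF_KA_*, DEF_TOS, DEF_TTL, DEF_SND_SEQ_SCALE, DEF_RCV_BUF,
 * DEF_SND_BUF and DEF_SEED values into each new cnic_sock, so ULPs get
 * working keep-alive and window settings without setting them explicitly
 * (the keep-alive values appear to be in milliseconds).
 */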
106#define DEF_KA_TIMEOUT 10000
107#define DEF_KA_INTERVAL 300000
108#define DEF_KA_MAX_PROBE_COUNT 3
109#define DEF_TOS 0
110#define DEF_TTL 0xfe
111#define DEF_SND_SEQ_SCALE 0
112#define DEF_RCV_BUF 0xffff
113#define DEF_SND_BUF 0xffff
114#define DEF_SEED 0
115#define DEF_MAX_RT_TIME 500
116#define DEF_MAX_DA_COUNT 2
117#define DEF_SWS_TIMER 1000
118#define DEF_MAX_CWND 0xffff
119
120struct cnic_ctx {
121 u32 cid;
122 void *ctx;
123 dma_addr_t mapping;
124};
125
126#define BNX2_MAX_CID 0x2000
127
128struct cnic_dma {
129 int num_pages;
130 void **pg_arr;
131 dma_addr_t *pg_map_arr;
132 int pgtbl_size;
133 u32 *pgtbl;
134 dma_addr_t pgtbl_map;
135};
136
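/*
 * Small ID allocator: hands out values in [start, start + max) using the
 * "table" bitmap under "lock".  cnic.c uses one instance (csk_port_tbl)
 * to manage the CNIC_LOCAL_PORT_MIN..CNIC_LOCAL_PORT_MAX source-port
 * range for offloaded TCP connections.
 */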
137struct cnic_id_tbl {
138 spinlock_t lock;
139 u32 start;
140 u32 max;
141 u32 next;
142 unsigned long *table;
143};
144
145#define CNIC_KWQ16_DATA_SIZE 128
146
147struct kwqe_16_data {
148 u8 data[CNIC_KWQ16_DATA_SIZE];
149};
150
151struct cnic_iscsi {
152 struct cnic_dma task_array_info;
153 struct cnic_dma r2tq_info;
154 struct cnic_dma hq_info;
155};
156
157struct cnic_context {
158 u32 cid;
159 struct kwqe_16_data *kwqe_data;
160 dma_addr_t kwqe_data_mapping;
161 wait_queue_head_t waitq;
162 int wait_cond;
163 unsigned long timestamp;
164 u32 ctx_flags;
165#define CTX_FL_OFFLD_START 0x00000001
166 u8 ulp_proto_id;
167 union {
168 struct cnic_iscsi *iscsi;
169 } proto;
170};
171
172struct cnic_local {
173
174 spinlock_t cnic_ulp_lock;
175 void *ulp_handle[MAX_CNIC_ULP_TYPE];
176 unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
177#define ULP_F_INIT 0
178#define ULP_F_START 1
179 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
180
181 /* protected by ulp_lock */
182 u32 cnic_local_flags;
183#define CNIC_LCL_FL_KWQ_INIT 0x00000001
184
185 struct cnic_dev *dev;
186
187 struct cnic_eth_dev *ethdev;
188
189 void *l2_ring;
190 dma_addr_t l2_ring_map;
191 int l2_ring_size;
192 int l2_rx_ring_size;
193
194 void *l2_buf;
195 dma_addr_t l2_buf_map;
196 int l2_buf_size;
197 int l2_single_buf_size;
198
199 u16 *rx_cons_ptr;
200 u16 *tx_cons_ptr;
201 u16 rx_cons;
202 u16 tx_cons;
203
204 u32 kwq_cid_addr;
205 u32 kcq_cid_addr;
206
207 struct cnic_dma kwq_info;
208 struct kwqe **kwq;
209
210 struct cnic_dma kwq_16_data_info;
211
212 u16 max_kwq_idx;
213
214 u16 kwq_prod_idx;
215 u32 kwq_io_addr;
216
217 u16 *kwq_con_idx_ptr;
218 u16 kwq_con_idx;
219
220 struct cnic_dma kcq_info;
221 struct kcqe **kcq;
222
223 u16 kcq_prod_idx;
224 u32 kcq_io_addr;
225
226 void *status_blk;
227 struct status_block_msix *bnx2_status_blk;
228 struct host_status_block *bnx2x_status_blk;
229
230 u32 status_blk_num;
231 u32 int_num;
232 u32 last_status_idx;
233 struct tasklet_struct cnic_irq_task;
234
235 struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
236
237 struct cnic_sock *csk_tbl;
238 struct cnic_id_tbl csk_port_tbl;
239
240 struct cnic_dma conn_buf_info;
241 struct cnic_dma gbl_buf_info;
242
243 struct cnic_iscsi *iscsi_tbl;
244 struct cnic_context *ctx_tbl;
245 struct cnic_id_tbl cid_tbl;
246 int max_iscsi_conn;
247 atomic_t iscsi_conn;
248
249 /* per connection parameters */
250 int num_iscsi_tasks;
251 int num_ccells;
252 int task_array_size;
253 int r2tq_size;
254 int hq_size;
255 int num_cqs;
256
257 struct cnic_ctx *ctx_arr;
258 int ctx_blks;
259 int ctx_blk_size;
260 int cids_per_blk;
261
262 u32 chip_id;
263 int func;
264 u32 shmem_base;
265
266 u32 uio_dev;
267 struct uio_info *cnic_uinfo;
268
269 struct cnic_ops *cnic_ops;
270 int (*start_hw)(struct cnic_dev *);
271 void (*stop_hw)(struct cnic_dev *);
272 void (*setup_pgtbl)(struct cnic_dev *,
273 struct cnic_dma *);
274 int (*alloc_resc)(struct cnic_dev *);
275 void (*free_resc)(struct cnic_dev *);
276 int (*start_cm)(struct cnic_dev *);
277 void (*stop_cm)(struct cnic_dev *);
278 void (*enable_int)(struct cnic_dev *);
279 void (*disable_int_sync)(struct cnic_dev *);
280 void (*ack_int)(struct cnic_dev *);
281 void (*close_conn)(struct cnic_sock *, u32 opcode);
282 u16 (*next_idx)(u16);
283 u16 (*hw_idx)(u16);
284};
285
286struct bnx2x_bd_chain_next {
287 u32 addr_lo;
288 u32 addr_hi;
289 u8 reserved[8];
290};
291
292#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
293#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
294
295#define CDU_REGION_NUMBER_XCM_AG 2
296#define CDU_REGION_NUMBER_UCM_AG 4
297
298#endif
299
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 000000000000..cee80f694457
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@
1
2/* cnic_defs.h: Broadcom CNIC core network driver.
3 *
4 * Copyright (c) 2006-2009 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 */
11
12#ifndef CNIC_DEFS_H
13#define CNIC_DEFS_H
14
15/* KWQ (kernel work queue) request op codes */
16#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
17
18#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
19#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
20#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
21#define L4_KWQE_OPCODE_VALUE_RESET (53)
22#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
23#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
24#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
25
26#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
27#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
28#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
29
30#define L5CM_RAMROD_CMD_ID_BASE (0x80)
31#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
32#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
33#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
34#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
35#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
36
37/* KCQ (kernel completion queue) response op codes */
38#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
39#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
40#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
41#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
42#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
43#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
44#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
45
46#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
47#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
48#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
49
50/* KCQ (kernel completion queue) completion status */
51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
53
54#define L4_LAYER_CODE (4)
55#define L2_LAYER_CODE (2)
56
57/*
58 * L4 KCQ CQE
59 */
60struct l4_kcq {
61 u32 cid;
62 u32 pg_cid;
63 u32 conn_id;
64 u32 pg_host_opaque;
65#if defined(__BIG_ENDIAN)
66 u16 status;
67 u16 reserved1;
68#elif defined(__LITTLE_ENDIAN)
69 u16 reserved1;
70 u16 status;
71#endif
72 u32 reserved2[2];
73#if defined(__BIG_ENDIAN)
74 u8 flags;
75#define L4_KCQ_RESERVED3 (0x7<<0)
76#define L4_KCQ_RESERVED3_SHIFT 0
77#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
78#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
79#define L4_KCQ_LAYER_CODE (0x7<<4)
80#define L4_KCQ_LAYER_CODE_SHIFT 4
81#define L4_KCQ_RESERVED4 (0x1<<7)
82#define L4_KCQ_RESERVED4_SHIFT 7
83 u8 op_code;
84 u16 qe_self_seq;
85#elif defined(__LITTLE_ENDIAN)
86 u16 qe_self_seq;
87 u8 op_code;
88 u8 flags;
89#define L4_KCQ_RESERVED3 (0x7<<0)
90#define L4_KCQ_RESERVED3_SHIFT 0
91#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
92#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
93#define L4_KCQ_LAYER_CODE (0x7<<4)
94#define L4_KCQ_LAYER_CODE_SHIFT 4
95#define L4_KCQ_RESERVED4 (0x1<<7)
96#define L4_KCQ_RESERVED4_SHIFT 7
97#endif
98};
99
100
101/*
102 * L4 KCQ CQE PG upload
103 */
104struct l4_kcq_upload_pg {
105 u32 pg_cid;
106#if defined(__BIG_ENDIAN)
107 u16 pg_status;
108 u16 pg_ipid_count;
109#elif defined(__LITTLE_ENDIAN)
110 u16 pg_ipid_count;
111 u16 pg_status;
112#endif
113 u32 reserved1[5];
114#if defined(__BIG_ENDIAN)
115 u8 flags;
116#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
117#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
118#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
119#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
120#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
121#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
122 u8 op_code;
123 u16 qe_self_seq;
124#elif defined(__LITTLE_ENDIAN)
125 u16 qe_self_seq;
126 u8 op_code;
127 u8 flags;
128#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
129#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
130#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
131#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
132#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
133#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
134#endif
135};
136
137
138/*
139 * Request to gracefully close the connection
140 */
141struct l4_kwq_close_req {
142#if defined(__BIG_ENDIAN)
143 u8 flags;
144#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
145#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
146#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
147#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
148#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
149#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
150 u8 op_code;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 op_code;
155 u8 flags;
156#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
157#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
158#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
159#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
160#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
161#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
162#endif
163 u32 cid;
164 u32 reserved2[6];
165};
166
167
168/*
169 * The first request to be passed in order to establish a connection in option2
170 */
171struct l4_kwq_connect_req1 {
172#if defined(__BIG_ENDIAN)
173 u8 flags;
174#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
175#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
176#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
177#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
178#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
179#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
180 u8 op_code;
181 u8 reserved0;
182 u8 conn_flags;
183#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
184#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
185#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
186#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
187#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
188#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
189#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
190#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
191#elif defined(__LITTLE_ENDIAN)
192 u8 conn_flags;
193#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
194#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
195#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
196#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
197#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
198#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
199#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
200#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
201 u8 reserved0;
202 u8 op_code;
203 u8 flags;
204#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
205#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
206#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
207#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
208#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
209#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
210#endif
211 u32 cid;
212 u32 pg_cid;
213 u32 src_ip;
214 u32 dst_ip;
215#if defined(__BIG_ENDIAN)
216 u16 dst_port;
217 u16 src_port;
218#elif defined(__LITTLE_ENDIAN)
219 u16 src_port;
220 u16 dst_port;
221#endif
222#if defined(__BIG_ENDIAN)
223 u8 rsrv1[3];
224 u8 tcp_flags;
225#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
226#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
227#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
228#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
229#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
230#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
231#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
232#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
233#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
234#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
235#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
236#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
237#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
238#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
239#elif defined(__LITTLE_ENDIAN)
240 u8 tcp_flags;
241#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
242#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
243#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
244#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
245#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
246#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
247#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
248#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
249#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
250#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
251#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
252#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
253#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
254#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
255 u8 rsrv1[3];
256#endif
257 u32 rsrv2;
258};
259
260
261/*
 262 * The second (optional) request to be passed in order to establish
263 * connection in option2 - for IPv6 only
264 */
265struct l4_kwq_connect_req2 {
266#if defined(__BIG_ENDIAN)
267 u8 flags;
268#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
269#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
270#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
271#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
272#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
273#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
274 u8 op_code;
275 u8 reserved0;
276 u8 rsrv;
277#elif defined(__LITTLE_ENDIAN)
278 u8 rsrv;
279 u8 reserved0;
280 u8 op_code;
281 u8 flags;
282#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
283#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
284#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
285#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
286#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
287#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
288#endif
289 u32 reserved2;
290 u32 src_ip_v6_2;
291 u32 src_ip_v6_3;
292 u32 src_ip_v6_4;
293 u32 dst_ip_v6_2;
294 u32 dst_ip_v6_3;
295 u32 dst_ip_v6_4;
296};
297
298
299/*
 300 * The third (and last) request to be passed in order to establish
301 * connection in option2
302 */
303struct l4_kwq_connect_req3 {
304#if defined(__BIG_ENDIAN)
305 u8 flags;
306#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
307#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
308#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
309#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
310#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
311#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
312 u8 op_code;
313 u16 reserved0;
314#elif defined(__LITTLE_ENDIAN)
315 u16 reserved0;
316 u8 op_code;
317 u8 flags;
318#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
319#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
320#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
321#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
322#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
323#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
324#endif
325 u32 ka_timeout;
 326 u32 ka_interval;
327#if defined(__BIG_ENDIAN)
328 u8 snd_seq_scale;
329 u8 ttl;
330 u8 tos;
331 u8 ka_max_probe_count;
332#elif defined(__LITTLE_ENDIAN)
333 u8 ka_max_probe_count;
334 u8 tos;
335 u8 ttl;
336 u8 snd_seq_scale;
337#endif
338#if defined(__BIG_ENDIAN)
339 u16 pmtu;
340 u16 mss;
341#elif defined(__LITTLE_ENDIAN)
342 u16 mss;
343 u16 pmtu;
344#endif
345 u32 rcv_buf;
346 u32 snd_buf;
347 u32 seed;
348};
349
350
351/*
352 * a KWQE request to offload a PG connection
353 */
354struct l4_kwq_offload_pg {
355#if defined(__BIG_ENDIAN)
356 u8 flags;
357#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
358#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
359#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
360#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
361#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
362#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
363 u8 op_code;
364 u16 reserved0;
365#elif defined(__LITTLE_ENDIAN)
366 u16 reserved0;
367 u8 op_code;
368 u8 flags;
369#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
370#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
371#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
372#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
373#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
374#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
375#endif
376#if defined(__BIG_ENDIAN)
377 u8 l2hdr_nbytes;
378 u8 pg_flags;
379#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
380#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
381#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
382#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
383#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
384#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
385 u8 da0;
386 u8 da1;
387#elif defined(__LITTLE_ENDIAN)
388 u8 da1;
389 u8 da0;
390 u8 pg_flags;
391#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
392#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
393#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
394#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
395#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
396#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
397 u8 l2hdr_nbytes;
398#endif
399#if defined(__BIG_ENDIAN)
400 u8 da2;
401 u8 da3;
402 u8 da4;
403 u8 da5;
404#elif defined(__LITTLE_ENDIAN)
405 u8 da5;
406 u8 da4;
407 u8 da3;
408 u8 da2;
409#endif
410#if defined(__BIG_ENDIAN)
411 u8 sa0;
412 u8 sa1;
413 u8 sa2;
414 u8 sa3;
415#elif defined(__LITTLE_ENDIAN)
416 u8 sa3;
417 u8 sa2;
418 u8 sa1;
419 u8 sa0;
420#endif
421#if defined(__BIG_ENDIAN)
422 u8 sa4;
423 u8 sa5;
424 u16 etype;
425#elif defined(__LITTLE_ENDIAN)
426 u16 etype;
427 u8 sa5;
428 u8 sa4;
429#endif
430#if defined(__BIG_ENDIAN)
431 u16 vlan_tag;
432 u16 ipid_start;
433#elif defined(__LITTLE_ENDIAN)
434 u16 ipid_start;
435 u16 vlan_tag;
436#endif
437#if defined(__BIG_ENDIAN)
438 u16 ipid_count;
439 u16 reserved3;
440#elif defined(__LITTLE_ENDIAN)
441 u16 reserved3;
442 u16 ipid_count;
443#endif
444 u32 host_opaque;
445};
446
447
448/*
449 * Abortively close the connection request
450 */
451struct l4_kwq_reset_req {
452#if defined(__BIG_ENDIAN)
453 u8 flags;
454#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
455#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
456#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
457#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
458#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
459#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
460 u8 op_code;
461 u16 reserved0;
462#elif defined(__LITTLE_ENDIAN)
463 u16 reserved0;
464 u8 op_code;
465 u8 flags;
466#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
467#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
468#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
469#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
470#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
471#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
472#endif
473 u32 cid;
474 u32 reserved2[6];
475};
476
477
478/*
479 * a KWQE request to update a PG connection
480 */
481struct l4_kwq_update_pg {
482#if defined(__BIG_ENDIAN)
483 u8 flags;
484#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
485#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
486#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
487#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
488#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
489#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
490 u8 opcode;
491 u16 oper16;
492#elif defined(__LITTLE_ENDIAN)
493 u16 oper16;
494 u8 opcode;
495 u8 flags;
496#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
497#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
498#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
499#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
500#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
501#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
502#endif
503 u32 pg_cid;
504 u32 pg_host_opaque;
505#if defined(__BIG_ENDIAN)
506 u8 pg_valids;
507#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
508#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
509#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
510#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
511#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
512#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
513 u8 pg_unused_a;
514 u16 pg_ipid_count;
515#elif defined(__LITTLE_ENDIAN)
516 u16 pg_ipid_count;
517 u8 pg_unused_a;
518 u8 pg_valids;
519#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
520#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
521#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
522#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
523#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
524#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
525#endif
526#if defined(__BIG_ENDIAN)
527 u16 reserverd3;
528 u8 da0;
529 u8 da1;
530#elif defined(__LITTLE_ENDIAN)
531 u8 da1;
532 u8 da0;
533 u16 reserverd3;
534#endif
535#if defined(__BIG_ENDIAN)
536 u8 da2;
537 u8 da3;
538 u8 da4;
539 u8 da5;
540#elif defined(__LITTLE_ENDIAN)
541 u8 da5;
542 u8 da4;
543 u8 da3;
544 u8 da2;
545#endif
546 u32 reserved4;
547 u32 reserved5;
548};
549
550
551/*
552 * a KWQE request to upload a PG or L4 context
553 */
554struct l4_kwq_upload {
555#if defined(__BIG_ENDIAN)
556 u8 flags;
557#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
558#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
559#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
560#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
561#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
562#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
563 u8 opcode;
564 u16 oper16;
565#elif defined(__LITTLE_ENDIAN)
566 u16 oper16;
567 u8 opcode;
568 u8 flags;
569#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
570#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
571#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
572#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
573#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
574#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
575#endif
576 u32 cid;
577 u32 reserved2[6];
578};
579
580#endif /* CNIC_DEFS_H */
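
Every flags byte in the L4 KWQE/KCQE structures above pairs a mask macro with a matching _SHIFT macro. A minimal, illustrative sketch of that convention, using the connect-request layer-code field; these helpers are editorial and not part of cnic_defs.h:

	static inline void l4_set_layer_code(struct l4_kwq_connect_req1 *req, u8 code)
	{
		/* clear the field, then shift the new value into place and mask it */
		req->flags &= ~L4_KWQ_CONNECT_REQ1_LAYER_CODE;
		req->flags |= (code << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) &
			       L4_KWQ_CONNECT_REQ1_LAYER_CODE;
	}

	static inline u8 l4_get_layer_code(const struct l4_kwq_connect_req1 *req)
	{
		/* reverse: mask the field out, then shift it back down */
		return (req->flags & L4_KWQ_CONNECT_REQ1_LAYER_CODE) >>
			L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT;
	}
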
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
new file mode 100644
index 000000000000..06380963a34e
--- /dev/null
+++ b/drivers/net/cnic_if.h
@@ -0,0 +1,299 @@
1/* cnic_if.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_IF_H
13#define CNIC_IF_H
14
15#define CNIC_MODULE_VERSION "2.0.0"
16#define CNIC_MODULE_RELDATE "May 21, 2009"
17
18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1
20#define CNIC_ULP_L4 2
21#define MAX_CNIC_ULP_TYPE_EXT 2
22#define MAX_CNIC_ULP_TYPE 3
23
24struct kwqe {
25 u32 kwqe_op_flag;
26
27#define KWQE_OPCODE_MASK 0x00ff0000
28#define KWQE_OPCODE_SHIFT 16
29#define KWQE_FLAGS_LAYER_SHIFT 28
30#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
31
32 u32 kwqe_info0;
33 u32 kwqe_info1;
34 u32 kwqe_info2;
35 u32 kwqe_info3;
36 u32 kwqe_info4;
37 u32 kwqe_info5;
38 u32 kwqe_info6;
39};
40
41struct kwqe_16 {
42 u32 kwqe_info0;
43 u32 kwqe_info1;
44 u32 kwqe_info2;
45 u32 kwqe_info3;
46};
47
48struct kcqe {
49 u32 kcqe_info0;
50 u32 kcqe_info1;
51 u32 kcqe_info2;
52 u32 kcqe_info3;
53 u32 kcqe_info4;
54 u32 kcqe_info5;
55 u32 kcqe_info6;
56 u32 kcqe_op_flag;
57 #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
58 #define KCQE_FLAGS_LAYER_MASK (0x7<<28)
59 #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
60 #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
61 #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
62 #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
63 #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
64 #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
65 #define KCQE_FLAGS_NEXT (1<<31)
66 #define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
67 #define KCQE_FLAGS_OPCODE_SHIFT (16)
68 #define KCQE_OPCODE(op) \
69 (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
70};
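
The layer and opcode fields packed into kcqe_op_flag are meant to be decoded with the masks above. A short illustrative sketch, not part of this header, of how a consumer could split them:

	static int example_kcqe_is_iscsi(const struct kcqe *kcqe)
	{
		/* compare only the 3-bit layer field in bits 28-30 */
		return (kcqe->kcqe_op_flag & KCQE_FLAGS_LAYER_MASK) ==
			KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
	}

	static u32 example_kcqe_opcode(const struct kcqe *kcqe)
	{
		/* KCQE_OPCODE() extracts the opcode from bits 16-23 */
		return KCQE_OPCODE(kcqe->kcqe_op_flag);
	}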
71
72#define MAX_CNIC_CTL_DATA 64
73#define MAX_DRV_CTL_DATA 64
74
75#define CNIC_CTL_STOP_CMD 1
76#define CNIC_CTL_START_CMD 2
77#define CNIC_CTL_COMPLETION_CMD 3
78
79#define DRV_CTL_IO_WR_CMD 0x101
80#define DRV_CTL_IO_RD_CMD 0x102
81#define DRV_CTL_CTX_WR_CMD 0x103
82#define DRV_CTL_CTXTBL_WR_CMD 0x104
83#define DRV_CTL_COMPLETION_CMD 0x105
84
85struct cnic_ctl_completion {
86 u32 cid;
87};
88
89struct drv_ctl_completion {
90 u32 comp_count;
91};
92
93struct cnic_ctl_info {
94 int cmd;
95 union {
96 struct cnic_ctl_completion comp;
97 char bytes[MAX_CNIC_CTL_DATA];
98 } data;
99};
100
101struct drv_ctl_io {
102 u32 cid_addr;
103 u32 offset;
104 u32 data;
105 dma_addr_t dma_addr;
106};
107
108struct drv_ctl_info {
109 int cmd;
110 union {
111 struct drv_ctl_completion comp;
112 struct drv_ctl_io io;
113 char bytes[MAX_DRV_CTL_DATA];
114 } data;
115};
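
The DRV_CTL_* command codes above travel in struct drv_ctl_info. The header does not spell out which union member belongs to which command, so the pairing of DRV_CTL_IO_WR_CMD with the io member in this editorial sketch is an assumption for illustration only:

	static void example_fill_drv_ctl_io_wr(struct drv_ctl_info *info,
					       u32 cid_addr, u32 offset, u32 data)
	{
		info->cmd = DRV_CTL_IO_WR_CMD;	/* assumed to select data.io */
		info->data.io.cid_addr = cid_addr;
		info->data.io.offset = offset;
		info->data.io.data = data;
	}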
116
117struct cnic_ops {
118 struct module *cnic_owner;
119 /* Calls to these functions are protected by RCU. When
120 * unregistering, we wait for any calls to complete before
121 * continuing.
122 */
123 int (*cnic_handler)(void *, void *);
124 int (*cnic_ctl)(void *, struct cnic_ctl_info *);
125};
126
127#define MAX_CNIC_VEC 8
128
129struct cnic_irq {
130 unsigned int vector;
131 void *status_blk;
132 u32 status_blk_num;
133 u32 irq_flags;
134#define CNIC_IRQ_FL_MSIX 0x00000001
135};
136
137struct cnic_eth_dev {
138 struct module *drv_owner;
139 u32 drv_state;
140#define CNIC_DRV_STATE_REGD 0x00000001
141#define CNIC_DRV_STATE_USING_MSIX 0x00000002
142 u32 chip_id;
143 u32 max_kwqe_pending;
144 struct pci_dev *pdev;
145 void __iomem *io_base;
146
147 u32 ctx_tbl_offset;
148 u32 ctx_tbl_len;
149 int ctx_blk_size;
150 u32 starting_cid;
151 u32 max_iscsi_conn;
152 u32 max_fcoe_conn;
153 u32 max_rdma_conn;
154 u32 reserved0[2];
155
156 int num_irq;
157 struct cnic_irq irq_arr[MAX_CNIC_VEC];
158 int (*drv_register_cnic)(struct net_device *,
159 struct cnic_ops *, void *);
160 int (*drv_unregister_cnic)(struct net_device *);
161 int (*drv_submit_kwqes_32)(struct net_device *,
162 struct kwqe *[], u32);
163 int (*drv_submit_kwqes_16)(struct net_device *,
164 struct kwqe_16 *[], u32);
165 int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
166 unsigned long reserved1[2];
167};
168
169struct cnic_sockaddr {
170 union {
171 struct sockaddr_in v4;
172 struct sockaddr_in6 v6;
173 } local;
174 union {
175 struct sockaddr_in v4;
176 struct sockaddr_in6 v6;
177 } remote;
178};
179
180struct cnic_sock {
181 struct cnic_dev *dev;
182 void *context;
183 u32 src_ip[4];
184 u32 dst_ip[4];
185 u16 src_port;
186 u16 dst_port;
187 u16 vlan_id;
188 unsigned char old_ha[6];
189 unsigned char ha[6];
190 u32 mtu;
191 u32 cid;
192 u32 l5_cid;
193 u32 pg_cid;
194 int ulp_type;
195
196 u32 ka_timeout;
197 u32 ka_interval;
198 u8 ka_max_probe_count;
199 u8 tos;
200 u8 ttl;
201 u8 snd_seq_scale;
202 u32 rcv_buf;
203 u32 snd_buf;
204 u32 seed;
205
206 unsigned long tcp_flags;
207#define SK_TCP_NO_DELAY_ACK 0x1
208#define SK_TCP_KEEP_ALIVE 0x2
209#define SK_TCP_NAGLE 0x4
210#define SK_TCP_TIMESTAMP 0x8
211#define SK_TCP_SACK 0x10
212#define SK_TCP_SEG_SCALING 0x20
213 unsigned long flags;
214#define SK_F_INUSE 0
215#define SK_F_OFFLD_COMPLETE 1
216#define SK_F_OFFLD_SCHED 2
217#define SK_F_PG_OFFLD_COMPLETE 3
218#define SK_F_CONNECT_START 4
219#define SK_F_IPV6 5
220#define SK_F_CLOSING 7
221
222 atomic_t ref_count;
223 u32 state;
224 struct kwqe kwqe1;
225 struct kwqe kwqe2;
226 struct kwqe kwqe3;
227};
228
229struct cnic_dev {
230 struct net_device *netdev;
231 struct pci_dev *pcidev;
232 void __iomem *regview;
233 struct list_head list;
234
235 int (*register_device)(struct cnic_dev *dev, int ulp_type,
236 void *ulp_ctx);
237 int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
238 int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
239 u32 num_wqes);
240 int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
241 u32 num_wqes);
242
243 int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
244 void *);
245 int (*cm_destroy)(struct cnic_sock *);
246 int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
247 int (*cm_abort)(struct cnic_sock *);
248 int (*cm_close)(struct cnic_sock *);
249 struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
250 int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
251 char *data, u16 data_size);
252 unsigned long flags;
253#define CNIC_F_CNIC_UP 1
254#define CNIC_F_BNX2_CLASS 3
255#define CNIC_F_BNX2X_CLASS 4
256 atomic_t ref_count;
257 u8 mac_addr[6];
258
259 int max_iscsi_conn;
260 int max_fcoe_conn;
261 int max_rdma_conn;
262
263 void *cnic_priv;
264};
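
The cm_* callbacks above form the connection-manager half of the interface. A hedged sketch of how a ULP might drive them; the meaning of the two u32 arguments to cm_create (taken here as a context id followed by an L5 connection id) and the error handling are assumptions, not something this header spells out:

	static int example_open_conn(struct cnic_dev *dev, u32 cid, u32 l5_cid,
				     struct cnic_sockaddr *addr, void *ulp_ctx)
	{
		struct cnic_sock *csk;
		int rc;

		/* assumed ordering of the u32 arguments: cid, then l5_cid */
		rc = dev->cm_create(dev, CNIC_ULP_ISCSI, cid, l5_cid, &csk, ulp_ctx);
		if (rc)
			return rc;

		rc = dev->cm_connect(csk, addr);
		if (rc)
			dev->cm_destroy(csk);
		return rc;
	}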
265
266#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
267#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
268#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
269#define CNIC_RD(dev, off) readl(dev->regview + off)
270#define CNIC_RD16(dev, off) readw(dev->regview + off)
271
272struct cnic_ulp_ops {
273 /* Calls to these functions are protected by RCU. When
274 * unregistering, we wait for any calls to complete before
275 * continuing.
276 */
277
278 void (*cnic_init)(struct cnic_dev *dev);
279 void (*cnic_exit)(struct cnic_dev *dev);
280 void (*cnic_start)(void *ulp_ctx);
281 void (*cnic_stop)(void *ulp_ctx);
282 void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
283 u32 num_cqes);
284 void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
285 void (*cm_connect_complete)(struct cnic_sock *);
286 void (*cm_close_complete)(struct cnic_sock *);
287 void (*cm_abort_complete)(struct cnic_sock *);
288 void (*cm_remote_close)(struct cnic_sock *);
289 void (*cm_remote_abort)(struct cnic_sock *);
290 void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
291 char *data, u16 data_size);
292 struct module *owner;
293};
294
295extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
296
297extern int cnic_unregister_driver(int ulp_type);
298
299#endif
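
For reference, the registration flow a ULP driver would follow against this interface, sketched under the assumption that leaving the remaining cnic_ulp_ops callbacks unset is acceptable for brevity; the example_* names are placeholders and none of this is part of the patch:

	static void example_ulp_init(struct cnic_dev *dev)
	{
		/* per-device ULP setup would go here */
	}

	static void example_ulp_exit(struct cnic_dev *dev)
	{
		/* per-device ULP teardown */
	}

	static struct cnic_ulp_ops example_ulp_ops = {
		.cnic_init = example_ulp_init,
		.cnic_exit = example_ulp_exit,
		.owner     = THIS_MODULE,
	};

	static int __init example_ulp_module_init(void)
	{
		return cnic_register_driver(CNIC_ULP_ISCSI, &example_ulp_ops);
	}

	static void __exit example_ulp_module_exit(void)
	{
		cnic_unregister_driver(CNIC_ULP_ISCSI);
	}
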
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 733fe3bf6285..b2fe5cdbcaee 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -11,6 +11,24 @@
11 11
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13 13
14#define ZFCP_MODEL_PRIV 0x4
15
16static struct ccw_device_id zfcp_ccw_device_id[] = {
17 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
18 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
19 {},
20};
21MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
22
23/**
24 * zfcp_ccw_priv_sch - check if subchannel is privileged
25 * @adapter: Adapter/Subchannel to check
26 */
27int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
28{
29 return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
30}
31
14/** 32/**
15 * zfcp_ccw_probe - probe function of zfcp driver 33 * zfcp_ccw_probe - probe function of zfcp driver
16 * @ccw_device: pointer to belonging ccw device 34 * @ccw_device: pointer to belonging ccw device
@@ -176,8 +194,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
176 "ccnoti4", NULL); 194 "ccnoti4", NULL);
177 break; 195 break;
178 case CIO_BOXED: 196 case CIO_BOXED:
179 dev_warn(&adapter->ccw_device->dev, 197 dev_warn(&adapter->ccw_device->dev, "The FCP device "
180 "The ccw device did not respond in time.\n"); 198 "did not respond within the specified time\n");
181 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); 199 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
182 break; 200 break;
183 } 201 }
@@ -199,14 +217,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
199 up(&zfcp_data.config_sema); 217 up(&zfcp_data.config_sema);
200} 218}
201 219
202static struct ccw_device_id zfcp_ccw_device_id[] = {
203 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
204 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
205 {},
206};
207
208MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
209
210static struct ccw_driver zfcp_ccw_driver = { 220static struct ccw_driver zfcp_ccw_driver = {
211 .owner = THIS_MODULE, 221 .owner = THIS_MODULE,
212 .name = "zfcp", 222 .name = "zfcp",
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0a1a5dd8d018..b99b87ce5a39 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
163 } 163 }
164 164
165 response->fsf_command = fsf_req->fsf_command; 165 response->fsf_command = fsf_req->fsf_command;
166 response->fsf_reqid = (unsigned long)fsf_req; 166 response->fsf_reqid = fsf_req->req_id;
167 response->fsf_seqno = fsf_req->seq_no; 167 response->fsf_seqno = fsf_req->seq_no;
168 response->fsf_issued = fsf_req->issued; 168 response->fsf_issued = fsf_req->issued;
169 response->fsf_prot_status = qtcb->prefix.prot_status; 169 response->fsf_prot_status = qtcb->prefix.prot_status;
@@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
737 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 737 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
738 memset(r, 0, sizeof(*r)); 738 memset(r, 0, sizeof(*r));
739 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); 739 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
740 r->fsf_reqid = (unsigned long)fsf_req; 740 r->fsf_reqid = fsf_req->req_id;
741 r->fsf_seqno = fsf_req->seq_no; 741 r->fsf_seqno = fsf_req->seq_no;
742 r->s_id = fc_host_port_id(adapter->scsi_host); 742 r->s_id = fc_host_port_id(adapter->scsi_host);
743 r->d_id = wka_port->d_id; 743 r->d_id = wka_port->d_id;
@@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
773 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 773 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
774 memset(r, 0, sizeof(*r)); 774 memset(r, 0, sizeof(*r));
775 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); 775 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
776 r->fsf_reqid = (unsigned long)fsf_req; 776 r->fsf_reqid = fsf_req->req_id;
777 r->fsf_seqno = fsf_req->seq_no; 777 r->fsf_seqno = fsf_req->seq_no;
778 r->s_id = wka_port->d_id; 778 r->s_id = wka_port->d_id;
779 r->d_id = fc_host_port_id(adapter->scsi_host); 779 r->d_id = fc_host_port_id(adapter->scsi_host);
@@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
803 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 803 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
804 memset(rec, 0, sizeof(*rec)); 804 memset(rec, 0, sizeof(*rec));
805 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 805 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
806 rec->fsf_reqid = (unsigned long)fsf_req; 806 rec->fsf_reqid = fsf_req->req_id;
807 rec->fsf_seqno = fsf_req->seq_no; 807 rec->fsf_seqno = fsf_req->seq_no;
808 rec->s_id = s_id; 808 rec->s_id = s_id;
809 rec->d_id = d_id; 809 rec->d_id = d_id;
@@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
965 ZFCP_DBF_SCSI_FCP_SNS_INFO); 965 ZFCP_DBF_SCSI_FCP_SNS_INFO);
966 } 966 }
967 967
968 rec->fsf_reqid = (unsigned long)fsf_req; 968 rec->fsf_reqid = fsf_req->req_id;
969 rec->fsf_seqno = fsf_req->seq_no; 969 rec->fsf_seqno = fsf_req->seq_no;
970 rec->fsf_issued = fsf_req->issued; 970 rec->fsf_issued = fsf_req->issued;
971 } 971 }
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 4c362a9069f0..2074d45dbf6c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -47,13 +47,6 @@
47 47
48/********************* CIO/QDIO SPECIFIC DEFINES *****************************/ 48/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
49 49
50/* Adapter Identification Parameters */
51#define ZFCP_CONTROL_UNIT_TYPE 0x1731
52#define ZFCP_CONTROL_UNIT_MODEL 0x03
53#define ZFCP_DEVICE_TYPE 0x1732
54#define ZFCP_DEVICE_MODEL 0x03
55#define ZFCP_DEVICE_MODEL_PRIV 0x04
56
57/* DMQ bug workaround: don't use last SBALE */ 50/* DMQ bug workaround: don't use last SBALE */
58#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1) 51#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
59 52
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index fdc9b4352a64..e50ea465bc2b 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -880,6 +880,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
880 zfcp_port_put(port); 880 zfcp_port_put(port);
881 return ZFCP_ERP_CONTINUES; 881 return ZFCP_ERP_CONTINUES;
882 } 882 }
883 /* fall through */
883 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: 884 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
884 if (!port->d_id) 885 if (!port->d_id)
885 return ZFCP_ERP_FAILED; 886 return ZFCP_ERP_FAILED;
@@ -894,8 +895,13 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
894 act->step = ZFCP_ERP_STEP_PORT_CLOSING; 895 act->step = ZFCP_ERP_STEP_PORT_CLOSING;
895 return ZFCP_ERP_CONTINUES; 896 return ZFCP_ERP_CONTINUES;
896 } 897 }
897 /* fall through otherwise */
898 } 898 }
899 if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
900 port->d_id = 0;
901 _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
902 return ZFCP_ERP_EXIT;
903 }
904 /* fall through otherwise */
899 } 905 }
900 return ZFCP_ERP_FAILED; 906 return ZFCP_ERP_FAILED;
901} 907}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 2e31b536548c..120a9a1c81f7 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -27,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
27 27
28/* zfcp_ccw.c */ 28/* zfcp_ccw.c */
29extern int zfcp_ccw_register(void); 29extern int zfcp_ccw_register(void);
30extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
30extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); 31extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
31 32
32/* zfcp_cfdc.c */ 33/* zfcp_cfdc.c */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 19ae0842047c..bb2752b4130f 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -150,9 +150,14 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
150 struct zfcp_port *port; 150 struct zfcp_port *port;
151 151
152 read_lock_irqsave(&zfcp_data.config_lock, flags); 152 read_lock_irqsave(&zfcp_data.config_lock, flags);
153 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) 153 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
154 if ((port->d_id & range) == (elem->nport_did & range)) 154 if ((port->d_id & range) == (elem->nport_did & range))
155 zfcp_test_link(port); 155 zfcp_test_link(port);
156 if (!port->d_id)
157 zfcp_erp_port_reopen(port,
158 ZFCP_STATUS_COMMON_ERP_FAILED,
159 "fcrscn1", NULL);
160 }
156 161
157 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 162 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
158} 163}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 74dee32afba8..e6dae3744e79 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -526,6 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
526 break; 526 break;
527 case FSF_TOPO_AL: 527 case FSF_TOPO_AL:
528 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 528 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
529 /* fall through */
529 default: 530 default:
530 dev_err(&adapter->ccw_device->dev, 531 dev_err(&adapter->ccw_device->dev,
531 "Unknown or unsupported arbitrated loop " 532 "Unknown or unsupported arbitrated loop "
@@ -897,6 +898,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
897 switch (fsq->word[0]) { 898 switch (fsq->word[0]) {
898 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 899 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
899 zfcp_test_link(unit->port); 900 zfcp_test_link(unit->port);
901 /* fall through */
900 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 902 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
901 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 903 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
902 break; 904 break;
@@ -993,6 +995,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
993 break; 995 break;
994 case FSF_PORT_HANDLE_NOT_VALID: 996 case FSF_PORT_HANDLE_NOT_VALID:
995 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); 997 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
998 /* fall through */
996 case FSF_GENERIC_COMMAND_REJECTED: 999 case FSF_GENERIC_COMMAND_REJECTED:
997 case FSF_PAYLOAD_SIZE_MISMATCH: 1000 case FSF_PAYLOAD_SIZE_MISMATCH:
998 case FSF_REQUEST_SIZE_TOO_LARGE: 1001 case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1399,7 +1402,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1399 struct fsf_plogi *plogi; 1402 struct fsf_plogi *plogi;
1400 1403
1401 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1404 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1402 return; 1405 goto out;
1403 1406
1404 switch (header->fsf_status) { 1407 switch (header->fsf_status) {
1405 case FSF_PORT_ALREADY_OPEN: 1408 case FSF_PORT_ALREADY_OPEN:
@@ -1461,6 +1464,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1461 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1464 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1462 break; 1465 break;
1463 } 1466 }
1467
1468out:
1469 zfcp_port_put(port);
1464} 1470}
1465 1471
1466/** 1472/**
@@ -1473,6 +1479,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1473 struct qdio_buffer_element *sbale; 1479 struct qdio_buffer_element *sbale;
1474 struct zfcp_adapter *adapter = erp_action->adapter; 1480 struct zfcp_adapter *adapter = erp_action->adapter;
1475 struct zfcp_fsf_req *req; 1481 struct zfcp_fsf_req *req;
1482 struct zfcp_port *port = erp_action->port;
1476 int retval = -EIO; 1483 int retval = -EIO;
1477 1484
1478 spin_lock_bh(&adapter->req_q_lock); 1485 spin_lock_bh(&adapter->req_q_lock);
@@ -1493,16 +1500,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1493 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1500 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1494 1501
1495 req->handler = zfcp_fsf_open_port_handler; 1502 req->handler = zfcp_fsf_open_port_handler;
1496 req->qtcb->bottom.support.d_id = erp_action->port->d_id; 1503 req->qtcb->bottom.support.d_id = port->d_id;
1497 req->data = erp_action->port; 1504 req->data = port;
1498 req->erp_action = erp_action; 1505 req->erp_action = erp_action;
1499 erp_action->fsf_req = req; 1506 erp_action->fsf_req = req;
1507 zfcp_port_get(port);
1500 1508
1501 zfcp_fsf_start_erp_timer(req); 1509 zfcp_fsf_start_erp_timer(req);
1502 retval = zfcp_fsf_req_send(req); 1510 retval = zfcp_fsf_req_send(req);
1503 if (retval) { 1511 if (retval) {
1504 zfcp_fsf_req_free(req); 1512 zfcp_fsf_req_free(req);
1505 erp_action->fsf_req = NULL; 1513 erp_action->fsf_req = NULL;
1514 zfcp_port_put(port);
1506 } 1515 }
1507out: 1516out:
1508 spin_unlock_bh(&adapter->req_q_lock); 1517 spin_unlock_bh(&adapter->req_q_lock);
@@ -1590,8 +1599,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1590 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1599 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1591 dev_warn(&req->adapter->ccw_device->dev, 1600 dev_warn(&req->adapter->ccw_device->dev,
1592 "Opening WKA port 0x%x failed\n", wka_port->d_id); 1601 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1602 /* fall through */
1593 case FSF_ADAPTER_STATUS_AVAILABLE: 1603 case FSF_ADAPTER_STATUS_AVAILABLE:
1594 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1604 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1605 /* fall through */
1595 case FSF_ACCESS_DENIED: 1606 case FSF_ACCESS_DENIED:
1596 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1607 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1597 break; 1608 break;
@@ -1876,7 +1887,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1876 1887
1877 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && 1888 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1878 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && 1889 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1879 (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) { 1890 !zfcp_ccw_priv_sch(adapter)) {
1880 exclusive = (bottom->lun_access_info & 1891 exclusive = (bottom->lun_access_info &
1881 FSF_UNIT_ACCESS_EXCLUSIVE); 1892 FSF_UNIT_ACCESS_EXCLUSIVE);
1882 readwrite = (bottom->lun_access_info & 1893 readwrite = (bottom->lun_access_info &
@@ -2314,7 +2325,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2314{ 2325{
2315 struct zfcp_fsf_req *req; 2326 struct zfcp_fsf_req *req;
2316 struct fcp_cmnd_iu *fcp_cmnd_iu; 2327 struct fcp_cmnd_iu *fcp_cmnd_iu;
2317 unsigned int sbtype; 2328 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2318 int real_bytes, retval = -EIO; 2329 int real_bytes, retval = -EIO;
2319 struct zfcp_adapter *adapter = unit->port->adapter; 2330 struct zfcp_adapter *adapter = unit->port->adapter;
2320 2331
@@ -2356,11 +2367,9 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2356 switch (scsi_cmnd->sc_data_direction) { 2367 switch (scsi_cmnd->sc_data_direction) {
2357 case DMA_NONE: 2368 case DMA_NONE:
2358 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2369 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2359 sbtype = SBAL_FLAGS0_TYPE_READ;
2360 break; 2370 break;
2361 case DMA_FROM_DEVICE: 2371 case DMA_FROM_DEVICE:
2362 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; 2372 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2363 sbtype = SBAL_FLAGS0_TYPE_READ;
2364 fcp_cmnd_iu->rddata = 1; 2373 fcp_cmnd_iu->rddata = 1;
2365 break; 2374 break;
2366 case DMA_TO_DEVICE: 2375 case DMA_TO_DEVICE:
@@ -2369,8 +2378,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2369 fcp_cmnd_iu->wddata = 1; 2378 fcp_cmnd_iu->wddata = 1;
2370 break; 2379 break;
2371 case DMA_BIDIRECTIONAL: 2380 case DMA_BIDIRECTIONAL:
2372 default:
2373 retval = -EIO;
2374 goto failed_scsi_cmnd; 2381 goto failed_scsi_cmnd;
2375 } 2382 }
2376 2383
@@ -2394,9 +2401,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2394 scsi_sglist(scsi_cmnd), 2401 scsi_sglist(scsi_cmnd),
2395 FSF_MAX_SBALS_PER_REQ); 2402 FSF_MAX_SBALS_PER_REQ);
2396 if (unlikely(real_bytes < 0)) { 2403 if (unlikely(real_bytes < 0)) {
2397 if (req->sbal_number < FSF_MAX_SBALS_PER_REQ) 2404 if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2398 retval = -EIO;
2399 else {
2400 dev_err(&adapter->ccw_device->dev, 2405 dev_err(&adapter->ccw_device->dev,
2401 "Oversize data package, unit 0x%016Lx " 2406 "Oversize data package, unit 0x%016Lx "
2402 "on port 0x%016Lx closed\n", 2407 "on port 0x%016Lx closed\n",
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e8fbeaeb5fbf..7d0da230eb63 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -12,6 +12,10 @@
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13#include <asm/atomic.h> 13#include <asm/atomic.h>
14 14
15static unsigned int default_depth = 32;
16module_param_named(queue_depth, default_depth, uint, 0600);
17MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
18
15/* Find start of Sense Information in FCP response unit*/ 19/* Find start of Sense Information in FCP response unit*/
16char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) 20char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
17{ 21{
@@ -24,6 +28,12 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
24 return fcp_sns_info_ptr; 28 return fcp_sns_info_ptr;
25} 29}
26 30
31static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth)
32{
33 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
34 return sdev->queue_depth;
35}
36
27static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 37static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
28{ 38{
29 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 39 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -34,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
34static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 44static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
35{ 45{
36 if (sdp->tagged_supported) 46 if (sdp->tagged_supported)
37 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32); 47 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
38 else 48 else
39 scsi_adjust_queue_depth(sdp, 0, 1); 49 scsi_adjust_queue_depth(sdp, 0, 1);
40 return 0; 50 return 0;
@@ -647,6 +657,7 @@ struct zfcp_data zfcp_data = {
647 .name = "zfcp", 657 .name = "zfcp",
648 .module = THIS_MODULE, 658 .module = THIS_MODULE,
649 .proc_name = "zfcp", 659 .proc_name = "zfcp",
660 .change_queue_depth = zfcp_scsi_change_queue_depth,
650 .slave_alloc = zfcp_scsi_slave_alloc, 661 .slave_alloc = zfcp_scsi_slave_alloc,
651 .slave_configure = zfcp_scsi_slave_configure, 662 .slave_configure = zfcp_scsi_slave_configure,
652 .slave_destroy = zfcp_scsi_slave_destroy, 663 .slave_destroy = zfcp_scsi_slave_destroy,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index fb2740789b68..6a19ed9a1194 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
191 it has an enclosure device. Selecting this option will just allow 191 it has an enclosure device. Selecting this option will just allow
192 certain enclosure conditions to be reported and is not required. 192 certain enclosure conditions to be reported and is not required.
193 193
194comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
195 depends on SCSI
196
197config SCSI_MULTI_LUN 194config SCSI_MULTI_LUN
198 bool "Probe all LUNs on each SCSI device" 195 bool "Probe all LUNs on each SCSI device"
199 depends on SCSI 196 depends on SCSI
200 help 197 help
201 If you have a SCSI device that supports more than one LUN (Logical 198 Some devices support more than one LUN (Logical Unit Number) in order
202 Unit Number), e.g. a CD jukebox, and only one LUN is detected, you 199 to allow access to several media, e.g. CD jukebox, USB card reader,
203 can say Y here to force the SCSI driver to probe for multiple LUNs. 200 mobile phone in mass storage mode. This option forces the kernel to
 204 A SCSI device with multiple LUNs acts logically like multiple SCSI 201 probe for all LUNs by default. This setting can be overridden by
 205 devices. The vast majority of SCSI devices have only one LUN, and 202 the max_luns boot/module parameter. Note that this option does not affect
 206 so most people can say N here. The max_luns boot/module parameter 203 devices conforming to SCSI-3 or higher as they can explicitly report
207 allows to override this setting. 204 their number of LUNs. It is safe to say Y here unless you have one of
205 those rare devices which reacts in an unexpected way when probed for
206 multiple LUNs.
208 207
209config SCSI_CONSTANTS 208config SCSI_CONSTANTS
210 bool "Verbose SCSI error reporting (kernel size +=12K)" 209 bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -355,6 +354,7 @@ config ISCSI_TCP
355 http://open-iscsi.org 354 http://open-iscsi.org
356 355
357source "drivers/scsi/cxgb3i/Kconfig" 356source "drivers/scsi/cxgb3i/Kconfig"
357source "drivers/scsi/bnx2i/Kconfig"
358 358
359config SGIWD93_SCSI 359config SGIWD93_SCSI
360 tristate "SGI WD93C93 SCSI Driver" 360 tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
508 508
509source "drivers/scsi/aic7xxx/Kconfig.aic79xx" 509source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
510source "drivers/scsi/aic94xx/Kconfig" 510source "drivers/scsi/aic94xx/Kconfig"
511source "drivers/scsi/mvsas/Kconfig"
511 512
512config SCSI_DPT_I2O 513config SCSI_DPT_I2O
513 tristate "Adaptec I2O RAID support " 514 tristate "Adaptec I2O RAID support "
@@ -1050,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
1050 1051
1051 Generally, saying N is fine. 1052 Generally, saying N is fine.
1052 1053
1053config SCSI_MVSAS
1054 tristate "Marvell 88SE6440 SAS/SATA support"
1055 depends on PCI && SCSI
1056 select SCSI_SAS_LIBSAS
1057 help
1058 This driver supports Marvell SAS/SATA PCI devices.
1059
1060 To compiler this driver as a module, choose M here: the module
1061 will be called mvsas.
1062
1063config SCSI_NCR53C406A 1054config SCSI_NCR53C406A
1064 tristate "NCR53c406a SCSI support" 1055 tristate "NCR53c406a SCSI support"
1065 depends on ISA && SCSI 1056 depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a5049cfb40ed..25429ea63d0a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -126,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
126obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ 126obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
127obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 127obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
128obj-$(CONFIG_SCSI_STEX) += stex.o 128obj-$(CONFIG_SCSI_STEX) += stex.o
129obj-$(CONFIG_SCSI_MVSAS) += mvsas.o 129obj-$(CONFIG_SCSI_MVSAS) += mvsas/
130obj-$(CONFIG_PS3_ROM) += ps3rom.o 130obj-$(CONFIG_PS3_ROM) += ps3rom.o
131obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ 131obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
132obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
132 133
133obj-$(CONFIG_ARM) += arm/ 134obj-$(CONFIG_ARM) += arm/
134 135
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index c889d8458684..1cdf09a4779a 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
224 return ret; 224 return ret;
225} 225}
226 226
227static int 227static irqreturn_t
228NCR_D700_intr(int irq, void *data) 228NCR_D700_intr(int irq, void *data)
229{ 229{
230 struct NCR_D700_private *p = (struct NCR_D700_private *)data; 230 struct NCR_D700_private *p = (struct NCR_D700_private *)data;
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644
index 000000000000..2fceb19eb27b
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -0,0 +1,155 @@
1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_CONSTANTS_H_
12#define __57XX_ISCSI_CONSTANTS_H_
13
14/**
15* This file defines HSI constants for the iSCSI flows
16*/
17
18/* iSCSI request op codes */
19#define ISCSI_OPCODE_CLEANUP_REQUEST (7)
20
21/* iSCSI response/messages op codes */
22#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
23#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)
24
25/* iSCSI task types */
26#define ISCSI_TASK_TYPE_READ (0)
27#define ISCSI_TASK_TYPE_WRITE (1)
28#define ISCSI_TASK_TYPE_MPATH (2)
29
30/* initial CQ sequence numbers */
31#define ISCSI_INITIAL_SN (1)
32
33/* KWQ (kernel work queue) layer codes */
34#define ISCSI_KWQE_LAYER_CODE (6)
35
36/* KWQ (kernel work queue) request op codes */
37#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
38#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
39#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
40#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
41#define ISCSI_KWQE_OPCODE_INIT1 (4)
42#define ISCSI_KWQE_OPCODE_INIT2 (5)
43
44/* KCQ (kernel completion queue) response op codes */
45#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
46#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
47#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
48#define ISCSI_KCQE_OPCODE_INIT (0x14)
49#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
50#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
51#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
52#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18)
53#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
54#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
55#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)
56
57/* KCQ (kernel completion queue) completion status */
58#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
59#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
60#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
61#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
62#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)
63
64#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
65#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)
66
67#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
68#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
69#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
70#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
71#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)
72
73/* Response */
74#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
75#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
76#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
77#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
78#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
79#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
80#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
81#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
82#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
83#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
84#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)
85
86/* Data-In */
87#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
88#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
89#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
90#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
91#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
92#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)
93
94/* R2T */
95#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
96#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
97#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
98#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
99#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
100#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
101#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
102#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
103#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
104
105/* TMF */
106#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
107#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
108#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
109#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)
110
111/* IP/TCP processing errors: */
112#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
113#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
114#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
115#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
116
117/* iSCSI licensing errors */
118/* general iSCSI license not installed */
119#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
120/* additional LOM specific iSCSI license not installed */
121#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
122
123/* SQ/RQ/CQ DB structure sizes */
124#define ISCSI_SQ_DB_SIZE (16)
125#define ISCSI_RQ_DB_SIZE (16)
126#define ISCSI_CQ_DB_SIZE (80)
127
128#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
129
130/* Page size codes (for flags field in connection offload request) */
131#define ISCSI_PAGE_SIZE_256 (0)
132#define ISCSI_PAGE_SIZE_512 (1)
133#define ISCSI_PAGE_SIZE_1K (2)
134#define ISCSI_PAGE_SIZE_2K (3)
135#define ISCSI_PAGE_SIZE_4K (4)
136#define ISCSI_PAGE_SIZE_8K (5)
137#define ISCSI_PAGE_SIZE_16K (6)
138#define ISCSI_PAGE_SIZE_32K (7)
139#define ISCSI_PAGE_SIZE_64K (8)
140#define ISCSI_PAGE_SIZE_128K (9)
141#define ISCSI_PAGE_SIZE_256K (10)
142#define ISCSI_PAGE_SIZE_512K (11)
143#define ISCSI_PAGE_SIZE_1M (12)
144#define ISCSI_PAGE_SIZE_2M (13)
145#define ISCSI_PAGE_SIZE_4M (14)
146#define ISCSI_PAGE_SIZE_8M (15)
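
The page-size codes above encode size = 256 bytes << code (for instance ISCSI_PAGE_SIZE_4K is 4, and 256 << 4 == 4096). An illustrative helper, not part of the header, mapping a byte count back to its code:

	static int example_iscsi_page_size_code(unsigned long size)
	{
		int code = ISCSI_PAGE_SIZE_256;

		while (code < ISCSI_PAGE_SIZE_8M && (256UL << code) < size)
			code++;
		return (256UL << code) == size ? code : -1;	/* -1: unsupported size */
	}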
147
 148/* iSCSI PDU related defines */
149#define ISCSI_HEADER_SIZE (48)
150#define ISCSI_DIGEST_SHIFT (2)
151#define ISCSI_DIGEST_SIZE (4)
152
153#define B577XX_ISCSI_CONNECTION_TYPE 3
154
155#endif /*__57XX_ISCSI_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644
index 000000000000..36af1afef9b6
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -0,0 +1,1509 @@
1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_HSI_LINUX_LE__
12#define __57XX_ISCSI_HSI_LINUX_LE__
13
14/*
15 * iSCSI Async CQE
16 */
17struct bnx2i_async_msg {
18#if defined(__BIG_ENDIAN)
19 u8 op_code;
20 u8 reserved1;
21 u16 reserved0;
22#elif defined(__LITTLE_ENDIAN)
23 u16 reserved0;
24 u8 reserved1;
25 u8 op_code;
26#endif
27 u32 reserved2;
28 u32 exp_cmd_sn;
29 u32 max_cmd_sn;
30 u32 reserved3[2];
31#if defined(__BIG_ENDIAN)
32 u16 reserved5;
33 u8 err_code;
34 u8 reserved4;
35#elif defined(__LITTLE_ENDIAN)
36 u8 reserved4;
37 u8 err_code;
38 u16 reserved5;
39#endif
40 u32 reserved6;
41 u32 lun[2];
42#if defined(__BIG_ENDIAN)
43 u8 async_event;
44 u8 async_vcode;
45 u16 param1;
46#elif defined(__LITTLE_ENDIAN)
47 u16 param1;
48 u8 async_vcode;
49 u8 async_event;
50#endif
51#if defined(__BIG_ENDIAN)
52 u16 param2;
53 u16 param3;
54#elif defined(__LITTLE_ENDIAN)
55 u16 param3;
56 u16 param2;
57#endif
58 u32 reserved7[3];
59 u32 cq_req_sn;
60};
61
62
63/*
64 * iSCSI Buffer Descriptor (BD)
65 */
66struct iscsi_bd {
67 u32 buffer_addr_hi;
68 u32 buffer_addr_lo;
69#if defined(__BIG_ENDIAN)
70 u16 reserved0;
71 u16 buffer_length;
72#elif defined(__LITTLE_ENDIAN)
73 u16 buffer_length;
74 u16 reserved0;
75#endif
76#if defined(__BIG_ENDIAN)
77 u16 reserved3;
78 u16 flags;
79#define ISCSI_BD_RESERVED1 (0x3F<<0)
80#define ISCSI_BD_RESERVED1_SHIFT 0
81#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
82#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
83#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
84#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
85#define ISCSI_BD_RESERVED2 (0xFF<<8)
86#define ISCSI_BD_RESERVED2_SHIFT 8
87#elif defined(__LITTLE_ENDIAN)
88 u16 flags;
89#define ISCSI_BD_RESERVED1 (0x3F<<0)
90#define ISCSI_BD_RESERVED1_SHIFT 0
91#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
92#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
93#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
94#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
95#define ISCSI_BD_RESERVED2 (0xFF<<8)
96#define ISCSI_BD_RESERVED2_SHIFT 8
97 u16 reserved3;
98#endif
99};
100
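Data buffers are handed to the firmware as chains of these descriptors, with the FIRST/LAST flags bracketing the chain. A minimal, illustrative sketch of filling one entry from an already-mapped DMA address; the caller and the address are assumptions, not part of this header:

	static void example_fill_iscsi_bd(struct iscsi_bd *bd, u64 dma_addr,
					  u16 len, int first, int last)
	{
		bd->buffer_addr_hi = (u32) (dma_addr >> 32);
		bd->buffer_addr_lo = (u32) dma_addr;
		bd->buffer_length  = len;

		bd->flags = 0;
		if (first)
			bd->flags |= ISCSI_BD_FIRST_IN_BD_CHAIN;
		if (last)
			bd->flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
	}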
101
102/*
103 * iSCSI Cleanup SQ WQE
104 */
105struct bnx2i_cleanup_request {
106#if defined(__BIG_ENDIAN)
107 u8 op_code;
108 u8 reserved1;
109 u16 reserved0;
110#elif defined(__LITTLE_ENDIAN)
111 u16 reserved0;
112 u8 reserved1;
113 u8 op_code;
114#endif
115 u32 reserved2[3];
116#if defined(__BIG_ENDIAN)
117 u16 reserved3;
118 u16 itt;
119#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
120#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
121#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
122#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
123#elif defined(__LITTLE_ENDIAN)
124 u16 itt;
125#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
126#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
127#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
128#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
129 u16 reserved3;
130#endif
131 u32 reserved4[10];
132#if defined(__BIG_ENDIAN)
133 u8 cq_index;
134 u8 reserved6;
135 u16 reserved5;
136#elif defined(__LITTLE_ENDIAN)
137 u16 reserved5;
138 u8 reserved6;
139 u8 cq_index;
140#endif
141};
142
143
144/*
145 * iSCSI Cleanup CQE
146 */
147struct bnx2i_cleanup_response {
148#if defined(__BIG_ENDIAN)
149 u8 op_code;
150 u8 status;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 status;
155 u8 op_code;
156#endif
157 u32 reserved1[3];
158 u32 reserved2[2];
159#if defined(__BIG_ENDIAN)
160 u16 reserved4;
161 u8 err_code;
162 u8 reserved3;
163#elif defined(__LITTLE_ENDIAN)
164 u8 reserved3;
165 u8 err_code;
166 u16 reserved4;
167#endif
168 u32 reserved5[7];
169#if defined(__BIG_ENDIAN)
170 u16 reserved6;
171 u16 itt;
172#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
173#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
174#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
175#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
176#elif defined(__LITTLE_ENDIAN)
177 u16 itt;
178#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
179#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
180#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
181#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
182 u16 reserved6;
183#endif
184 u32 cq_req_sn;
185};
186
187
188/*
189 * SCSI read/write SQ WQE
190 */
191struct bnx2i_cmd_request {
192#if defined(__BIG_ENDIAN)
193 u8 op_code;
194 u8 op_attr;
195#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
196#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
197#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
198#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
199#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
200#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
201#define ISCSI_CMD_REQUEST_READ (0x1<<6)
202#define ISCSI_CMD_REQUEST_READ_SHIFT 6
203#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
204#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
205 u16 reserved0;
206#elif defined(__LITTLE_ENDIAN)
207 u16 reserved0;
208 u8 op_attr;
209#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
210#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
211#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
212#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
213#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
214#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
215#define ISCSI_CMD_REQUEST_READ (0x1<<6)
216#define ISCSI_CMD_REQUEST_READ_SHIFT 6
217#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
218#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
219 u8 op_code;
220#endif
221#if defined(__BIG_ENDIAN)
222 u16 ud_buffer_offset;
223 u16 sd_buffer_offset;
224#elif defined(__LITTLE_ENDIAN)
225 u16 sd_buffer_offset;
226 u16 ud_buffer_offset;
227#endif
228 u32 lun[2];
229#if defined(__BIG_ENDIAN)
230 u16 reserved2;
231 u16 itt;
232#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
233#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
234#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
235#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
236#elif defined(__LITTLE_ENDIAN)
237 u16 itt;
238#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
239#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
240#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
241#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
242 u16 reserved2;
243#endif
244 u32 total_data_transfer_length;
245 u32 cmd_sn;
246 u32 reserved3;
247 u32 cdb[4];
248 u32 zero_fill;
249 u32 bd_list_addr_lo;
250 u32 bd_list_addr_hi;
251#if defined(__BIG_ENDIAN)
252 u8 cq_index;
253 u8 sd_start_bd_index;
254 u8 ud_start_bd_index;
255 u8 num_bds;
256#elif defined(__LITTLE_ENDIAN)
257 u8 num_bds;
258 u8 ud_start_bd_index;
259 u8 sd_start_bd_index;
260 u8 cq_index;
261#endif
262};
263
264
265/*
266 * task statistics for write response
267 */
268struct bnx2i_write_resp_task_stat {
269 u32 num_data_ins;
270};
271
272/*
273 * task statistics for read response
274 */
275struct bnx2i_read_resp_task_stat {
276#if defined(__BIG_ENDIAN)
277 u16 num_data_outs;
278 u16 num_r2ts;
279#elif defined(__LITTLE_ENDIAN)
280 u16 num_r2ts;
281 u16 num_data_outs;
282#endif
283};
284
285/*
286 * task statistics for iSCSI cmd response
287 */
288union bnx2i_cmd_resp_task_stat {
289 struct bnx2i_write_resp_task_stat write_stat;
290 struct bnx2i_read_resp_task_stat read_stat;
291};
292
293/*
294 * SCSI Command CQE
295 */
296struct bnx2i_cmd_response {
297#if defined(__BIG_ENDIAN)
298 u8 op_code;
299 u8 response_flags;
300#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
301#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
302#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
303#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
304#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
305#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
306#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
307#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
308#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
309#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
310#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
311#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
312 u8 response;
313 u8 status;
314#elif defined(__LITTLE_ENDIAN)
315 u8 status;
316 u8 response;
317 u8 response_flags;
318#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
319#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
320#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
321#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
322#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
323#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
324#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
325#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
326#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
327#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
328#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
329#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
330 u8 op_code;
331#endif
332 u32 data_length;
333 u32 exp_cmd_sn;
334 u32 max_cmd_sn;
335 u32 reserved2;
336 u32 residual_count;
337#if defined(__BIG_ENDIAN)
338 u16 reserved4;
339 u8 err_code;
340 u8 reserved3;
341#elif defined(__LITTLE_ENDIAN)
342 u8 reserved3;
343 u8 err_code;
344 u16 reserved4;
345#endif
346 u32 reserved5[5];
347 union bnx2i_cmd_resp_task_stat task_stat;
348 u32 reserved6;
349#if defined(__BIG_ENDIAN)
350 u16 reserved7;
351 u16 itt;
352#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
353#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
354#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
355#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
356#elif defined(__LITTLE_ENDIAN)
357 u16 itt;
358#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
359#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
360#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
361#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
362 u16 reserved7;
363#endif
364 u32 cq_req_sn;
365};
366
367
368
369/*
370 * firmware middle-path request SQ WQE
371 */
372struct bnx2i_fw_mp_request {
373#if defined(__BIG_ENDIAN)
374 u8 op_code;
375 u8 op_attr;
376 u16 hdr_opaque1;
377#elif defined(__LITTLE_ENDIAN)
378 u16 hdr_opaque1;
379 u8 op_attr;
380 u8 op_code;
381#endif
382 u32 data_length;
383 u32 hdr_opaque2[2];
384#if defined(__BIG_ENDIAN)
385 u16 reserved0;
386 u16 itt;
387#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
388#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
389#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
390#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
391#elif defined(__LITTLE_ENDIAN)
392 u16 itt;
393#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
394#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
395#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
396#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
397 u16 reserved0;
398#endif
399 u32 hdr_opaque3[4];
400 u32 resp_bd_list_addr_lo;
401 u32 resp_bd_list_addr_hi;
402 u32 resp_buffer;
403#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
404#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
405#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
406#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
407#if defined(__BIG_ENDIAN)
408 u16 reserved4;
409 u8 reserved3;
410 u8 flags;
411#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
412#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
413#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
414#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
415#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
416#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
417#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
418#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
419#elif defined(__LITTLE_ENDIAN)
420 u8 flags;
421#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
422#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
423#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
424#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
425#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
426#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
427#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
428#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
429 u8 reserved3;
430 u16 reserved4;
431#endif
432 u32 bd_list_addr_lo;
433 u32 bd_list_addr_hi;
434#if defined(__BIG_ENDIAN)
435 u8 cq_index;
436 u8 reserved6;
437 u8 reserved5;
438 u8 num_bds;
439#elif defined(__LITTLE_ENDIAN)
440 u8 num_bds;
441 u8 reserved5;
442 u8 reserved6;
443 u8 cq_index;
444#endif
445};
446
447
448/*
449 * firmware response - CQE: used only by firmware
450 */
451struct bnx2i_fw_response {
452 u32 hdr_dword1[2];
453 u32 hdr_exp_cmd_sn;
454 u32 hdr_max_cmd_sn;
455 u32 hdr_ttt;
456 u32 hdr_res_cnt;
457 u32 cqe_flags;
458#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
459#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
460#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
461#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
462#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
463#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
464 u32 stat_sn;
465 u32 hdr_dword2[2];
466 u32 hdr_dword3[2];
467 u32 task_stat;
468 u32 reserved0;
469 u32 hdr_itt;
470 u32 cq_req_sn;
471};
472
473
474/*
475 * iSCSI KCQ CQE parameters
476 */
477union iscsi_kcqe_params {
478 u32 reserved0[4];
479};
480
481/*
482 * iSCSI KCQ CQE
483 */
484struct iscsi_kcqe {
485 u32 iscsi_conn_id;
486 u32 completion_status;
487 u32 iscsi_conn_context_id;
488 union iscsi_kcqe_params params;
489#if defined(__BIG_ENDIAN)
490 u8 flags;
491#define ISCSI_KCQE_RESERVED0 (0xF<<0)
492#define ISCSI_KCQE_RESERVED0_SHIFT 0
493#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
494#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
495#define ISCSI_KCQE_RESERVED1 (0x1<<7)
496#define ISCSI_KCQE_RESERVED1_SHIFT 7
497 u8 op_code;
498 u16 qe_self_seq;
499#elif defined(__LITTLE_ENDIAN)
500 u16 qe_self_seq;
501 u8 op_code;
502 u8 flags;
503#define ISCSI_KCQE_RESERVED0 (0xF<<0)
504#define ISCSI_KCQE_RESERVED0_SHIFT 0
505#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
506#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
507#define ISCSI_KCQE_RESERVED1 (0x1<<7)
508#define ISCSI_KCQE_RESERVED1_SHIFT 7
509#endif
510};
511
512
513
514/*
515 * iSCSI KWQE header
516 */
517struct iscsi_kwqe_header {
518#if defined(__BIG_ENDIAN)
519 u8 flags;
520#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
521#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
522#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
523#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
524#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
525#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
526 u8 op_code;
527#elif defined(__LITTLE_ENDIAN)
528 u8 op_code;
529 u8 flags;
530#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
531#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
532#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
533#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
534#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
535#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
536#endif
537};
538
539/*
540 * iSCSI firmware init request 1
541 */
542struct iscsi_kwqe_init1 {
543#if defined(__BIG_ENDIAN)
544 struct iscsi_kwqe_header hdr;
545 u8 reserved0;
546 u8 num_cqs;
547#elif defined(__LITTLE_ENDIAN)
548 u8 num_cqs;
549 u8 reserved0;
550 struct iscsi_kwqe_header hdr;
551#endif
552 u32 dummy_buffer_addr_lo;
553 u32 dummy_buffer_addr_hi;
554#if defined(__BIG_ENDIAN)
555 u16 num_ccells_per_conn;
556 u16 num_tasks_per_conn;
557#elif defined(__LITTLE_ENDIAN)
558 u16 num_tasks_per_conn;
559 u16 num_ccells_per_conn;
560#endif
561#if defined(__BIG_ENDIAN)
562 u16 sq_wqes_per_page;
563 u16 sq_num_wqes;
564#elif defined(__LITTLE_ENDIAN)
565 u16 sq_num_wqes;
566 u16 sq_wqes_per_page;
567#endif
568#if defined(__BIG_ENDIAN)
569 u8 cq_log_wqes_per_page;
570 u8 flags;
571#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
572#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
573#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
574#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
575#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
576#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
577#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
578#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
579 u16 cq_num_wqes;
580#elif defined(__LITTLE_ENDIAN)
581 u16 cq_num_wqes;
582 u8 flags;
583#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
584#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
585#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
586#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
587#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
588#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
589#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
590#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
591 u8 cq_log_wqes_per_page;
592#endif
593#if defined(__BIG_ENDIAN)
594 u16 cq_num_pages;
595 u16 sq_num_pages;
596#elif defined(__LITTLE_ENDIAN)
597 u16 sq_num_pages;
598 u16 cq_num_pages;
599#endif
600#if defined(__BIG_ENDIAN)
601 u16 rq_buffer_size;
602 u16 rq_num_wqes;
603#elif defined(__LITTLE_ENDIAN)
604 u16 rq_num_wqes;
605 u16 rq_buffer_size;
606#endif
607};
608
609/*
610 * iSCSI firmware init request 2
611 */
612struct iscsi_kwqe_init2 {
613#if defined(__BIG_ENDIAN)
614 struct iscsi_kwqe_header hdr;
615 u16 max_cq_sqn;
616#elif defined(__LITTLE_ENDIAN)
617 u16 max_cq_sqn;
618 struct iscsi_kwqe_header hdr;
619#endif
620 u32 error_bit_map[2];
621 u32 reserved1[5];
622};
623
624/*
625 * Initial iSCSI connection offload request 1
626 */
627struct iscsi_kwqe_conn_offload1 {
628#if defined(__BIG_ENDIAN)
629 struct iscsi_kwqe_header hdr;
630 u16 iscsi_conn_id;
631#elif defined(__LITTLE_ENDIAN)
632 u16 iscsi_conn_id;
633 struct iscsi_kwqe_header hdr;
634#endif
635 u32 sq_page_table_addr_lo;
636 u32 sq_page_table_addr_hi;
637 u32 cq_page_table_addr_lo;
638 u32 cq_page_table_addr_hi;
639 u32 reserved0[3];
640};
641
642/*
643 * iSCSI Page Table Entry (PTE)
644 */
645struct iscsi_pte {
646 u32 hi;
647 u32 lo;
648};
649
650/*
651 * Initial iSCSI connection offload request 2
652 */
653struct iscsi_kwqe_conn_offload2 {
654#if defined(__BIG_ENDIAN)
655 struct iscsi_kwqe_header hdr;
656 u16 reserved0;
657#elif defined(__LITTLE_ENDIAN)
658 u16 reserved0;
659 struct iscsi_kwqe_header hdr;
660#endif
661 u32 rq_page_table_addr_lo;
662 u32 rq_page_table_addr_hi;
663 struct iscsi_pte sq_first_pte;
664 struct iscsi_pte cq_first_pte;
665 u32 num_additional_wqes;
666};
667
668
669/*
670 * Initial iSCSI connection offload request 3
671 */
672struct iscsi_kwqe_conn_offload3 {
673#if defined(__BIG_ENDIAN)
674 struct iscsi_kwqe_header hdr;
675 u16 reserved0;
676#elif defined(__LITTLE_ENDIAN)
677 u16 reserved0;
678 struct iscsi_kwqe_header hdr;
679#endif
680 u32 reserved1;
681 struct iscsi_pte qp_first_pte[3];
682};
683
684
685/*
686 * iSCSI connection update request
687 */
688struct iscsi_kwqe_conn_update {
689#if defined(__BIG_ENDIAN)
690 struct iscsi_kwqe_header hdr;
691 u16 reserved0;
692#elif defined(__LITTLE_ENDIAN)
693 u16 reserved0;
694 struct iscsi_kwqe_header hdr;
695#endif
696#if defined(__BIG_ENDIAN)
697 u8 session_error_recovery_level;
698 u8 max_outstanding_r2ts;
699 u8 reserved2;
700 u8 conn_flags;
701#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
702#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
703#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
704#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
705#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
706#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
707#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
708#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
709#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
710#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
711#elif defined(__LITTLE_ENDIAN)
712 u8 conn_flags;
713#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
714#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
715#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
716#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
717#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
718#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
719#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
720#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
721#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
722#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
723 u8 reserved2;
724 u8 max_outstanding_r2ts;
725 u8 session_error_recovery_level;
726#endif
727 u32 context_id;
728 u32 max_send_pdu_length;
729 u32 max_recv_pdu_length;
730 u32 first_burst_length;
731 u32 max_burst_length;
732 u32 exp_stat_sn;
733};
734
735/*
736 * iSCSI destroy connection request
737 */
738struct iscsi_kwqe_conn_destroy {
739#if defined(__BIG_ENDIAN)
740 struct iscsi_kwqe_header hdr;
741 u16 reserved0;
742#elif defined(__LITTLE_ENDIAN)
743 u16 reserved0;
744 struct iscsi_kwqe_header hdr;
745#endif
746 u32 context_id;
747 u32 reserved1[6];
748};
749
750/*
751 * iSCSI KWQ WQE
752 */
753union iscsi_kwqe {
754 struct iscsi_kwqe_init1 init1;
755 struct iscsi_kwqe_init2 init2;
756 struct iscsi_kwqe_conn_offload1 conn_offload1;
757 struct iscsi_kwqe_conn_offload2 conn_offload2;
758 struct iscsi_kwqe_conn_update conn_update;
759 struct iscsi_kwqe_conn_destroy conn_destroy;
760};
761
762/*
763 * iSCSI Login SQ WQE
764 */
765struct bnx2i_login_request {
766#if defined(__BIG_ENDIAN)
767 u8 op_code;
768 u8 op_attr;
769#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
770#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
771#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
772#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
773#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
774#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
775#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
776#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
777#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
778#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
779 u8 version_max;
780 u8 version_min;
781#elif defined(__LITTLE_ENDIAN)
782 u8 version_min;
783 u8 version_max;
784 u8 op_attr;
785#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
786#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
787#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
788#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
789#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
790#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
791#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
792#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
793#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
794#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
795 u8 op_code;
796#endif
797 u32 data_length;
798 u32 isid_lo;
799#if defined(__BIG_ENDIAN)
800 u16 isid_hi;
801 u16 tsih;
802#elif defined(__LITTLE_ENDIAN)
803 u16 tsih;
804 u16 isid_hi;
805#endif
806#if defined(__BIG_ENDIAN)
807 u16 reserved2;
808 u16 itt;
809#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
810#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
811#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
812#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
813#elif defined(__LITTLE_ENDIAN)
814 u16 itt;
815#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
816#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
817#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
818#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
819 u16 reserved2;
820#endif
821#if defined(__BIG_ENDIAN)
822 u16 cid;
823 u16 reserved3;
824#elif defined(__LITTLE_ENDIAN)
825 u16 reserved3;
826 u16 cid;
827#endif
828 u32 cmd_sn;
829 u32 exp_stat_sn;
830 u32 reserved4;
831 u32 resp_bd_list_addr_lo;
832 u32 resp_bd_list_addr_hi;
833 u32 resp_buffer;
834#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
835#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
836#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
837#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
838#if defined(__BIG_ENDIAN)
839 u16 reserved8;
840 u8 reserved7;
841 u8 flags;
842#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
843#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
844#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
845#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
846#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
847#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
848#elif defined(__LITTLE_ENDIAN)
849 u8 flags;
850#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
851#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
852#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
853#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
854#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
855#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
856 u8 reserved7;
857 u16 reserved8;
858#endif
859 u32 bd_list_addr_lo;
860 u32 bd_list_addr_hi;
861#if defined(__BIG_ENDIAN)
862 u8 cq_index;
863 u8 reserved10;
864 u8 reserved9;
865 u8 num_bds;
866#elif defined(__LITTLE_ENDIAN)
867 u8 num_bds;
868 u8 reserved9;
869 u8 reserved10;
870 u8 cq_index;
871#endif
872};
873
874
875/*
876 * iSCSI Login CQE
877 */
878struct bnx2i_login_response {
879#if defined(__BIG_ENDIAN)
880 u8 op_code;
881 u8 response_flags;
882#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
883#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
884#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
885#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
886#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
887#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
888#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
889#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
890#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
891#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
892 u8 version_max;
893 u8 version_active;
894#elif defined(__LITTLE_ENDIAN)
895 u8 version_active;
896 u8 version_max;
897 u8 response_flags;
898#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
899#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
900#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
901#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
902#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
903#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
904#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
905#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
906#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
907#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
908 u8 op_code;
909#endif
910 u32 data_length;
911 u32 exp_cmd_sn;
912 u32 max_cmd_sn;
913 u32 reserved1[2];
914#if defined(__BIG_ENDIAN)
915 u16 reserved3;
916 u8 err_code;
917 u8 reserved2;
918#elif defined(__LITTLE_ENDIAN)
919 u8 reserved2;
920 u8 err_code;
921 u16 reserved3;
922#endif
923 u32 stat_sn;
924 u32 isid_lo;
925#if defined(__BIG_ENDIAN)
926 u16 isid_hi;
927 u16 tsih;
928#elif defined(__LITTLE_ENDIAN)
929 u16 tsih;
930 u16 isid_hi;
931#endif
932#if defined(__BIG_ENDIAN)
933 u8 status_class;
934 u8 status_detail;
935 u16 reserved4;
936#elif defined(__LITTLE_ENDIAN)
937 u16 reserved4;
938 u8 status_detail;
939 u8 status_class;
940#endif
941 u32 reserved5[3];
942#if defined(__BIG_ENDIAN)
943 u16 reserved6;
944 u16 itt;
945#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
946#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
947#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
948#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
949#elif defined(__LITTLE_ENDIAN)
950 u16 itt;
951#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
952#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
953#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
954#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
955 u16 reserved6;
956#endif
957 u32 cq_req_sn;
958};
959
960
961/*
962 * iSCSI Logout SQ WQE
963 */
964struct bnx2i_logout_request {
965#if defined(__BIG_ENDIAN)
966 u8 op_code;
967 u8 op_attr;
968#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
969#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
970#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
971#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
972 u16 reserved0;
973#elif defined(__LITTLE_ENDIAN)
974 u16 reserved0;
975 u8 op_attr;
976#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
977#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
978#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
979#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
980 u8 op_code;
981#endif
982 u32 data_length;
983 u32 reserved1[2];
984#if defined(__BIG_ENDIAN)
985 u16 reserved2;
986 u16 itt;
987#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
988#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
989#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
990#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
991#elif defined(__LITTLE_ENDIAN)
992 u16 itt;
993#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
994#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
995#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
996#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
997 u16 reserved2;
998#endif
999#if defined(__BIG_ENDIAN)
1000 u16 cid;
1001 u16 reserved3;
1002#elif defined(__LITTLE_ENDIAN)
1003 u16 reserved3;
1004 u16 cid;
1005#endif
1006 u32 cmd_sn;
1007 u32 reserved4[5];
1008 u32 zero_fill;
1009 u32 bd_list_addr_lo;
1010 u32 bd_list_addr_hi;
1011#if defined(__BIG_ENDIAN)
1012 u8 cq_index;
1013 u8 reserved6;
1014 u8 reserved5;
1015 u8 num_bds;
1016#elif defined(__LITTLE_ENDIAN)
1017 u8 num_bds;
1018 u8 reserved5;
1019 u8 reserved6;
1020 u8 cq_index;
1021#endif
1022};
1023
1024
1025/*
1026 * iSCSI Logout CQE
1027 */
1028struct bnx2i_logout_response {
1029#if defined(__BIG_ENDIAN)
1030 u8 op_code;
1031 u8 reserved1;
1032 u8 response;
1033 u8 reserved0;
1034#elif defined(__LITTLE_ENDIAN)
1035 u8 reserved0;
1036 u8 response;
1037 u8 reserved1;
1038 u8 op_code;
1039#endif
1040 u32 reserved2;
1041 u32 exp_cmd_sn;
1042 u32 max_cmd_sn;
1043 u32 reserved3[2];
1044#if defined(__BIG_ENDIAN)
1045 u16 reserved5;
1046 u8 err_code;
1047 u8 reserved4;
1048#elif defined(__LITTLE_ENDIAN)
1049 u8 reserved4;
1050 u8 err_code;
1051 u16 reserved5;
1052#endif
1053 u32 reserved6[3];
1054#if defined(__BIG_ENDIAN)
1055 u16 time_to_wait;
1056 u16 time_to_retain;
1057#elif defined(__LITTLE_ENDIAN)
1058 u16 time_to_retain;
1059 u16 time_to_wait;
1060#endif
1061 u32 reserved7[3];
1062#if defined(__BIG_ENDIAN)
1063 u16 reserved8;
1064 u16 itt;
1065#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
1066#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
1067#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
1068#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
1069#elif defined(__LITTLE_ENDIAN)
1070 u16 itt;
1071#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
1072#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
1073#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
1074#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
1075 u16 reserved8;
1076#endif
1077 u32 cq_req_sn;
1078};
1079
1080
1081/*
1082 * iSCSI Nop-In CQE
1083 */
1084struct bnx2i_nop_in_msg {
1085#if defined(__BIG_ENDIAN)
1086 u8 op_code;
1087 u8 reserved1;
1088 u16 reserved0;
1089#elif defined(__LITTLE_ENDIAN)
1090 u16 reserved0;
1091 u8 reserved1;
1092 u8 op_code;
1093#endif
1094 u32 data_length;
1095 u32 exp_cmd_sn;
1096 u32 max_cmd_sn;
1097 u32 ttt;
1098 u32 reserved2;
1099#if defined(__BIG_ENDIAN)
1100 u16 reserved4;
1101 u8 err_code;
1102 u8 reserved3;
1103#elif defined(__LITTLE_ENDIAN)
1104 u8 reserved3;
1105 u8 err_code;
1106 u16 reserved4;
1107#endif
1108 u32 reserved5;
1109 u32 lun[2];
1110 u32 reserved6[4];
1111#if defined(__BIG_ENDIAN)
1112 u16 reserved7;
1113 u16 itt;
1114#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
1115#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
1116#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
1117#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
1118#elif defined(__LITTLE_ENDIAN)
1119 u16 itt;
1120#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
1121#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
1122#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
1123#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
1124 u16 reserved7;
1125#endif
1126 u32 cq_req_sn;
1127};
1128
1129
1130/*
1131 * iSCSI NOP-OUT SQ WQE
1132 */
1133struct bnx2i_nop_out_request {
1134#if defined(__BIG_ENDIAN)
1135 u8 op_code;
1136 u8 op_attr;
1137#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
1138#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
1139#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
1140#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
1141 u16 reserved0;
1142#elif defined(__LITTLE_ENDIAN)
1143 u16 reserved0;
1144 u8 op_attr;
1145#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
1146#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
1147#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
1148#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
1149 u8 op_code;
1150#endif
1151 u32 data_length;
1152 u32 lun[2];
1153#if defined(__BIG_ENDIAN)
1154 u16 reserved2;
1155 u16 itt;
1156#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
1157#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
1158#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
1159#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
1160#elif defined(__LITTLE_ENDIAN)
1161 u16 itt;
1162#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
1163#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
1164#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
1165#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
1166 u16 reserved2;
1167#endif
1168 u32 ttt;
1169 u32 cmd_sn;
1170 u32 reserved3[2];
1171 u32 resp_bd_list_addr_lo;
1172 u32 resp_bd_list_addr_hi;
1173 u32 resp_buffer;
1174#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
1175#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
1176#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
1177#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
1178#if defined(__BIG_ENDIAN)
1179 u16 reserved7;
1180 u8 reserved6;
1181 u8 flags;
1182#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
1183#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
1184#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
1185#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
1186#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
1187#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
1188#elif defined(__LITTLE_ENDIAN)
1189 u8 flags;
1190#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
1191#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
1192#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
1193#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
1194#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
1195#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
1196 u8 reserved6;
1197 u16 reserved7;
1198#endif
1199 u32 bd_list_addr_lo;
1200 u32 bd_list_addr_hi;
1201#if defined(__BIG_ENDIAN)
1202 u8 cq_index;
1203 u8 reserved9;
1204 u8 reserved8;
1205 u8 num_bds;
1206#elif defined(__LITTLE_ENDIAN)
1207 u8 num_bds;
1208 u8 reserved8;
1209 u8 reserved9;
1210 u8 cq_index;
1211#endif
1212};
1213
1214/*
1215 * iSCSI Reject CQE
1216 */
1217struct bnx2i_reject_msg {
1218#if defined(__BIG_ENDIAN)
1219 u8 op_code;
1220 u8 reserved1;
1221 u8 reason;
1222 u8 reserved0;
1223#elif defined(__LITTLE_ENDIAN)
1224 u8 reserved0;
1225 u8 reason;
1226 u8 reserved1;
1227 u8 op_code;
1228#endif
1229 u32 data_length;
1230 u32 exp_cmd_sn;
1231 u32 max_cmd_sn;
1232 u32 reserved2[2];
1233#if defined(__BIG_ENDIAN)
1234 u16 reserved4;
1235 u8 err_code;
1236 u8 reserved3;
1237#elif defined(__LITTLE_ENDIAN)
1238 u8 reserved3;
1239 u8 err_code;
1240 u16 reserved4;
1241#endif
1242 u32 reserved5[8];
1243 u32 cq_req_sn;
1244};
1245
1246/*
1247 * bnx2i iSCSI TMF SQ WQE
1248 */
1249struct bnx2i_tmf_request {
1250#if defined(__BIG_ENDIAN)
1251 u8 op_code;
1252 u8 op_attr;
1253#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
1254#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
1255#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
1256#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
1257 u16 reserved0;
1258#elif defined(__LITTLE_ENDIAN)
1259 u16 reserved0;
1260 u8 op_attr;
1261#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
1262#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
1263#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
1264#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
1265 u8 op_code;
1266#endif
1267 u32 data_length;
1268 u32 lun[2];
1269#if defined(__BIG_ENDIAN)
1270 u16 reserved1;
1271 u16 itt;
1272#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
1273#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
1274#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
1275#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
1276#elif defined(__LITTLE_ENDIAN)
1277 u16 itt;
1278#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
1279#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
1280#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
1281#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
1282 u16 reserved1;
1283#endif
1284 u32 ref_itt;
1285 u32 cmd_sn;
1286 u32 reserved2;
1287 u32 ref_cmd_sn;
1288 u32 reserved3[3];
1289 u32 zero_fill;
1290 u32 bd_list_addr_lo;
1291 u32 bd_list_addr_hi;
1292#if defined(__BIG_ENDIAN)
1293 u8 cq_index;
1294 u8 reserved5;
1295 u8 reserved4;
1296 u8 num_bds;
1297#elif defined(__LITTLE_ENDIAN)
1298 u8 num_bds;
1299 u8 reserved4;
1300 u8 reserved5;
1301 u8 cq_index;
1302#endif
1303};
1304
1305/*
1306 * iSCSI Text SQ WQE
1307 */
1308struct bnx2i_text_request {
1309#if defined(__BIG_ENDIAN)
1310 u8 op_code;
1311 u8 op_attr;
1312#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
1313#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
1314#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
1315#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
1316#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
1317#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
1318 u16 reserved0;
1319#elif defined(__LITTLE_ENDIAN)
1320 u16 reserved0;
1321 u8 op_attr;
1322#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
1323#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
1324#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
1325#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
1326#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
1327#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
1328 u8 op_code;
1329#endif
1330 u32 data_length;
1331 u32 lun[2];
1332#if defined(__BIG_ENDIAN)
1333 u16 reserved3;
1334 u16 itt;
1335#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
1336#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
1337#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
1338#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
1339#elif defined(__LITTLE_ENDIAN)
1340 u16 itt;
1341#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
1342#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
1343#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
1344#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
1345 u16 reserved3;
1346#endif
1347 u32 ttt;
1348 u32 cmd_sn;
1349 u32 reserved4[2];
1350 u32 resp_bd_list_addr_lo;
1351 u32 resp_bd_list_addr_hi;
1352 u32 resp_buffer;
1353#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
1354#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
1355#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
1356#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
1357 u32 zero_fill;
1358 u32 bd_list_addr_lo;
1359 u32 bd_list_addr_hi;
1360#if defined(__BIG_ENDIAN)
1361 u8 cq_index;
1362 u8 reserved7;
1363 u8 reserved6;
1364 u8 num_bds;
1365#elif defined(__LITTLE_ENDIAN)
1366 u8 num_bds;
1367 u8 reserved6;
1368 u8 reserved7;
1369 u8 cq_index;
1370#endif
1371};
1372
1373/*
1374 * iSCSI SQ WQE
1375 */
1376union iscsi_request {
1377 struct bnx2i_cmd_request cmd;
1378 struct bnx2i_tmf_request tmf;
1379 struct bnx2i_nop_out_request nop_out;
1380 struct bnx2i_login_request login_req;
1381 struct bnx2i_text_request text;
1382 struct bnx2i_logout_request logout_req;
1383 struct bnx2i_cleanup_request cleanup;
1384};
1385
1386
1387/*
1388 * iSCSI TMF CQE
1389 */
1390struct bnx2i_tmf_response {
1391#if defined(__BIG_ENDIAN)
1392 u8 op_code;
1393 u8 reserved1;
1394 u8 response;
1395 u8 reserved0;
1396#elif defined(__LITTLE_ENDIAN)
1397 u8 reserved0;
1398 u8 response;
1399 u8 reserved1;
1400 u8 op_code;
1401#endif
1402 u32 reserved2;
1403 u32 exp_cmd_sn;
1404 u32 max_cmd_sn;
1405 u32 reserved3[2];
1406#if defined(__BIG_ENDIAN)
1407 u16 reserved5;
1408 u8 err_code;
1409 u8 reserved4;
1410#elif defined(__LITTLE_ENDIAN)
1411 u8 reserved4;
1412 u8 err_code;
1413 u16 reserved5;
1414#endif
1415 u32 reserved6[7];
1416#if defined(__BIG_ENDIAN)
1417 u16 reserved7;
1418 u16 itt;
1419#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
1420#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
1421#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
1422#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
1423#elif defined(__LITTLE_ENDIAN)
1424 u16 itt;
1425#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
1426#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
1427#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
1428#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
1429 u16 reserved7;
1430#endif
1431 u32 cq_req_sn;
1432};
1433
1434/*
1435 * iSCSI Text CQE
1436 */
1437struct bnx2i_text_response {
1438#if defined(__BIG_ENDIAN)
1439 u8 op_code;
1440 u8 response_flags;
1441#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
1442#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
1443#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
1444#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
1445#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
1446#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
1447 u16 reserved0;
1448#elif defined(__LITTLE_ENDIAN)
1449 u16 reserved0;
1450 u8 response_flags;
1451#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
1452#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
1453#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
1454#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
1455#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
1456#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
1457 u8 op_code;
1458#endif
1459 u32 data_length;
1460 u32 exp_cmd_sn;
1461 u32 max_cmd_sn;
1462 u32 ttt;
1463 u32 reserved2;
1464#if defined(__BIG_ENDIAN)
1465 u16 reserved4;
1466 u8 err_code;
1467 u8 reserved3;
1468#elif defined(__LITTLE_ENDIAN)
1469 u8 reserved3;
1470 u8 err_code;
1471 u16 reserved4;
1472#endif
1473 u32 reserved5;
1474 u32 lun[2];
1475 u32 reserved6[4];
1476#if defined(__BIG_ENDIAN)
1477 u16 reserved7;
1478 u16 itt;
1479#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
1480#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
1481#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
1482#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
1483#elif defined(__LITTLE_ENDIAN)
1484 u16 itt;
1485#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
1486#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
1487#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
1488#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
1489 u16 reserved7;
1490#endif
1491 u32 cq_req_sn;
1492};
1493
1494/*
1495 * iSCSI CQE
1496 */
1497union iscsi_response {
1498 struct bnx2i_cmd_response cmd;
1499 struct bnx2i_tmf_response tmf;
1500 struct bnx2i_login_response login_resp;
1501 struct bnx2i_text_response text;
1502 struct bnx2i_logout_response logout_resp;
1503 struct bnx2i_cleanup_response cleanup;
1504 struct bnx2i_reject_msg reject;
1505 struct bnx2i_async_msg async;
1506 struct bnx2i_nop_in_msg nop_in;
1507};
1508
1509#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
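Nearly every WQE and CQE in this header encodes its 16-bit itt as a 14-bit command index plus a 2-bit type, using the *_INDEX/*_TYPE masks and shifts defined above. A minimal sketch of how those masks could be applied; the helper names and locals (itt_pack, itt_unpack_index, cmd_index, wqe_type) are hypothetical and for illustration only, not the driver's own itt handling:

static inline u16 itt_pack(u16 cmd_index, u16 wqe_type)
{
	/* low 14 bits carry the command index, bits 14-15 the WQE type */
	return (cmd_index & ISCSI_CMD_REQUEST_INDEX) |
	       ((wqe_type << ISCSI_CMD_REQUEST_TYPE_SHIFT) &
		ISCSI_CMD_REQUEST_TYPE);
}

static inline u16 itt_unpack_index(u16 itt)
{
	/* recover the command index from a completion's itt field */
	return itt & ISCSI_CMD_RESPONSE_INDEX;
}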
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 000000000000..820d428ae839
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,7 @@
1config SCSI_BNX2_ISCSI
2 tristate "Broadcom NetXtreme II iSCSI support"
3 select SCSI_ISCSI_ATTRS
4 select CNIC
5 ---help---
6 This driver supports iSCSI offload for the Broadcom NetXtreme II
7 devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 000000000000..b5802bd2e76a
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
1bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
2
3obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 000000000000..d7576f28c6e9
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,771 @@
1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#ifndef _BNX2I_H_
15#define _BNX2I_H_
16
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19
20#include <linux/errno.h>
21#include <linux/pci.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/in.h>
26#include <linux/kfifo.h>
27#include <linux/netdevice.h>
28#include <linux/completion.h>
29
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi.h>
35#include <scsi/iscsi_proto.h>
36#include <scsi/libiscsi.h>
37#include <scsi/scsi_transport_iscsi.h>
38
39#include "../../net/cnic_if.h"
40#include "57xx_iscsi_hsi.h"
41#include "57xx_iscsi_constants.h"
42
43#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
44
45#define BNX2I_MAX_ADAPTERS 8
46
47#define ISCSI_MAX_CONNS_PER_HBA 128
48#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA
49#define ISCSI_MAX_CMDS_PER_SESS 128
50
51/* Total active commands across all connections supported by devices */
52#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
53#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
54#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
55
56#define ISCSI_MAX_BDS_PER_CMD 32
57
58#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
59#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
60
61/* 5706/08 hardware has a limit on the maximum buffer size per BD it can handle */
62#define MAX_BD_LENGTH 65535
63#define BD_SPLIT_SIZE 32768
64
65/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
66#define BNX2I_SQ_WQES_MIN 16
67#define BNX2I_570X_SQ_WQES_MAX 128
68#define BNX2I_5770X_SQ_WQES_MAX 512
69#define BNX2I_570X_SQ_WQES_DEFAULT 128
70#define BNX2I_5770X_SQ_WQES_DEFAULT 256
71
72#define BNX2I_570X_CQ_WQES_MAX 128
73#define BNX2I_5770X_CQ_WQES_MAX 512
74
75#define BNX2I_RQ_WQES_MIN 16
76#define BNX2I_RQ_WQES_MAX 32
77#define BNX2I_RQ_WQES_DEFAULT 16
78
79/* CCELLs per conn */
80#define BNX2I_CCELLS_MIN 16
81#define BNX2I_CCELLS_MAX 96
82#define BNX2I_CCELLS_DEFAULT 64
83
84#define ITT_INVALID_SIGNATURE 0xFFFF
85
86#define ISCSI_CMD_CLEANUP_TIMEOUT 100
87
88#define BNX2I_CONN_CTX_BUF_SIZE 16384
89
90#define BNX2I_SQ_WQE_SIZE 64
91#define BNX2I_RQ_WQE_SIZE 256
92#define BNX2I_CQE_SIZE 64
93
94#define MB_KERNEL_CTX_SHIFT 8
95#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)
96
97#define CTX_SHIFT 7
98#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)
99
100#define CTX_OFFSET 0x10000
101#define MAX_CID_CNT 0x4000
102
103/* 5709 context registers */
104#define BNX2_MQ_CONFIG2 0x00003d00
105#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
106#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8)
107
108/* 57710's BAR2 is mapped to doorbell registers */
109#define BNX2X_DOORBELL_PCI_BAR 2
110#define BNX2X_MAX_CQS 8
111
112#define CNIC_ARM_CQE 1
113#define CNIC_DISARM_CQE 0
114
115#define REG_RD(__hba, offset) \
116 readl(__hba->regview + offset)
117#define REG_WR(__hba, offset, val) \
118 writel(val, __hba->regview + offset)
119
120
121/**
122 * struct generic_pdu_resc - login pdu resource structure
123 *
124 * @req_buf: driver buffer used to stage payload associated with
125 * the login request
126 * @req_dma_addr: dma address for iscsi login request payload buffer
127 * @req_buf_size: actual login request payload length
128 * @req_wr_ptr:     pointer into login request buffer where next data is
129 * to be written
130 * @resp_hdr: iscsi header where iscsi login response header is to
131 * be recreated
132 * @resp_buf: buffer to stage login response payload
133 * @resp_dma_addr: login response payload buffer dma address
134 * @resp_buf_size:  login response payload length
135 * @resp_wr_ptr:    pointer into login response buffer where next data is
136 * to be written
137 * @req_bd_tbl: iscsi login request payload BD table
138 * @req_bd_dma: login request BD table dma address
139 * @resp_bd_tbl: iscsi login response payload BD table
140 * @resp_bd_dma:    login response BD table dma address
141 *
142 * following structure defines buffer info for generic pdus such as iSCSI Login,
143 * Logout and NOP
144 */
145struct generic_pdu_resc {
146 char *req_buf;
147 dma_addr_t req_dma_addr;
148 u32 req_buf_size;
149 char *req_wr_ptr;
150 struct iscsi_hdr resp_hdr;
151 char *resp_buf;
152 dma_addr_t resp_dma_addr;
153 u32 resp_buf_size;
154 char *resp_wr_ptr;
155 char *req_bd_tbl;
156 dma_addr_t req_bd_dma;
157 char *resp_bd_tbl;
158 dma_addr_t resp_bd_dma;
159};
160
161
162/**
163 * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
164 *
165 * @link: list head to link elements
166 * @max_ptrs:       maximum number of pointers that can be stored in this page
167 * @num_valid:      number of valid pointers in this page
168 * @page:           base address of the page pointer array
169 *
170 * structure to track DMA'able memory allocated for command BD tables
171 */
172struct bd_resc_page {
173 struct list_head link;
174 u32 max_ptrs;
175 u32 num_valid;
176 void *page[1];
177};
178
179
180/**
181 * struct io_bdt - I/O buffer descriptor table
182 *
183 * @bd_tbl: BD table's virtual address
184 * @bd_tbl_dma: BD table's dma address
185 * @bd_valid: num valid BD entries
186 *
187 * IO BD table
188 */
189struct io_bdt {
190 struct iscsi_bd *bd_tbl;
191 dma_addr_t bd_tbl_dma;
192 u16 bd_valid;
193};
194
195
196/**
197 * struct bnx2i_cmd - iscsi command structure
198 *
199 * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd
200 * @sg: SG list
201 * @io_tbl: buffer descriptor (BD) table
202 * @bd_tbl_dma: buffer descriptor (BD) table's dma address
203 */
204struct bnx2i_cmd {
205 struct iscsi_hdr hdr;
206 struct bnx2i_conn *conn;
207 struct scsi_cmnd *scsi_cmd;
208 struct scatterlist *sg;
209 struct io_bdt io_tbl;
210 dma_addr_t bd_tbl_dma;
211 struct bnx2i_cmd_request req;
212};
213
214
215/**
216 * struct bnx2i_conn - iscsi connection structure
217 *
218 * @cls_conn: pointer to iscsi cls conn
219 * @hba: adapter structure pointer
220 * @iscsi_conn_cid: iscsi conn id
221 * @fw_cid: firmware iscsi context id
222 * @ep: endpoint structure pointer
223 * @gen_pdu: login/nopout/logout pdu resources
224 * @violation_notified: bit mask used to track iscsi error/warning messages
225 * already printed out
226 *
227 * iSCSI connection structure
228 */
229struct bnx2i_conn {
230 struct iscsi_cls_conn *cls_conn;
231 struct bnx2i_hba *hba;
232 struct completion cmd_cleanup_cmpl;
233 int is_bound;
234
235 u32 iscsi_conn_cid;
236#define BNX2I_CID_RESERVED 0x5AFF
237 u32 fw_cid;
238
239 struct timer_list poll_timer;
240 /*
241 * Queue Pair (QP) related structure elements.
242 */
243 struct bnx2i_endpoint *ep;
244
245 /*
246 * Buffer for login negotiation process
247 */
248 struct generic_pdu_resc gen_pdu;
249 u64 violation_notified;
250};
251
252
253
254/**
255 * struct iscsi_cid_queue - Per adapter iscsi cid queue
256 *
257 * @cid_que_base: queue base memory
258 * @cid_que: queue memory pointer
259 * @cid_q_prod_idx:        producer index
260 * @cid_q_cons_idx: consumer index
261 * @cid_q_max_idx:         max index, used to detect wrap-around condition
262 * @cid_free_cnt: queue size
263 * @conn_cid_tbl: iscsi cid to conn structure mapping table
264 *
265 * Per adapter iSCSI CID Queue
266 */
267struct iscsi_cid_queue {
268 void *cid_que_base;
269 u32 *cid_que;
270 u32 cid_q_prod_idx;
271 u32 cid_q_cons_idx;
272 u32 cid_q_max_idx;
273 u32 cid_free_cnt;
274 struct bnx2i_conn **conn_cid_tbl;
275};
276
277/**
278 * struct bnx2i_hba - bnx2i adapter structure
279 *
280 * @link: list head to link elements
281 * @cnic: pointer to cnic device
282 * @pcidev: pointer to pci dev
283 * @netdev: pointer to netdev structure
284 * @regview: mapped PCI register space
285 * @age: age, incremented by every recovery
286 * @cnic_dev_type: cnic device type, 5706/5708/5709/57710
287 * @mail_queue_access: mailbox queue access mode, applicable to 5709 only
288 * @reg_with_cnic:         indicates whether the device is registered with CNIC
289 * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN
290 * @mtu_supported: Ethernet MTU supported
291 * @shost: scsi host pointer
292 * @max_sqes: SQ size
293 * @max_rqes: RQ size
294 * @max_cqes: CQ size
295 * @num_ccell: number of command cells per connection
296 * @ofld_conns_active:     active offloaded connection count
297 * @max_active_conns: max offload connections supported by this device
298 * @cid_que: iscsi cid queue
299 * @ep_rdwr_lock: read / write lock to synchronize various ep lists
300 * @ep_ofld_list: connection list for pending offload completion
301 * @ep_destroy_list:       connection list for pending destroy completion
302 * @mp_bd_tbl: BD table to be used with middle path requests
303 * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer
304 * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs
305 * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer
306 * @lock:                  lock to synchronize access to hba structure
307 * @pci_did: PCI device ID
308 * @pci_vid: PCI vendor ID
309 * @pci_sdid: PCI subsystem device ID
310 * @pci_svid: PCI subsystem vendor ID
311 * @pci_func: PCI function number in system pci tree
312 * @pci_devno: PCI device number in system pci tree
313 * @num_wqe_sent: statistic counter, total wqe's sent
314 * @num_cqe_rcvd: statistic counter, total cqe's received
315 * @num_intr_claimed: statistic counter, total interrupts claimed
316 * @link_changed_count: statistic counter, num of link change notifications
317 * received
318 * @ipaddr_changed_count: statistic counter, num times IP address changed while
319 * at least one connection is offloaded
320 * @num_sess_opened: statistic counter, total num sessions opened
321 * @num_conn_opened: statistic counter, total num conns opened on this hba
322 * @ctx_ccell_tasks: captures number of ccells and tasks supported by
323 * currently offloaded connection, used to decode
324 * context memory
325 *
326 * Adapter Data Structure
327 */
328struct bnx2i_hba {
329 struct list_head link;
330 struct cnic_dev *cnic;
331 struct pci_dev *pcidev;
332 struct net_device *netdev;
333 void __iomem *regview;
334
335 u32 age;
336 unsigned long cnic_dev_type;
337 #define BNX2I_NX2_DEV_5706 0x0
338 #define BNX2I_NX2_DEV_5708 0x1
339 #define BNX2I_NX2_DEV_5709 0x2
340 #define BNX2I_NX2_DEV_57710 0x3
341 u32 mail_queue_access;
342 #define BNX2I_MQ_KERNEL_MODE 0x0
343 #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1
344 #define BNX2I_MQ_BIN_MODE 0x2
345 unsigned long reg_with_cnic;
346 #define BNX2I_CNIC_REGISTERED 1
347
348 unsigned long adapter_state;
349 #define ADAPTER_STATE_UP 0
350 #define ADAPTER_STATE_GOING_DOWN 1
351 #define ADAPTER_STATE_LINK_DOWN 2
352 #define ADAPTER_STATE_INIT_FAILED 31
353 unsigned int mtu_supported;
354 #define BNX2I_MAX_MTU_SUPPORTED 1500
355
356 struct Scsi_Host *shost;
357
358 u32 max_sqes;
359 u32 max_rqes;
360 u32 max_cqes;
361 u32 num_ccell;
362
363 int ofld_conns_active;
364
365 int max_active_conns;
366 struct iscsi_cid_queue cid_que;
367
368 rwlock_t ep_rdwr_lock;
369 struct list_head ep_ofld_list;
370 struct list_head ep_destroy_list;
371
372 /*
373	 * BD table to be used with MP (Middle Path) requests.
374 */
375 char *mp_bd_tbl;
376 dma_addr_t mp_bd_dma;
377 char *dummy_buffer;
378 dma_addr_t dummy_buf_dma;
379
380 spinlock_t lock; /* protects hba structure access */
381	struct mutex net_dev_lock; /* sync net device access */
382
383 /*
384 * PCI related info.
385 */
386 u16 pci_did;
387 u16 pci_vid;
388 u16 pci_sdid;
389 u16 pci_svid;
390 u16 pci_func;
391 u16 pci_devno;
392
393 /*
394 * Following are a bunch of statistics useful during development
395	 * and at a later stage for scoreboarding.
396 */
397 u32 num_wqe_sent;
398 u32 num_cqe_rcvd;
399 u32 num_intr_claimed;
400 u32 link_changed_count;
401 u32 ipaddr_changed_count;
402 u32 num_sess_opened;
403 u32 num_conn_opened;
404 unsigned int ctx_ccell_tasks;
405};
406
407
408/*******************************************************************************
409 * QP [ SQ / RQ / CQ ] info.
410 ******************************************************************************/
411
412/*
413 * SQ/RQ/CQ generic structure definition
414 */
415struct sqe {
416 u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
417};
418
419struct rqe {
420 u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
421};
422
423struct cqe {
424 u8 cqe_byte[BNX2I_CQE_SIZE];
425};
426
427
428enum {
429#if defined(__LITTLE_ENDIAN)
430 CNIC_EVENT_COAL_INDEX = 0x0,
431 CNIC_SEND_DOORBELL = 0x4,
432 CNIC_EVENT_CQ_ARM = 0x7,
433 CNIC_RECV_DOORBELL = 0x8
434#elif defined(__BIG_ENDIAN)
435 CNIC_EVENT_COAL_INDEX = 0x2,
436 CNIC_SEND_DOORBELL = 0x6,
437 CNIC_EVENT_CQ_ARM = 0x4,
438 CNIC_RECV_DOORBELL = 0xa
439#endif
440};
441
442
443/*
444 * CQ DB
445 */
446struct bnx2x_iscsi_cq_pend_cmpl {
447 /* CQ producer, updated by Ustorm */
448 u16 ustrom_prod;
449 /* CQ pending completion counter */
450 u16 pend_cntr;
451};
452
453
454struct bnx2i_5771x_cq_db {
455 struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
456 /* CQ pending completion ITT array */
457 u16 itt[BNX2X_MAX_CQS];
458	/* Cstorm CQ sequence to notify array, updated by driver */
459 u16 sqn[BNX2X_MAX_CQS];
460	u32 reserved[4]; /* 16 byte alignment */
461};
462
463
464struct bnx2i_5771x_sq_rq_db {
465 u16 prod_idx;
466 u8 reserved0[14]; /* Pad structure size to 16 bytes */
467};
468
469
470struct bnx2i_5771x_dbell_hdr {
471 u8 header;
472 /* 1 for rx doorbell, 0 for tx doorbell */
473#define B577XX_DOORBELL_HDR_RX (0x1<<0)
474#define B577XX_DOORBELL_HDR_RX_SHIFT 0
475 /* 0 for normal doorbell, 1 for advertise wnd doorbell */
476#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
477#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
478 /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
479#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
480#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
481 /* connection type */
482#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
483#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
484};
485
486struct bnx2i_5771x_dbell {
487 struct bnx2i_5771x_dbell_hdr dbell;
488 u8 pad[3];
489
490};
491
492/**
493 * struct qp_info - QP (shared queue region) attributes structure
494 *
495 * @ctx_base: ioremapped pci register base to access doorbell register
496 * pertaining to this offloaded connection
497 * @sq_virt: virtual address of send queue (SQ) region
498 * @sq_phys: DMA address of SQ memory region
499 * @sq_mem_size: SQ size
500 * @sq_prod_qe: SQ producer entry pointer
501 * @sq_cons_qe: SQ consumer entry pointer
502 * @sq_first_qe:        virtual address of first entry in SQ
503 * @sq_last_qe:         virtual address of last entry in SQ
504 * @sq_prod_idx: SQ producer index
505 * @sq_cons_idx: SQ consumer index
506 * @sqe_left:           number of SQ entries left
507 * @sq_pgtbl_virt:      page table describing buffer constituting SQ region
508 * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt'
509 * @sq_pgtbl_size: SQ page table size
510 * @cq_virt: virtual address of completion queue (CQ) region
511 * @cq_phys:            DMA address of CQ memory region
512 * @cq_mem_size: CQ size
513 * @cq_prod_qe: CQ producer entry pointer
514 * @cq_cons_qe: CQ consumer entry pointer
515 * @cq_first_qe:        virtual address of first entry in CQ
516 * @cq_last_qe:         virtual address of last entry in CQ
517 * @cq_prod_idx: CQ producer index
518 * @cq_cons_idx: CQ consumer index
519 * @cqe_left:           number of CQ entries left
520 * @cqe_size: size of each CQ entry
521 * @cqe_exp_seq_sn: next expected CQE sequence number
522 * @cq_pgtbl_virt:      page table describing buffer constituting CQ region
523 * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt'
524 * @cq_pgtbl_size: CQ page table size
525 * @rq_virt: virtual address of receive queue (RQ) region
526 * @rq_phys: DMA address of RQ memory region
527 * @rq_mem_size: RQ size
528 * @rq_prod_qe: RQ producer entry pointer
529 * @rq_cons_qe: RQ consumer entry pointer
530 * @rq_first_qe:        virtual address of first entry in RQ
531 * @rq_last_qe:         virtual address of last entry in RQ
532 * @rq_prod_idx: RQ producer index
533 * @rq_cons_idx: RQ consumer index
534 * @rqe_left:           number of RQ entries left
535 * @rq_pgtbl_virt:      page table describing buffer constituting RQ region
536 * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt'
537 * @rq_pgtbl_size: RQ page table size
538 *
539 * queue pair (QP) is a per connection shared data structure which is used
540 * to send work requests (SQ), receive completion notifications (CQ)
541 * and receive asynchronous / scsi sense info (RQ). 'qp_info' structure
542 * below holds queue memory, consumer/producer indexes and page table
543 * information
544 */
545struct qp_info {
546 void __iomem *ctx_base;
547#define DPM_TRIGER_TYPE 0x40
548
549#define BNX2I_570x_QUE_DB_SIZE 0
550#define BNX2I_5771x_QUE_DB_SIZE 16
551 struct sqe *sq_virt;
552 dma_addr_t sq_phys;
553 u32 sq_mem_size;
554
555 struct sqe *sq_prod_qe;
556 struct sqe *sq_cons_qe;
557 struct sqe *sq_first_qe;
558 struct sqe *sq_last_qe;
559 u16 sq_prod_idx;
560 u16 sq_cons_idx;
561 u32 sqe_left;
562
563 void *sq_pgtbl_virt;
564 dma_addr_t sq_pgtbl_phys;
565 u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
566
567 struct cqe *cq_virt;
568 dma_addr_t cq_phys;
569 u32 cq_mem_size;
570
571 struct cqe *cq_prod_qe;
572 struct cqe *cq_cons_qe;
573 struct cqe *cq_first_qe;
574 struct cqe *cq_last_qe;
575 u16 cq_prod_idx;
576 u16 cq_cons_idx;
577 u32 cqe_left;
578 u32 cqe_size;
579 u32 cqe_exp_seq_sn;
580
581 void *cq_pgtbl_virt;
582 dma_addr_t cq_pgtbl_phys;
583 u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
584
585 struct rqe *rq_virt;
586 dma_addr_t rq_phys;
587 u32 rq_mem_size;
588
589 struct rqe *rq_prod_qe;
590 struct rqe *rq_cons_qe;
591 struct rqe *rq_first_qe;
592 struct rqe *rq_last_qe;
593 u16 rq_prod_idx;
594 u16 rq_cons_idx;
595 u32 rqe_left;
596
597 void *rq_pgtbl_virt;
598 dma_addr_t rq_pgtbl_phys;
599 u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
600};
601
602
603
604/*
605 * CID handles
606 */
607struct ep_handles {
608 u32 fw_cid;
609 u32 drv_iscsi_cid;
610 u16 pg_cid;
611 u16 rsvd;
612};
613
614
615enum {
616 EP_STATE_IDLE = 0x0,
617 EP_STATE_PG_OFLD_START = 0x1,
618 EP_STATE_PG_OFLD_COMPL = 0x2,
619 EP_STATE_OFLD_START = 0x4,
620 EP_STATE_OFLD_COMPL = 0x8,
621 EP_STATE_CONNECT_START = 0x10,
622 EP_STATE_CONNECT_COMPL = 0x20,
623 EP_STATE_ULP_UPDATE_START = 0x40,
624 EP_STATE_ULP_UPDATE_COMPL = 0x80,
625 EP_STATE_DISCONN_START = 0x100,
626 EP_STATE_DISCONN_COMPL = 0x200,
627 EP_STATE_CLEANUP_START = 0x400,
628 EP_STATE_CLEANUP_CMPL = 0x800,
629 EP_STATE_TCP_FIN_RCVD = 0x1000,
630 EP_STATE_TCP_RST_RCVD = 0x2000,
631 EP_STATE_PG_OFLD_FAILED = 0x1000000,
632 EP_STATE_ULP_UPDATE_FAILED = 0x2000000,
633 EP_STATE_CLEANUP_FAILED = 0x4000000,
634 EP_STATE_OFLD_FAILED = 0x8000000,
635 EP_STATE_CONNECT_FAILED = 0x10000000,
636 EP_STATE_DISCONN_TIMEDOUT = 0x20000000,
637};
638
639/**
640 * struct bnx2i_endpoint - representation of tcp connection in NX2 world
641 *
642 * @link: list head to link elements
643 * @hba: adapter to which this connection belongs
644 * @conn: iscsi connection this EP is linked to
645 * @sess: iscsi session this EP is linked to
646 * @cm_sk: cnic sock struct
647 * @hba_age: age to detect if 'iscsid' issues ep_disconnect()
648 * after HBA reset is completed by bnx2i/cnic/bnx2
649 * modules
650 * @state: tracks offload connection state machine
651 * @teardown_mode: indicates if conn teardown is abortive or orderly
652 * @qp: QP information
653 * @ids: contains chip allocated *context id* & driver assigned
654 * *iscsi cid*
655 * @ofld_timer: offload timer to detect timeout
656 * @ofld_wait: wait queue
657 *
658 * Endpoint Structure - equivalent of tcp socket structure
659 */
660struct bnx2i_endpoint {
661 struct list_head link;
662 struct bnx2i_hba *hba;
663 struct bnx2i_conn *conn;
664 struct cnic_sock *cm_sk;
665 u32 hba_age;
666 u32 state;
667 unsigned long timestamp;
668 int num_active_cmds;
669
670 struct qp_info qp;
671 struct ep_handles ids;
672 #define ep_iscsi_cid ids.drv_iscsi_cid
673 #define ep_cid ids.fw_cid
674 #define ep_pg_cid ids.pg_cid
675 struct timer_list ofld_timer;
676 wait_queue_head_t ofld_wait;
677};
678
679
680
681/* Global variables */
682extern unsigned int error_mask1, error_mask2;
683extern u64 iscsi_error_mask;
684extern unsigned int en_tcp_dack;
685extern unsigned int event_coal_div;
686
687extern struct scsi_transport_template *bnx2i_scsi_xport_template;
688extern struct iscsi_transport bnx2i_iscsi_transport;
689extern struct cnic_ulp_ops bnx2i_cnic_cb;
690
691extern unsigned int sq_size;
692extern unsigned int rq_size;
693
694extern struct device_attribute *bnx2i_dev_attributes[];
695
696
697
698/*
699 * Function Prototypes
700 */
701extern void bnx2i_identify_device(struct bnx2i_hba *hba);
702extern void bnx2i_register_device(struct bnx2i_hba *hba);
703
704extern void bnx2i_ulp_init(struct cnic_dev *dev);
705extern void bnx2i_ulp_exit(struct cnic_dev *dev);
706extern void bnx2i_start(void *handle);
707extern void bnx2i_stop(void *handle);
708extern void bnx2i_reg_dev_all(void);
709extern void bnx2i_unreg_dev_all(void);
710extern struct bnx2i_hba *get_adapter_list_head(void);
711
712struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
713 u16 iscsi_cid);
714
715int bnx2i_alloc_ep_pool(void);
716void bnx2i_release_ep_pool(void);
717struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
718struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
719
720struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
721
722struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
723void bnx2i_free_hba(struct bnx2i_hba *hba);
724
725void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
726void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
727
728void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
729
730void bnx2i_drop_session(struct iscsi_cls_session *session);
731
732extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
733extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
734 struct iscsi_task *mtask);
735extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
736 struct iscsi_task *mtask);
737extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
738 struct bnx2i_cmd *cmnd);
739extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
740 struct iscsi_task *mtask, u32 ttt,
741 char *datap, int data_len, int unsol);
742extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
743 struct iscsi_task *mtask);
744extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
745 struct bnx2i_cmd *cmd);
746extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
747 struct bnx2i_endpoint *ep);
748extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
749extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
750 struct bnx2i_endpoint *ep);
751
752extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
753 struct bnx2i_endpoint *ep);
754extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
755 struct bnx2i_endpoint *ep);
756extern void bnx2i_ep_ofld_timer(unsigned long data);
757extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
758 struct bnx2i_hba *hba, u32 iscsi_cid);
759extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
760 struct bnx2i_hba *hba, u32 iscsi_cid);
761
762extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
763extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
764
765/* Debug related function prototypes */
766extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
767extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
768extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
769extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
770
771#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644
index 000000000000..906cef5cda86
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -0,0 +1,2405 @@
1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include <scsi/scsi_tcq.h>
15#include <scsi/libiscsi.h>
16#include "bnx2i.h"
17
18/**
19 * bnx2i_get_cid_num - get cid from ep
20 * @ep: endpoint pointer
21 *
22 * Only applicable to 57710 family of devices
23 */
24static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
25{
26 u32 cid;
27
28 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
29 cid = ep->ep_cid;
30 else
31 cid = GET_CID_NUM(ep->ep_cid);
32 return cid;
33}
34
35
36/**
37 * bnx2i_adjust_qp_size - adjusts SQ/RQ/CQ sizes for the device type
38 * @hba: adapter for which adjustments are to be made
39 *
40 * Rounds each queue to an integral number of pages (power of two on 1G chips)
41 */
42static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
43{
44 u32 num_elements_per_pg;
45
46 if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
47 test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
48 test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
49 if (!is_power_of_2(hba->max_sqes))
50 hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
51
52 if (!is_power_of_2(hba->max_rqes))
53 hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
54 }
55
56 /* Adjust each queue size if the user selection does not
57	 * yield an integral number of page buffers
58 */
59 /* adjust SQ */
60 num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
61 if (hba->max_sqes < num_elements_per_pg)
62 hba->max_sqes = num_elements_per_pg;
63 else if (hba->max_sqes % num_elements_per_pg)
64 hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
65 ~(num_elements_per_pg - 1);
66
67 /* adjust CQ */
68 num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
69 if (hba->max_cqes < num_elements_per_pg)
70 hba->max_cqes = num_elements_per_pg;
71 else if (hba->max_cqes % num_elements_per_pg)
72 hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
73 ~(num_elements_per_pg - 1);
74
75 /* adjust RQ */
76 num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
77 if (hba->max_rqes < num_elements_per_pg)
78 hba->max_rqes = num_elements_per_pg;
79 else if (hba->max_rqes % num_elements_per_pg)
80 hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
81 ~(num_elements_per_pg - 1);
82}
83
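The rounding above keeps every queue an integral number of pages: a request smaller than one page's worth of WQEs is bumped up to that minimum, and anything larger is rounded up to the next multiple of WQEs-per-page. A minimal standalone sketch of that arithmetic (the helper name and the sizes in the example are hypothetical, not the driver's PAGE_SIZE/BNX2I_*_WQE_SIZE constants; it assumes the per-page element count is a power of two, as it is for page-sized rings with power-of-two WQE sizes):

/* Sketch: round a requested WQE count up to a whole number of pages;
 * elems_per_pg is PAGE_SIZE / wqe_size and must be a power of two. */
static unsigned int round_queue_size(unsigned int requested,
				     unsigned int elems_per_pg)
{
	if (requested < elems_per_pg)
		return elems_per_pg;
	if (requested % elems_per_pg)
		return (requested + elems_per_pg - 1) & ~(elems_per_pg - 1);
	return requested;
}

/* e.g. with 64 WQEs per page, a request for 100 becomes 128 and a
 * request for 10 becomes 64. */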
84
85/**
86 * bnx2i_get_link_state - get network interface link state
87 * @hba: adapter instance pointer
88 *
89 * updates adapter structure flag based on netdev state
90 */
91static void bnx2i_get_link_state(struct bnx2i_hba *hba)
92{
93 if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
94 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
95 else
96 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
97}
98
99
100/**
101 * bnx2i_iscsi_license_error - displays iscsi license related error message
102 * @hba: adapter instance pointer
103 * @error_code: error classification
104 *
105 * Puts out an error log when driver is unable to offload iscsi connection
106 * due to license restrictions
107 */
108static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
109{
110 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
111 /* iSCSI offload not supported on this device */
112 printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
113 hba->netdev->name);
114 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
115 /* iSCSI offload not supported on this LOM device */
116		printk(KERN_ERR "bnx2i: LOM is not enabled to "
117 "offload iSCSI connections, dev=%s\n",
118 hba->netdev->name);
119 set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
120}
121
122
123/**
124 * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
125 * @ep: endpoint (transport identifier) structure
126 * @action: action, ARM or DISARM. For now only ARM_CQE is used
127 *
128 * Arming the CQ enables the chip to generate a global EQ event in order to
129 * interrupt the driver. An EQ event is generated when the CQ index is hit or
130 * when at least one CQE is outstanding and the on-chip timer expires
131 */
132void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
133{
134 struct bnx2i_5771x_cq_db *cq_db;
135 u16 cq_index;
136
137 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
138 return;
139
140 if (action == CNIC_ARM_CQE) {
141 cq_index = ep->qp.cqe_exp_seq_sn +
142 ep->num_active_cmds / event_coal_div;
143 cq_index %= (ep->qp.cqe_size * 2 + 1);
144 if (!cq_index) {
145 cq_index = 1;
146 cq_db = (struct bnx2i_5771x_cq_db *)
147 ep->qp.cq_pgtbl_virt;
148 cq_db->sqn[0] = cq_index;
149 }
150 }
151}
152
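The index computed above lives in a CQE sequence-number space of 2 * cq_size + 1 values in which 0 is never a valid value, so a wrap to 0 is remapped to 1. A small standalone sketch of just that index arithmetic (the helper name is made up; the queue size and coalescing divisor are plain parameters instead of the driver's qp and event_coal_div fields):

/* Sketch: compute the CQE sequence number at which the chip should next
 * raise an EQ event; valid values are 1 .. 2*cq_size, never 0. */
static unsigned short next_arm_index(unsigned int exp_seq_sn,
				     unsigned int active_cmds,
				     unsigned int coal_div,
				     unsigned int cq_size)
{
	unsigned int idx = exp_seq_sn + active_cmds / coal_div;

	idx %= (cq_size * 2 + 1);
	if (idx == 0)
		idx = 1;	/* 0 is reserved in this sequence space */
	return (unsigned short)idx;
}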
153
154/**
155 * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
156 * @bnx2i_conn: iscsi connection on which the RQ event occurred
157 * @ptr: driver buffer to which the RQ buffer contents are to
158 *       be copied
159 * @len: length of valid data inside the RQ buf
160 *
161 * Copies RQ buffer contents from the shared (DMA'able) memory region to
162 * the driver buffer. The RQ is used to DMA unsolicited iSCSI PDUs and
163 * SCSI sense info
164 */
165void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
166{
167 if (!bnx2i_conn->ep->qp.rqe_left)
168 return;
169
170 bnx2i_conn->ep->qp.rqe_left--;
171 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
172 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
173 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
174 bnx2i_conn->ep->qp.rq_cons_idx = 0;
175 } else {
176 bnx2i_conn->ep->qp.rq_cons_qe++;
177 bnx2i_conn->ep->qp.rq_cons_idx++;
178 }
179}
180
181
182static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
183{
184 struct bnx2i_5771x_dbell dbell;
185 u32 msg;
186
187 memset(&dbell, 0, sizeof(dbell));
188 dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
189 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
190 msg = *((u32 *)&dbell);
191 /* TODO : get doorbell register mapping */
192 writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
193}
194
195
196/**
197 * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell
198 * @bnx2i_conn: iscsi connection on which RQ buffers are being replenished
199 * @count: number of RQ buffers being posted to the chip
200 *
201 * No need to ring hardware doorbell for 57710 family of devices
202 */
203void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
204{
205 struct bnx2i_5771x_sq_rq_db *rq_db;
206 u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
207 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
208
209 ep->qp.rqe_left += count;
210 ep->qp.rq_prod_idx &= 0x7FFF;
211 ep->qp.rq_prod_idx += count;
212
213 if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
214 ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
215 if (!hi_bit)
216 ep->qp.rq_prod_idx |= 0x8000;
217 } else
218 ep->qp.rq_prod_idx |= hi_bit;
219
220 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
221 rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
222 rq_db->prod_idx = ep->qp.rq_prod_idx;
223 /* no need to ring hardware doorbell for 57710 */
224 } else {
225 writew(ep->qp.rq_prod_idx,
226 ep->qp.ctx_base + CNIC_RECV_DOORBELL);
227 }
228 mmiowb();
229}
230
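bnx2i_put_rq_buf() keeps the RQ producer index in the low 15 bits and toggles bit 15 each time the counter wraps past the ring size. A self-contained sketch of that update (the ring size is passed in instead of hba->max_rqes, and the bit-15 macro name is made up for illustration):

#define RQ_WRAP_BIT 0x8000	/* hypothetical name for bit 15 */

/* Sketch: advance a 15-bit producer index by 'count', flipping the wrap
 * bit whenever the counter passes ring_size, as done above. */
static unsigned short advance_rq_prod(unsigned short prod_idx,
				      unsigned int count,
				      unsigned int ring_size)
{
	unsigned short wrap = prod_idx & RQ_WRAP_BIT;
	unsigned int idx = prod_idx & 0x7FFF;

	idx += count;
	if (idx > ring_size) {
		idx %= ring_size;
		wrap ^= RQ_WRAP_BIT;	/* wrapped: toggle bit 15 */
	}
	return (unsigned short)(idx | wrap);
}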
231
232/**
233 * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
234 * @conn: iscsi connection to which new SQ entries belong
235 * @count: number of SQ WQEs to post
236 *
237 * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
238 * of devices. For 5706/5708/5709 new SQ WQE count is written into the
239 * doorbell register
240 */
241static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
242{
243 struct bnx2i_5771x_sq_rq_db *sq_db;
244 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
245
246 ep->num_active_cmds++;
247 wmb(); /* flush SQ WQE memory before the doorbell is rung */
248 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
249 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
250 sq_db->prod_idx = ep->qp.sq_prod_idx;
251 bnx2i_ring_577xx_doorbell(bnx2i_conn);
252 } else
253 writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
254
255 mmiowb(); /* flush posted PCI writes */
256}
257
258
259/**
260 * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
261 * @conn: iscsi connection to which new SQ entries belong
262 * @count: number of SQ WQEs to post
263 *
264 * this routine will update SQ driver parameters and ring the doorbell
265 */
266static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
267 int count)
268{
269 int tmp_cnt;
270
271 if (count == 1) {
272 if (bnx2i_conn->ep->qp.sq_prod_qe ==
273 bnx2i_conn->ep->qp.sq_last_qe)
274 bnx2i_conn->ep->qp.sq_prod_qe =
275 bnx2i_conn->ep->qp.sq_first_qe;
276 else
277 bnx2i_conn->ep->qp.sq_prod_qe++;
278 } else {
279 if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
280 bnx2i_conn->ep->qp.sq_last_qe)
281 bnx2i_conn->ep->qp.sq_prod_qe += count;
282 else {
283 tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
284 bnx2i_conn->ep->qp.sq_prod_qe;
285 bnx2i_conn->ep->qp.sq_prod_qe =
286 &bnx2i_conn->ep->qp.sq_first_qe[count -
287 (tmp_cnt + 1)];
288 }
289 }
290 bnx2i_conn->ep->qp.sq_prod_idx += count;
291 /* Ring the doorbell */
292 bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
293}
294
295
296/**
297 * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
298 * @bnx2i_conn: iscsi connection
299 * @task: transport layer's iscsi task which is requesting
300 *        a WQE to be sent to the chip for further processing
301 *
302 * prepare and post an iSCSI Login request WQE to CNIC firmware
303 */
304int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
305 struct iscsi_task *task)
306{
307 struct bnx2i_cmd *bnx2i_cmd;
308 struct bnx2i_login_request *login_wqe;
309 struct iscsi_login *login_hdr;
310 u32 dword;
311
312 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
313 login_hdr = (struct iscsi_login *)task->hdr;
314 login_wqe = (struct bnx2i_login_request *)
315 bnx2i_conn->ep->qp.sq_prod_qe;
316
317 login_wqe->op_code = login_hdr->opcode;
318 login_wqe->op_attr = login_hdr->flags;
319 login_wqe->version_max = login_hdr->max_version;
320 login_wqe->version_min = login_hdr->min_version;
321 login_wqe->data_length = ntoh24(login_hdr->dlength);
322 login_wqe->isid_lo = *((u32 *) login_hdr->isid);
323 login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
324 login_wqe->tsih = login_hdr->tsih;
325 login_wqe->itt = task->itt |
326 (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
327 login_wqe->cid = login_hdr->cid;
328
329 login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
330 login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
331
332 login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
333 login_wqe->resp_bd_list_addr_hi =
334 (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
335
336 dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
337 (bnx2i_conn->gen_pdu.resp_buf_size <<
338 ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
339 login_wqe->resp_buffer = dword;
340 login_wqe->flags = 0;
341 login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
342 login_wqe->bd_list_addr_hi =
343 (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
344 login_wqe->num_bds = 1;
345 login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
346
347 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
348 return 0;
349}
350
351/**
352 * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
353 * @bnx2i_conn: iscsi connection
354 * @mtask: driver command structure which is requesting
355 *         a WQE to be sent to the chip for further processing
356 *
357 * prepare and post an iSCSI task management request WQE to CNIC firmware
358 */
359int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
360 struct iscsi_task *mtask)
361{
362 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
363 struct iscsi_tm *tmfabort_hdr;
364 struct scsi_cmnd *ref_sc;
365 struct iscsi_task *ctask;
366 struct bnx2i_cmd *bnx2i_cmd;
367 struct bnx2i_tmf_request *tmfabort_wqe;
368 u32 dword;
369
370 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
371 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
372 tmfabort_wqe = (struct bnx2i_tmf_request *)
373 bnx2i_conn->ep->qp.sq_prod_qe;
374
375 tmfabort_wqe->op_code = tmfabort_hdr->opcode;
376 tmfabort_wqe->op_attr = 0;
377 tmfabort_wqe->op_attr =
378 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
379 tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
380 tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
381
382 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
383 tmfabort_wqe->reserved2 = 0;
384 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
385
386 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
387	if (!ctask || !ctask->sc)
388 /*
389 * the iscsi layer must have completed the cmd while this
390 * was starting up.
391 */
392 return 0;
393 ref_sc = ctask->sc;
394
395 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
396 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
397 else
398 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
399 tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
400 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
401
402 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
403 tmfabort_wqe->bd_list_addr_hi = (u32)
404 ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
405 tmfabort_wqe->num_bds = 1;
406 tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
407
408 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
409 return 0;
410}
411
412/**
413 * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
414 * @bnx2i_conn: iscsi connection
415 * @cmd: driver command structure which is requesting
416 *       a WQE to be sent to the chip for further processing
417 *
418 * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
419 */
420int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
421 struct bnx2i_cmd *cmd)
422{
423 struct bnx2i_cmd_request *scsi_cmd_wqe;
424
425 scsi_cmd_wqe = (struct bnx2i_cmd_request *)
426 bnx2i_conn->ep->qp.sq_prod_qe;
427 memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
428 scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
429
430 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
431 return 0;
432}
433
434/**
435 * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
436 * @bnx2i_conn: iscsi connection
437 * @task: transport layer's iscsi task which is requesting
438 *        a WQE to be sent to the chip for further processing
439 * @ttt: TTT to be used when building pdu header
440 * @datap: payload buffer pointer
441 * @data_len: payload data length
442 * @unsol: indicates whether nopout pdu is unsolicited pdu or
443 * in response to target's NOPIN w/ TTT != FFFFFFFF
444 *
445 * prepare and post a nopout request WQE to CNIC firmware
446 */
447int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
448 struct iscsi_task *task, u32 ttt,
449 char *datap, int data_len, int unsol)
450{
451 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
452 struct bnx2i_cmd *bnx2i_cmd;
453 struct bnx2i_nop_out_request *nopout_wqe;
454 struct iscsi_nopout *nopout_hdr;
455
456 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
457 nopout_hdr = (struct iscsi_nopout *)task->hdr;
458 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
459 nopout_wqe->op_code = nopout_hdr->opcode;
460 nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
461 memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
462
463 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
464 u32 tmp = nopout_hdr->lun[0];
465 /* 57710 requires LUN field to be swapped */
466 nopout_hdr->lun[0] = nopout_hdr->lun[1];
467 nopout_hdr->lun[1] = tmp;
468 }
469
470 nopout_wqe->itt = ((u16)task->itt |
471 (ISCSI_TASK_TYPE_MPATH <<
472 ISCSI_TMF_REQUEST_TYPE_SHIFT));
473 nopout_wqe->ttt = ttt;
474 nopout_wqe->flags = 0;
475 if (!unsol)
476 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
477 else if (nopout_hdr->itt == RESERVED_ITT)
478 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
479
480 nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
481 nopout_wqe->data_length = data_len;
482 if (data_len) {
483 /* handle payload data, not required in first release */
484 printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
485 } else {
486 nopout_wqe->bd_list_addr_lo = (u32)
487 bnx2i_conn->hba->mp_bd_dma;
488 nopout_wqe->bd_list_addr_hi =
489 (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
490 nopout_wqe->num_bds = 1;
491 }
492 nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
493
494 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
495 return 0;
496}
497
498
499/**
500 * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
501 * @conn: iscsi connection
502 * @cmd: driver command structure which is requesting
503 * a WQE to sent to chip for further processing
504 *
505 * prepare and post logout request WQE to CNIC firmware
506 */
507int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
508 struct iscsi_task *task)
509{
510 struct bnx2i_cmd *bnx2i_cmd;
511 struct bnx2i_logout_request *logout_wqe;
512 struct iscsi_logout *logout_hdr;
513
514 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
515 logout_hdr = (struct iscsi_logout *)task->hdr;
516
517 logout_wqe = (struct bnx2i_logout_request *)
518 bnx2i_conn->ep->qp.sq_prod_qe;
519 memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
520
521 logout_wqe->op_code = logout_hdr->opcode;
522 logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
523 logout_wqe->op_attr =
524 logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
525 logout_wqe->itt = ((u16)task->itt |
526 (ISCSI_TASK_TYPE_MPATH <<
527 ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
528 logout_wqe->data_length = 0;
529 logout_wqe->cid = 0;
530
531 logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
532 logout_wqe->bd_list_addr_hi = (u32)
533 ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
534 logout_wqe->num_bds = 1;
535 logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
536
537 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
538 return 0;
539}
540
541
542/**
543 * bnx2i_update_iscsi_conn - posts iSCSI Conn Update KWQE to hardware
544 * @conn: iscsi connection which requires iscsi parameter update
545 *
546 * sends down iSCSI Conn Update request to move iSCSI conn to FFP
547 */
548void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
549{
550 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
551 struct bnx2i_hba *hba = bnx2i_conn->hba;
552 struct kwqe *kwqe_arr[2];
553 struct iscsi_kwqe_conn_update *update_wqe;
554 struct iscsi_kwqe_conn_update conn_update_kwqe;
555
556 update_wqe = &conn_update_kwqe;
557
558 update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
559 update_wqe->hdr.flags =
560 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
561
562 /* 5771x requires conn context id to be passed as is */
563 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
564 update_wqe->context_id = bnx2i_conn->ep->ep_cid;
565 else
566 update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
567 update_wqe->conn_flags = 0;
568 if (conn->hdrdgst_en)
569 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
570 if (conn->datadgst_en)
571 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
572 if (conn->session->initial_r2t_en)
573 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
574 if (conn->session->imm_data_en)
575 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
576
577 update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
578 update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
579 update_wqe->first_burst_length = conn->session->first_burst;
580 update_wqe->max_burst_length = conn->session->max_burst;
581 update_wqe->exp_stat_sn = conn->exp_statsn;
582 update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
583 update_wqe->session_error_recovery_level = conn->session->erl;
584 iscsi_conn_printk(KERN_ALERT, conn,
585 "bnx2i: conn update - MBL 0x%x FBL 0x%x"
586			  " MRDSL_I 0x%x MRDSL_T 0x%x\n",
587 update_wqe->max_burst_length,
588 update_wqe->first_burst_length,
589 update_wqe->max_recv_pdu_length,
590 update_wqe->max_send_pdu_length);
591
592 kwqe_arr[0] = (struct kwqe *) update_wqe;
593 if (hba->cnic && hba->cnic->submit_kwqes)
594 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
595}
596
597
598/**
599 * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
600 * @data: endpoint (transport handle) structure pointer
601 *
602 * routine to handle connection offload/destroy request timeout
603 */
604void bnx2i_ep_ofld_timer(unsigned long data)
605{
606 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
607
608 if (ep->state == EP_STATE_OFLD_START) {
609 printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
610 ep->state = EP_STATE_OFLD_FAILED;
611 } else if (ep->state == EP_STATE_DISCONN_START) {
612 printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
613 ep->state = EP_STATE_DISCONN_TIMEDOUT;
614 } else if (ep->state == EP_STATE_CLEANUP_START) {
615 printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
616 ep->state = EP_STATE_CLEANUP_FAILED;
617 }
618
619 wake_up_interruptible(&ep->ofld_wait);
620}
621
622
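/* bnx2i_power_of2 - returns log2(val) when 'val' is a power of two and
 * zero otherwise; used below to express the per-page CQE count as a
 * power-of-two exponent in the iscsi_init KWQE. */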
623static int bnx2i_power_of2(u32 val)
624{
625 u32 power = 0;
626 if (val & (val - 1))
627 return power;
628 val--;
629 while (val) {
630 val = val >> 1;
631 power++;
632 }
633 return power;
634}
635
636
637/**
638 * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
639 * @hba: adapter structure pointer
640 * @cmd: driver command structure which is requesting
641 *       a WQE to be sent to the chip for further processing
642 *
643 * prepares and posts an iSCSI command cleanup request WQE on the SQ
644 */
645void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
646{
647 struct bnx2i_cleanup_request *cmd_cleanup;
648
649 cmd_cleanup =
650 (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
651 memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
652
653 cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
654 cmd_cleanup->itt = cmd->req.itt;
655 cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
656
657 bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
658}
659
660
661/**
662 * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
663 * @hba: adapter structure pointer
664 * @ep: endpoint (transport identifier) structure
665 *
666 * this routine prepares and posts a CONN_DESTROY KWQE to initiate the
667 * iscsi connection context clean-up process
668 */
669void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
670{
671 struct kwqe *kwqe_arr[2];
672 struct iscsi_kwqe_conn_destroy conn_cleanup;
673
674 memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
675
676 conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
677 conn_cleanup.hdr.flags =
678 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
679 /* 5771x requires conn context id to be passed as is */
680 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
681 conn_cleanup.context_id = ep->ep_cid;
682 else
683 conn_cleanup.context_id = (ep->ep_cid >> 7);
684
685 conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
686
687 kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
688 if (hba->cnic && hba->cnic->submit_kwqes)
689 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
690}
691
692
693/**
694 * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
695 * @hba: adapter structure pointer
696 * @ep: endpoint (transport identifier) structure
697 *
698 * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
699 */
700static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
701 struct bnx2i_endpoint *ep)
702{
703 struct kwqe *kwqe_arr[2];
704 struct iscsi_kwqe_conn_offload1 ofld_req1;
705 struct iscsi_kwqe_conn_offload2 ofld_req2;
706 dma_addr_t dma_addr;
707 int num_kwqes = 2;
708 u32 *ptbl;
709
710 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
711 ofld_req1.hdr.flags =
712 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
713
714 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
715
716 dma_addr = ep->qp.sq_pgtbl_phys;
717 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
718 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
719
720 dma_addr = ep->qp.cq_pgtbl_phys;
721 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
722 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
723
724 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
725 ofld_req2.hdr.flags =
726 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
727
728 dma_addr = ep->qp.rq_pgtbl_phys;
729 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
730 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
731
732 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
733
734 ofld_req2.sq_first_pte.hi = *ptbl++;
735 ofld_req2.sq_first_pte.lo = *ptbl;
736
737 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
738 ofld_req2.cq_first_pte.hi = *ptbl++;
739 ofld_req2.cq_first_pte.lo = *ptbl;
740
741 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
742 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
743 ofld_req2.num_additional_wqes = 0;
744
745 if (hba->cnic && hba->cnic->submit_kwqes)
746 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
747}
748
749
750/**
751 * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
752 * @hba: adapter structure pointer
753 * @ep: endpoint (transport identifier) structure
754 *
755 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
756 */
757static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
758 struct bnx2i_endpoint *ep)
759{
760 struct kwqe *kwqe_arr[5];
761 struct iscsi_kwqe_conn_offload1 ofld_req1;
762 struct iscsi_kwqe_conn_offload2 ofld_req2;
763 struct iscsi_kwqe_conn_offload3 ofld_req3[1];
764 dma_addr_t dma_addr;
765 int num_kwqes = 2;
766 u32 *ptbl;
767
768 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
769 ofld_req1.hdr.flags =
770 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
771
772 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
773
774 dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
775 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
776 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
777
778 dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
779 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
780 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
781
782 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
783 ofld_req2.hdr.flags =
784 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
785
786 dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
787 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
788 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
789
790 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
791 ofld_req2.sq_first_pte.hi = *ptbl++;
792 ofld_req2.sq_first_pte.lo = *ptbl;
793
794 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
795 ofld_req2.cq_first_pte.hi = *ptbl++;
796 ofld_req2.cq_first_pte.lo = *ptbl;
797
798 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
799 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
800
801 ofld_req2.num_additional_wqes = 1;
802 memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
803 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
804 ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
805 ofld_req3[0].qp_first_pte[0].lo = *ptbl;
806
807 kwqe_arr[2] = (struct kwqe *) ofld_req3;
808 /* need if we decide to go with multiple KCQE's per conn */
809 num_kwqes += 1;
810
811 if (hba->cnic && hba->cnic->submit_kwqes)
812 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
813}
814
815/**
816 * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
817 *
818 * @hba: adapter structure pointer
819 * @ep: endpoint (transport identifier) structure
820 *
821 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
822 */
823void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
824{
825 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
826 bnx2i_5771x_send_conn_ofld_req(hba, ep);
827 else
828 bnx2i_570x_send_conn_ofld_req(hba, ep);
829}
830
831
832/**
833 * setup_qp_page_tables - iscsi QP page table setup function
834 * @ep: endpoint (transport identifier) structure
835 *
836 * Sets up page tables for SQ/RQ/CQ. 1G (5706/5708/5709) devices require
837 * 64-bit page addresses in big endian format, whereas 10G (57710) devices
838 * require the page table entries in little endian format
839 */
840static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
841{
842 int num_pages;
843 u32 *ptbl;
844 dma_addr_t page;
845 int cnic_dev_10g;
846
847 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
848 cnic_dev_10g = 1;
849 else
850 cnic_dev_10g = 0;
851
852 /* SQ page table */
853 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
854 num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
855 page = ep->qp.sq_phys;
856
857 if (cnic_dev_10g)
858 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
859 else
860 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
861 while (num_pages--) {
862 if (cnic_dev_10g) {
863 /* PTE is written in little endian format for 57710 */
864 *ptbl = (u32) page;
865 ptbl++;
866 *ptbl = (u32) ((u64) page >> 32);
867 ptbl++;
868 page += PAGE_SIZE;
869 } else {
870 /* PTE is written in big endian format for
871 * 5706/5708/5709 devices */
872 *ptbl = (u32) ((u64) page >> 32);
873 ptbl++;
874 *ptbl = (u32) page;
875 ptbl++;
876 page += PAGE_SIZE;
877 }
878 }
879
880 /* RQ page table */
881 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
882 num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
883 page = ep->qp.rq_phys;
884
885 if (cnic_dev_10g)
886 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
887 else
888 ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
889 while (num_pages--) {
890 if (cnic_dev_10g) {
891 /* PTE is written in little endian format for 57710 */
892 *ptbl = (u32) page;
893 ptbl++;
894 *ptbl = (u32) ((u64) page >> 32);
895 ptbl++;
896 page += PAGE_SIZE;
897 } else {
898 /* PTE is written in big endian format for
899 * 5706/5708/5709 devices */
900 *ptbl = (u32) ((u64) page >> 32);
901 ptbl++;
902 *ptbl = (u32) page;
903 ptbl++;
904 page += PAGE_SIZE;
905 }
906 }
907
908 /* CQ page table */
909 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
910 num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
911 page = ep->qp.cq_phys;
912
913 if (cnic_dev_10g)
914 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
915 else
916 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
917 while (num_pages--) {
918 if (cnic_dev_10g) {
919 /* PTE is written in little endian format for 57710 */
920 *ptbl = (u32) page;
921 ptbl++;
922 *ptbl = (u32) ((u64) page >> 32);
923 ptbl++;
924 page += PAGE_SIZE;
925 } else {
926 /* PTE is written in big endian format for
927 * 5706/5708/5709 devices */
928 *ptbl = (u32) ((u64) page >> 32);
929 ptbl++;
930 *ptbl = (u32) page;
931 ptbl++;
932 page += PAGE_SIZE;
933 }
934 }
935}
936
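Each page-table entry written above is a 64-bit DMA address stored as two 32-bit words, with the word order depending on the device family. A minimal sketch of the two layouts for a single entry (the helper name is made up; uint64_t stands in for dma_addr_t):

#include <stdint.h>

/* Sketch: write one 64-bit page address into a page table as two 32-bit
 * words. low_word_first selects the 57710 (little endian) layout; the
 * 5706/5708/5709 parts take the high word first. */
static void write_pte(uint32_t *ptbl, uint64_t page, int low_word_first)
{
	uint32_t lo = (uint32_t)page;
	uint32_t hi = (uint32_t)(page >> 32);

	if (low_word_first) {
		ptbl[0] = lo;
		ptbl[1] = hi;
	} else {
		ptbl[0] = hi;
		ptbl[1] = lo;
	}
}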
937
938/**
939 * bnx2i_alloc_qp_resc - allocates required resources for QP.
940 * @hba: adapter structure pointer
941 * @ep: endpoint (transport identifier) structure
942 *
943 * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
944 * memory for SQ/RQ/CQ and page tables. EP structure elements such
945 * as producer/consumer indexes/pointers, queue sizes and page table
946 * contents are set up
947 */
948int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
949{
950 struct bnx2i_5771x_cq_db *cq_db;
951
952 ep->hba = hba;
953 ep->conn = NULL;
954 ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
955
956 /* Allocate page table memory for SQ which is page aligned */
957 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
958 ep->qp.sq_mem_size =
959 (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
960 ep->qp.sq_pgtbl_size =
961 (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
962 ep->qp.sq_pgtbl_size =
963 (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
964
965 ep->qp.sq_pgtbl_virt =
966 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
967 &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
968 if (!ep->qp.sq_pgtbl_virt) {
969 printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
970 ep->qp.sq_pgtbl_size);
971 goto mem_alloc_err;
972 }
973
974 /* Allocate memory area for actual SQ element */
975 ep->qp.sq_virt =
976 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
977 &ep->qp.sq_phys, GFP_KERNEL);
978 if (!ep->qp.sq_virt) {
979 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
980 ep->qp.sq_mem_size);
981 goto mem_alloc_err;
982 }
983
984 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
985 ep->qp.sq_first_qe = ep->qp.sq_virt;
986 ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
987 ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
988 ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
989 ep->qp.sq_prod_idx = 0;
990 ep->qp.sq_cons_idx = 0;
991 ep->qp.sqe_left = hba->max_sqes;
992
993 /* Allocate page table memory for CQ which is page aligned */
994 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
995 ep->qp.cq_mem_size =
996 (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
997 ep->qp.cq_pgtbl_size =
998 (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
999 ep->qp.cq_pgtbl_size =
1000 (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1001
1002 ep->qp.cq_pgtbl_virt =
1003 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1004 &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
1005 if (!ep->qp.cq_pgtbl_virt) {
1006 printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
1007 ep->qp.cq_pgtbl_size);
1008 goto mem_alloc_err;
1009 }
1010
1011 /* Allocate memory area for actual CQ element */
1012 ep->qp.cq_virt =
1013 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1014 &ep->qp.cq_phys, GFP_KERNEL);
1015 if (!ep->qp.cq_virt) {
1016 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
1017 ep->qp.cq_mem_size);
1018 goto mem_alloc_err;
1019 }
1020 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
1021
1022 ep->qp.cq_first_qe = ep->qp.cq_virt;
1023 ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
1024 ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
1025 ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
1026 ep->qp.cq_prod_idx = 0;
1027 ep->qp.cq_cons_idx = 0;
1028 ep->qp.cqe_left = hba->max_cqes;
1029 ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1030 ep->qp.cqe_size = hba->max_cqes;
1031
1032	/* Invalidate all EQ CQE indices, required only for 57710 */
1033 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
1034 memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
1035
1036 /* Allocate page table memory for RQ which is page aligned */
1037 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
1038 ep->qp.rq_mem_size =
1039 (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1040 ep->qp.rq_pgtbl_size =
1041 (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
1042 ep->qp.rq_pgtbl_size =
1043 (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1044
1045 ep->qp.rq_pgtbl_virt =
1046 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1047 &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
1048 if (!ep->qp.rq_pgtbl_virt) {
1049 printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
1050 ep->qp.rq_pgtbl_size);
1051 goto mem_alloc_err;
1052 }
1053
1054 /* Allocate memory area for actual RQ element */
1055 ep->qp.rq_virt =
1056 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1057 &ep->qp.rq_phys, GFP_KERNEL);
1058 if (!ep->qp.rq_virt) {
1059 printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
1060 ep->qp.rq_mem_size);
1061 goto mem_alloc_err;
1062 }
1063
1064 ep->qp.rq_first_qe = ep->qp.rq_virt;
1065 ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
1066 ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
1067 ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
1068 ep->qp.rq_prod_idx = 0x8000;
1069 ep->qp.rq_cons_idx = 0;
1070 ep->qp.rqe_left = hba->max_rqes;
1071
1072 setup_qp_page_tables(ep);
1073
1074 return 0;
1075
1076mem_alloc_err:
1077 bnx2i_free_qp_resc(hba, ep);
1078 return -ENOMEM;
1079}
1080
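The sizing above first rounds each ring up to whole pages and then sizes a page table holding one pointer-sized entry per ring page, itself rounded up to a page. A standalone sketch of that arithmetic (helper names are made up; a fixed 4 KiB page stands in for the kernel's PAGE_SIZE/PAGE_MASK):

#define PG_SIZE 4096UL
#define PG_MASK (~(PG_SIZE - 1))

/* Sketch: bytes needed for a ring of 'nr' entries of 'entry_size' bytes
 * each, rounded up to whole pages. */
static unsigned long ring_mem_size(unsigned long nr, unsigned long entry_size)
{
	return (nr * entry_size + PG_SIZE - 1) & PG_MASK;
}

/* Sketch: bytes needed for the page table describing that ring - one
 * pointer per ring page, again rounded up to a whole page. */
static unsigned long ring_pgtbl_size(unsigned long mem_size)
{
	return (mem_size / PG_SIZE * sizeof(void *) + PG_SIZE - 1) & PG_MASK;
}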
1081
1082
1083/**
1084 * bnx2i_free_qp_resc - free memory resources held by QP
1085 * @hba: adapter structure pointer
1086 * @ep: endpoint (transport identifier) structure
1087 *
1088 * Free QP resources - SQ/RQ/CQ memory and page tables.
1089 */
1090void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1091{
1092 if (ep->qp.ctx_base) {
1093 iounmap(ep->qp.ctx_base);
1094 ep->qp.ctx_base = NULL;
1095 }
1096 /* Free SQ mem */
1097 if (ep->qp.sq_pgtbl_virt) {
1098 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
1099 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
1100 ep->qp.sq_pgtbl_virt = NULL;
1101 ep->qp.sq_pgtbl_phys = 0;
1102 }
1103 if (ep->qp.sq_virt) {
1104 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1105 ep->qp.sq_virt, ep->qp.sq_phys);
1106 ep->qp.sq_virt = NULL;
1107 ep->qp.sq_phys = 0;
1108 }
1109
1110 /* Free RQ mem */
1111 if (ep->qp.rq_pgtbl_virt) {
1112 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1113 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
1114 ep->qp.rq_pgtbl_virt = NULL;
1115 ep->qp.rq_pgtbl_phys = 0;
1116 }
1117 if (ep->qp.rq_virt) {
1118 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1119 ep->qp.rq_virt, ep->qp.rq_phys);
1120 ep->qp.rq_virt = NULL;
1121 ep->qp.rq_phys = 0;
1122 }
1123
1124 /* Free CQ mem */
1125 if (ep->qp.cq_pgtbl_virt) {
1126 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1127 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
1128 ep->qp.cq_pgtbl_virt = NULL;
1129 ep->qp.cq_pgtbl_phys = 0;
1130 }
1131 if (ep->qp.cq_virt) {
1132 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1133 ep->qp.cq_virt, ep->qp.cq_phys);
1134 ep->qp.cq_virt = NULL;
1135 ep->qp.cq_phys = 0;
1136 }
1137}
1138
1139
1140/**
1141 * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
1142 * @hba: adapter structure pointer
1143 *
1144 * Send down iscsi_init KWQEs which initiate the initial handshake with the f/w.
1145 * This results in iSCSI support validation and on-chip context manager
1146 * initialization. Firmware completes this handshake with a CQE carrying
1147 * the result of iscsi support validation. Parameters carried by the
1148 * iscsi init request determine the number of offloaded connections and the
1149 * tolerance level for iscsi protocol violations this hba/chip can support
1150 */
1151int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1152{
1153 struct kwqe *kwqe_arr[3];
1154 struct iscsi_kwqe_init1 iscsi_init;
1155 struct iscsi_kwqe_init2 iscsi_init2;
1156 int rc = 0;
1157 u64 mask64;
1158
1159 bnx2i_adjust_qp_size(hba);
1160
1161 iscsi_init.flags =
1162 ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
1163 if (en_tcp_dack)
1164 iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
1165 iscsi_init.reserved0 = 0;
1166 iscsi_init.num_cqs = 1;
1167 iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
1168 iscsi_init.hdr.flags =
1169 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1170
1171 iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
1172 iscsi_init.dummy_buffer_addr_hi =
1173 (u32) ((u64) hba->dummy_buf_dma >> 32);
1174
1175 hba->ctx_ccell_tasks =
1176 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
1177 iscsi_init.num_ccells_per_conn = hba->num_ccell;
1178 iscsi_init.num_tasks_per_conn = hba->max_sqes;
1179 iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
1180 iscsi_init.sq_num_wqes = hba->max_sqes;
1181 iscsi_init.cq_log_wqes_per_page =
1182 (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
1183 iscsi_init.cq_num_wqes = hba->max_cqes;
1184 iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
1185 (PAGE_SIZE - 1)) / PAGE_SIZE;
1186 iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
1187 (PAGE_SIZE - 1)) / PAGE_SIZE;
1188 iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
1189 iscsi_init.rq_num_wqes = hba->max_rqes;
1190
1191
1192 iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
1193 iscsi_init2.hdr.flags =
1194 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1195 iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
1196 mask64 = 0x0ULL;
1197 mask64 |= (
1198 /* CISCO MDS */
1199 (1UL <<
1200 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
1201 /* HP MSA1510i */
1202 (1UL <<
1203 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
1204 /* EMC */
1205 (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
1206 if (error_mask1)
1207 iscsi_init2.error_bit_map[0] = error_mask1;
1208 else
1209 iscsi_init2.error_bit_map[0] = (u32) mask64;
1210
1211 if (error_mask2)
1212 iscsi_init2.error_bit_map[1] = error_mask2;
1213 else
1214 iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
1215
1216 iscsi_error_mask = mask64;
1217
1218 kwqe_arr[0] = (struct kwqe *) &iscsi_init;
1219 kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
1220
1221 if (hba->cnic && hba->cnic->submit_kwqes)
1222 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
1223 return rc;
1224}
1225
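When the error_mask1/error_mask2 module parameters are zero, the 64-bit default mask assembled above is simply split into the two 32-bit error_bit_map words of the init2 KWQE. A small sketch of that selection (the helper name is made up; the override words are plain arguments instead of the module parameters):

#include <stdint.h>

/* Sketch: fill the two error_bit_map words either from explicit
 * overrides or from the low/high halves of a 64-bit default mask. */
static void fill_error_bit_map(uint32_t bit_map[2], uint64_t def_mask,
			       uint32_t override0, uint32_t override1)
{
	bit_map[0] = override0 ? override0 : (uint32_t)def_mask;
	bit_map[1] = override1 ? override1 : (uint32_t)(def_mask >> 32);
}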
1226
1227/**
1228 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
1229 * @conn: iscsi connection
1230 * @cqe: pointer to newly DMA'ed CQE entry for processing
1231 *
1232 * process SCSI CMD Response CQE & complete the request to SCSI-ML
1233 */
1234static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1235 struct bnx2i_conn *bnx2i_conn,
1236 struct cqe *cqe)
1237{
1238 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1239 struct bnx2i_cmd_response *resp_cqe;
1240 struct bnx2i_cmd *bnx2i_cmd;
1241 struct iscsi_task *task;
1242 struct iscsi_cmd_rsp *hdr;
1243 u32 datalen = 0;
1244
1245 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1246 spin_lock(&session->lock);
1247 task = iscsi_itt_to_task(conn,
1248 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
1249 if (!task)
1250 goto fail;
1251
1252 bnx2i_cmd = task->dd_data;
1253
1254 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
1255 conn->datain_pdus_cnt +=
1256 resp_cqe->task_stat.read_stat.num_data_outs;
1257 conn->rxdata_octets +=
1258 bnx2i_cmd->req.total_data_transfer_length;
1259 } else {
1260 conn->dataout_pdus_cnt +=
1261 resp_cqe->task_stat.read_stat.num_data_outs;
1262 conn->r2t_pdus_cnt +=
1263 resp_cqe->task_stat.read_stat.num_r2ts;
1264 conn->txdata_octets +=
1265 bnx2i_cmd->req.total_data_transfer_length;
1266 }
1267 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
1268
1269 hdr = (struct iscsi_cmd_rsp *)task->hdr;
1270 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1271 hdr->opcode = resp_cqe->op_code;
1272 hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
1273 hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
1274 hdr->response = resp_cqe->response;
1275 hdr->cmd_status = resp_cqe->status;
1276 hdr->flags = resp_cqe->response_flags;
1277 hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
1278
1279 if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
1280 goto done;
1281
1282 if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
1283 datalen = resp_cqe->data_length;
1284 if (datalen < 2)
1285 goto done;
1286
1287 if (datalen > BNX2I_RQ_WQE_SIZE) {
1288 iscsi_conn_printk(KERN_ERR, conn,
1289 "sense data len %d > RQ sz\n",
1290 datalen);
1291 datalen = BNX2I_RQ_WQE_SIZE;
1292 } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
1293 iscsi_conn_printk(KERN_ERR, conn,
1294 "sense data len %d > conn data\n",
1295 datalen);
1296 datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
1297 }
1298
1299 bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
1300 bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
1301 }
1302
1303done:
1304 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
1305 conn->data, datalen);
1306fail:
1307 spin_unlock(&session->lock);
1308 return 0;
1309}
1310
1311
1312/**
1313 * bnx2i_process_login_resp - this function handles iscsi login response
1314 * @session: iscsi session pointer
1315 * @bnx2i_conn: iscsi connection pointer
1316 * @cqe: pointer to newly DMA'ed CQE entry for processing
1317 *
1318 * process Login Response CQE & complete it to open-iscsi user daemon
1319 */
1320static int bnx2i_process_login_resp(struct iscsi_session *session,
1321 struct bnx2i_conn *bnx2i_conn,
1322 struct cqe *cqe)
1323{
1324 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1325 struct iscsi_task *task;
1326 struct bnx2i_login_response *login;
1327 struct iscsi_login_rsp *resp_hdr;
1328 int pld_len;
1329 int pad_len;
1330
1331 login = (struct bnx2i_login_response *) cqe;
1332 spin_lock(&session->lock);
1333 task = iscsi_itt_to_task(conn,
1334 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
1335 if (!task)
1336 goto done;
1337
1338 resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1339 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1340 resp_hdr->opcode = login->op_code;
1341 resp_hdr->flags = login->response_flags;
1342 resp_hdr->max_version = login->version_max;
1343	resp_hdr->active_version = login->version_active;
1344 resp_hdr->hlength = 0;
1345
1346 hton24(resp_hdr->dlength, login->data_length);
1347 memcpy(resp_hdr->isid, &login->isid_lo, 6);
1348 resp_hdr->tsih = cpu_to_be16(login->tsih);
1349 resp_hdr->itt = task->hdr->itt;
1350 resp_hdr->statsn = cpu_to_be32(login->stat_sn);
1351 resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
1352 resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
1353 resp_hdr->status_class = login->status_class;
1354 resp_hdr->status_detail = login->status_detail;
1355 pld_len = login->data_length;
1356 bnx2i_conn->gen_pdu.resp_wr_ptr =
1357 bnx2i_conn->gen_pdu.resp_buf + pld_len;
1358
1359 pad_len = 0;
1360 if (pld_len & 0x3)
1361 pad_len = 4 - (pld_len % 4);
1362
1363 if (pad_len) {
1364 int i = 0;
1365 for (i = 0; i < pad_len; i++) {
1366 bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
1367 bnx2i_conn->gen_pdu.resp_wr_ptr++;
1368 }
1369 }
1370
1371 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
1372 bnx2i_conn->gen_pdu.resp_buf,
1373 bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
1374done:
1375 spin_unlock(&session->lock);
1376 return 0;
1377}
1378
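The login payload written to the response buffer is zero-padded out to a 4-byte boundary before the PDU is handed to libiscsi; the pad is just the distance to the next multiple of four. A one-function sketch of that computation (the helper name is made up for illustration):

/* Sketch: number of zero bytes needed to pad 'len' up to a 4-byte
 * boundary. */
static int pdu_pad_len(int len)
{
	return (len & 0x3) ? 4 - (len & 0x3) : 0;
}

/* e.g. pdu_pad_len(5) == 3, pdu_pad_len(8) == 0 */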
1379/**
1380 * bnx2i_process_tmf_resp - this function handles iscsi TMF response
1381 * @session: iscsi session pointer
1382 * @bnx2i_conn: iscsi connection pointer
1383 * @cqe: pointer to newly DMA'ed CQE entry for processing
1384 *
1385 * process iSCSI TMF Response CQE and wake up the driver eh thread.
1386 */
1387static int bnx2i_process_tmf_resp(struct iscsi_session *session,
1388 struct bnx2i_conn *bnx2i_conn,
1389 struct cqe *cqe)
1390{
1391 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1392 struct iscsi_task *task;
1393 struct bnx2i_tmf_response *tmf_cqe;
1394 struct iscsi_tm_rsp *resp_hdr;
1395
1396 tmf_cqe = (struct bnx2i_tmf_response *)cqe;
1397 spin_lock(&session->lock);
1398 task = iscsi_itt_to_task(conn,
1399 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
1400 if (!task)
1401 goto done;
1402
1403 resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1404 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1405 resp_hdr->opcode = tmf_cqe->op_code;
1406 resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
1407 resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
1408 resp_hdr->itt = task->hdr->itt;
1409 resp_hdr->response = tmf_cqe->response;
1410
1411 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1412done:
1413 spin_unlock(&session->lock);
1414 return 0;
1415}
1416
1417/**
1418 * bnx2i_process_logout_resp - this function handles iscsi logout response
1419 * @session: iscsi session pointer
1420 * @bnx2i_conn: iscsi connection pointer
1421 * @cqe: pointer to newly DMA'ed CQE entry for processing
1422 *
1423 * process iSCSI Logout Response CQE & make function call to
1424 * notify the user daemon.
1425 */
1426static int bnx2i_process_logout_resp(struct iscsi_session *session,
1427 struct bnx2i_conn *bnx2i_conn,
1428 struct cqe *cqe)
1429{
1430 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1431 struct iscsi_task *task;
1432 struct bnx2i_logout_response *logout;
1433 struct iscsi_logout_rsp *resp_hdr;
1434
1435 logout = (struct bnx2i_logout_response *) cqe;
1436 spin_lock(&session->lock);
1437 task = iscsi_itt_to_task(conn,
1438 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
1439 if (!task)
1440 goto done;
1441
1442 resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1443 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1444 resp_hdr->opcode = logout->op_code;
1445 resp_hdr->flags = logout->response;
1446 resp_hdr->hlength = 0;
1447
1448 resp_hdr->itt = task->hdr->itt;
1449 resp_hdr->statsn = task->hdr->exp_statsn;
1450 resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
1451 resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
1452
1453 resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
1454 resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
1455
1456 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1457done:
1458 spin_unlock(&session->lock);
1459 return 0;
1460}
1461
1462/**
1463 * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
1464 * @session: iscsi session pointer
1465 * @bnx2i_conn: iscsi connection pointer
1466 * @cqe: pointer to newly DMA'ed CQE entry for processing
1467 *
1468 * process iSCSI NOPIN local completion CQE, frees ITT and command structures
1469 */
1470static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
1471 struct bnx2i_conn *bnx2i_conn,
1472 struct cqe *cqe)
1473{
1474 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1475 struct bnx2i_nop_in_msg *nop_in;
1476 struct iscsi_task *task;
1477
1478 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1479 spin_lock(&session->lock);
1480 task = iscsi_itt_to_task(conn,
1481 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
1482 if (task)
1483 iscsi_put_task(task);
1484 spin_unlock(&session->lock);
1485}
1486
1487/**
1488 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
1489 * @conn: iscsi connection
1490 *
1491 * Firmware advances RQ producer index for every unsolicited PDU even if
1492 * payload data length is '0'. This function makes corresponding
1493 * adjustments on the driver side to match this f/w behavior
1494 */
1495static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
1496{
1497 char dummy_rq_data[2];
1498 bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
1499 bnx2i_put_rq_buf(bnx2i_conn, 1);
1500}
1501
1502
1503/**
1504 * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
1505 * @session: iscsi session pointer
1506 * @bnx2i_conn: iscsi connection pointer
1507 * @cqe: pointer to newly DMA'ed CQE entry for processing
1508 *
1509 * process iSCSI target's proactive iSCSI NOPIN request
1510 */
1511static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
1512 struct bnx2i_conn *bnx2i_conn,
1513 struct cqe *cqe)
1514{
1515 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1516 struct iscsi_task *task;
1517 struct bnx2i_nop_in_msg *nop_in;
1518 struct iscsi_nopin *hdr;
1519 u32 itt;
1520 int tgt_async_nop = 0;
1521
1522 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1523 itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
1524
1525 spin_lock(&session->lock);
1526 hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
1527 memset(hdr, 0, sizeof(struct iscsi_hdr));
1528 hdr->opcode = nop_in->op_code;
1529 hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
1530 hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
1531 hdr->ttt = cpu_to_be32(nop_in->ttt);
1532
1533 if (itt == (u16) RESERVED_ITT) {
1534 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1535 hdr->itt = RESERVED_ITT;
1536 tgt_async_nop = 1;
1537 goto done;
1538 }
1539
1540 /* this is a response to one of our nop-outs */
1541 task = iscsi_itt_to_task(conn, itt);
1542 if (task) {
1543 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1544 hdr->itt = task->hdr->itt;
1545 hdr->ttt = cpu_to_be32(nop_in->ttt);
1546 memcpy(hdr->lun, nop_in->lun, 8);
1547 }
1548done:
1549 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1550 spin_unlock(&session->lock);
1551
1552 return tgt_async_nop;
1553}
1554
1555
1556/**
1557 * bnx2i_process_async_mesg - this function handles iscsi async message
1558 * @session: iscsi session pointer
1559 * @bnx2i_conn: iscsi connection pointer
1560 * @cqe: pointer to newly DMA'ed CQE entry for processing
1561 *
1562 * process iSCSI ASYNC Message
1563 */
1564static void bnx2i_process_async_mesg(struct iscsi_session *session,
1565 struct bnx2i_conn *bnx2i_conn,
1566 struct cqe *cqe)
1567{
1568 struct bnx2i_async_msg *async_cqe;
1569 struct iscsi_async *resp_hdr;
1570 u8 async_event;
1571
1572 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1573
1574 async_cqe = (struct bnx2i_async_msg *)cqe;
1575 async_event = async_cqe->async_event;
1576
1577 if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
1578 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1579 "async: scsi events not supported\n");
1580 return;
1581 }
1582
1583 spin_lock(&session->lock);
1584 resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
1585 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1586 resp_hdr->opcode = async_cqe->op_code;
1587 resp_hdr->flags = 0x80;
1588
1589 memcpy(resp_hdr->lun, async_cqe->lun, 8);
1590 resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
1591 resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
1592
1593 resp_hdr->async_event = async_cqe->async_event;
1594 resp_hdr->async_vcode = async_cqe->async_vcode;
1595
1596 resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
1597 resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
1598 resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
1599
1600 __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
1601 (struct iscsi_hdr *)resp_hdr, NULL, 0);
1602 spin_unlock(&session->lock);
1603}
1604
1605
1606/**
1607 * bnx2i_process_reject_mesg - process iscsi reject pdu
1608 * @session: iscsi session pointer
1609 * @bnx2i_conn: iscsi connection pointer
1610 * @cqe: pointer to newly DMA'ed CQE entry for processing
1611 *
1612 * process iSCSI REJECT message
1613 */
1614static void bnx2i_process_reject_mesg(struct iscsi_session *session,
1615 struct bnx2i_conn *bnx2i_conn,
1616 struct cqe *cqe)
1617{
1618 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1619 struct bnx2i_reject_msg *reject;
1620 struct iscsi_reject *hdr;
1621
1622 reject = (struct bnx2i_reject_msg *) cqe;
1623 if (reject->data_length) {
1624 bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
1625 bnx2i_put_rq_buf(bnx2i_conn, 1);
1626 } else
1627 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1628
1629 spin_lock(&session->lock);
1630 hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
1631 memset(hdr, 0, sizeof(struct iscsi_hdr));
1632 hdr->opcode = reject->op_code;
1633 hdr->reason = reject->reason;
1634 hton24(hdr->dlength, reject->data_length);
1635 hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
1636 hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
1637 hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
1638 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
1639 reject->data_length);
1640 spin_unlock(&session->lock);
1641}
1642
1643/**
1644 * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
1645 * @session: iscsi session pointer
1646 * @bnx2i_conn: iscsi connection pointer
1647 * @cqe: pointer to newly DMA'ed CQE entry for processing
1648 *
1649 * process command cleanup response CQE during conn shutdown or error recovery
1650 */
1651static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
1652 struct bnx2i_conn *bnx2i_conn,
1653 struct cqe *cqe)
1654{
1655 struct bnx2i_cleanup_response *cmd_clean_rsp;
1656 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1657 struct iscsi_task *task;
1658
1659 cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
1660 spin_lock(&session->lock);
1661 task = iscsi_itt_to_task(conn,
1662 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1663 if (!task)
1664 printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
1665 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1666 spin_unlock(&session->lock);
1667 complete(&bnx2i_conn->cmd_cleanup_cmpl);
1668}
1669
1670
1671
1672/**
1673 * bnx2i_process_new_cqes - process newly DMA'ed CQE's
1674 * @bnx2i_conn: iscsi connection
1675 *
1676 * this function is called by generic KCQ handler to process all pending CQE's
1677 */
1678static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1679{
1680 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1681 struct iscsi_session *session = conn->session;
1682 struct qp_info *qp = &bnx2i_conn->ep->qp;
1683 struct bnx2i_nop_in_msg *nopin;
1684 int tgt_async_msg;
1685
1686 while (1) {
1687 nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
1688 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
1689 break;
1690
1691 if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
1692 break;
1693
1694 tgt_async_msg = 0;
1695
1696 switch (nopin->op_code) {
1697 case ISCSI_OP_SCSI_CMD_RSP:
1698 case ISCSI_OP_SCSI_DATA_IN:
1699 bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
1700 qp->cq_cons_qe);
1701 break;
1702 case ISCSI_OP_LOGIN_RSP:
1703 bnx2i_process_login_resp(session, bnx2i_conn,
1704 qp->cq_cons_qe);
1705 break;
1706 case ISCSI_OP_SCSI_TMFUNC_RSP:
1707 bnx2i_process_tmf_resp(session, bnx2i_conn,
1708 qp->cq_cons_qe);
1709 break;
1710 case ISCSI_OP_LOGOUT_RSP:
1711 bnx2i_process_logout_resp(session, bnx2i_conn,
1712 qp->cq_cons_qe);
1713 break;
1714 case ISCSI_OP_NOOP_IN:
1715 if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
1716 qp->cq_cons_qe))
1717 tgt_async_msg = 1;
1718 break;
1719 case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
1720 bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
1721 qp->cq_cons_qe);
1722 break;
1723 case ISCSI_OP_ASYNC_EVENT:
1724 bnx2i_process_async_mesg(session, bnx2i_conn,
1725 qp->cq_cons_qe);
1726 tgt_async_msg = 1;
1727 break;
1728 case ISCSI_OP_REJECT:
1729 bnx2i_process_reject_mesg(session, bnx2i_conn,
1730 qp->cq_cons_qe);
1731 break;
1732 case ISCSI_OPCODE_CLEANUP_RESPONSE:
1733 bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
1734 qp->cq_cons_qe);
1735 break;
1736 default:
1737 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
1738 nopin->op_code);
1739 }
1740
1741 if (!tgt_async_msg)
1742 bnx2i_conn->ep->num_active_cmds--;
1743
1744 /* clear out in production version only, till beta keep opcode
1745 * field intact, will be helpful in debugging (context dump)
1746 * nopin->op_code = 0;
1747 */
1748 qp->cqe_exp_seq_sn++;
1749 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
1750 qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1751
1752 if (qp->cq_cons_qe == qp->cq_last_qe) {
1753 qp->cq_cons_qe = qp->cq_first_qe;
1754 qp->cq_cons_idx = 0;
1755 } else {
1756 qp->cq_cons_qe++;
1757 qp->cq_cons_idx++;
1758 }
1759 }
1760 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1761}
1762
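The loop above stops as soon as the next CQE's cq_req_sn no longer matches the expected sequence number; the expected value itself runs over a space twice the ring depth before wrapping back to ISCSI_INITIAL_SN, and the consumer slot wraps from the last CQE back to the first. A minimal standalone sketch of that bookkeeping, using a toy ring depth and an assumed ISCSI_INITIAL_SN of 1:

#include <stdint.h>
#include <stdio.h>

#define TOY_CQE_SIZE		4	/* toy CQ depth; real queues are much larger */
#define TOY_ISCSI_INITIAL_SN	1	/* assumed initial sequence value */

struct toy_cq {
	uint32_t cons_idx;	/* consumer slot, 0 .. TOY_CQE_SIZE - 1 */
	uint32_t exp_seq_sn;	/* sequence number expected in the next CQE */
};

/* advance the consumer the same way bnx2i_process_new_cqes() does */
static void cq_consume_one(struct toy_cq *cq)
{
	cq->exp_seq_sn++;
	if (cq->exp_seq_sn == (TOY_CQE_SIZE * 2 + 1))	/* sequence space is twice the ring */
		cq->exp_seq_sn = TOY_ISCSI_INITIAL_SN;

	if (cq->cons_idx == TOY_CQE_SIZE - 1)		/* wrap from the last slot to the first */
		cq->cons_idx = 0;
	else
		cq->cons_idx++;
}

int main(void)
{
	struct toy_cq cq = { .cons_idx = 0, .exp_seq_sn = TOY_ISCSI_INITIAL_SN };

	for (int i = 0; i < 10; i++) {
		printf("consume slot %u, next expected sn %u\n", cq.cons_idx, cq.exp_seq_sn);
		cq_consume_one(&cq);
	}
	return 0;
}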
1763/**
1764 * bnx2i_fastpath_notification - process global event queue (KCQ)
1765 * @hba: adapter structure pointer
1766 * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry
1767 *
1768 * Fast path event notification handler, KCQ entry carries context id
1769 * of the connection that has 1 or more pending CQ entries
1770 */
1771static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
1772 struct iscsi_kcqe *new_cqe_kcqe)
1773{
1774 struct bnx2i_conn *conn;
1775 u32 iscsi_cid;
1776
1777 iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
1778 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1779
1780 if (!conn) {
1781 printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
1782 return;
1783 }
1784 if (!conn->ep) {
1785 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
1786 return;
1787 }
1788
1789 bnx2i_process_new_cqes(conn);
1790}
1791
1792
1793/**
1794 * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
1795 * @hba: adapter structure pointer
1796 * @update_kcqe: kcqe pointer
1797 *
1798 * CONN_UPDATE completion handler; this completes iSCSI connection FFP migration
1799 */
1800static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
1801 struct iscsi_kcqe *update_kcqe)
1802{
1803 struct bnx2i_conn *conn;
1804 u32 iscsi_cid;
1805
1806 iscsi_cid = update_kcqe->iscsi_conn_id;
1807 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1808
1809 if (!conn) {
1810 printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
1811 return;
1812 }
1813 if (!conn->ep) {
1814 printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
1815 return;
1816 }
1817
1818 if (update_kcqe->completion_status) {
1819 printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
1820 conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
1821 } else
1822 conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
1823
1824 wake_up_interruptible(&conn->ep->ofld_wait);
1825}
1826
1827
1828/**
1829 * bnx2i_recovery_que_add_conn - add connection to recovery queue
1830 * @hba: adapter structure pointer
1831 * @bnx2i_conn: iscsi connection
1832 *
1833 * Add connection to recovery queue and schedule adapter eh worker
1834 */
1835static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
1836 struct bnx2i_conn *bnx2i_conn)
1837{
1838 iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
1839 ISCSI_ERR_CONN_FAILED);
1840}
1841
1842
1843/**
1844 * bnx2i_process_tcp_error - process error notification on a given connection
1845 *
1846 * @hba: adapter structure pointer
1847 * @tcp_err: tcp error kcqe pointer
1848 *
1849 * handles tcp level error notifications from FW.
1850 */
1851static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
1852 struct iscsi_kcqe *tcp_err)
1853{
1854 struct bnx2i_conn *bnx2i_conn;
1855 u32 iscsi_cid;
1856
1857 iscsi_cid = tcp_err->iscsi_conn_id;
1858 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1859
1860 if (!bnx2i_conn) {
1861 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1862 return;
1863 }
1864
1865 printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
1866 iscsi_cid, tcp_err->completion_status);
1867 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
1868}
1869
1870
1871/**
1872 * bnx2i_process_iscsi_error - process error notification on a given connection
1873 * @hba: adapter structure pointer
1874 * @iscsi_err: iscsi error kcqe pointer
1875 *
1876 * handles iscsi error notifications from the FW. Firmware, based on the
1877 * initial handshake, classifies an iscsi protocol / TCP RFC violation as
1878 * either a warning or an error indication. For an "Error" indication the
1879 * driver initiates session recovery for that connection/session. For a
1880 * "Warning" indication the driver puts out a system log message (only one
1881 * message per violation type for the life of the session, to avoid
1882 * unnecessarily overloading the system)
1883 */
1884static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
1885 struct iscsi_kcqe *iscsi_err)
1886{
1887 struct bnx2i_conn *bnx2i_conn;
1888 u32 iscsi_cid;
1889 char warn_notice[] = "iscsi_warning";
1890 char error_notice[] = "iscsi_error";
1891 char additional_notice[64];
1892 char *message;
1893 int need_recovery;
1894 u64 err_mask64;
1895
1896 iscsi_cid = iscsi_err->iscsi_conn_id;
1897 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1898 if (!bnx2i_conn) {
1899 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1900 return;
1901 }
1902
1903 err_mask64 = (0x1ULL << iscsi_err->completion_status);
1904
1905 if (err_mask64 & iscsi_error_mask) {
1906 need_recovery = 0;
1907 message = warn_notice;
1908 } else {
1909 need_recovery = 1;
1910 message = error_notice;
1911 }
1912
1913 switch (iscsi_err->completion_status) {
1914 case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
1915 strcpy(additional_notice, "hdr digest err");
1916 break;
1917 case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
1918 strcpy(additional_notice, "data digest err");
1919 break;
1920 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
1921 strcpy(additional_notice, "wrong opcode rcvd");
1922 break;
1923 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
1924 strcpy(additional_notice, "AHS len > 0 rcvd");
1925 break;
1926 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
1927 strcpy(additional_notice, "invalid ITT rcvd");
1928 break;
1929 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
1930 strcpy(additional_notice, "wrong StatSN rcvd");
1931 break;
1932 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
1933 strcpy(additional_notice, "wrong DataSN rcvd");
1934 break;
1935 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
1936 strcpy(additional_notice, "pend R2T violation");
1937 break;
1938 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
1939 strcpy(additional_notice, "ERL0, UO");
1940 break;
1941 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
1942 strcpy(additional_notice, "ERL0, U1");
1943 break;
1944 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
1945 strcpy(additional_notice, "ERL0, U2");
1946 break;
1947 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
1948 strcpy(additional_notice, "ERL0, U3");
1949 break;
1950 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
1951 strcpy(additional_notice, "ERL0, U4");
1952 break;
1953 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
1954 strcpy(additional_notice, "ERL0, U5");
1955 break;
1956 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
1957 strcpy(additional_notice, "ERL0, U6");
1958 break;
1959 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
1960 strcpy(additional_notice, "invalid resi len");
1961 break;
1962 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
1963 strcpy(additional_notice, "MRDSL violation");
1964 break;
1965 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
1966 strcpy(additional_notice, "F-bit not set");
1967 break;
1968 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
1969 strcpy(additional_notice, "invalid TTT");
1970 break;
1971 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
1972 strcpy(additional_notice, "invalid DataSN");
1973 break;
1974 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
1975 strcpy(additional_notice, "burst len violation");
1976 break;
1977 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
1978 strcpy(additional_notice, "buf offset violation");
1979 break;
1980 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
1981 strcpy(additional_notice, "invalid LUN field");
1982 break;
1983 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
1984 strcpy(additional_notice, "invalid R2TSN field");
1985 break;
1986#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
1987 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
1988 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
1989 strcpy(additional_notice, "invalid cmd len1");
1990 break;
1991#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
1992 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
1993 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
1994 strcpy(additional_notice, "invalid cmd len2");
1995 break;
1996 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
1997 strcpy(additional_notice,
1998 "pend r2t exceeds MaxOutstandingR2T value");
1999 break;
2000 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
2001 strcpy(additional_notice, "TTT is rsvd");
2002 break;
2003 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
2004 strcpy(additional_notice, "MBL violation");
2005 break;
2006#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
2007 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
2008 case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
2009 strcpy(additional_notice, "data seg len != 0");
2010 break;
2011 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
2012 strcpy(additional_notice, "reject pdu len error");
2013 break;
2014 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
2015 strcpy(additional_notice, "async pdu len error");
2016 break;
2017 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
2018 strcpy(additional_notice, "nopin pdu len error");
2019 break;
2020#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
2021 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
2022 case BNX2_ERR_PEND_R2T_IN_CLEANUP:
2023 strcpy(additional_notice, "pend r2t in cleanup");
2024 break;
2025
2026 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
2027 strcpy(additional_notice, "IP fragments rcvd");
2028 break;
2029 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
2030 strcpy(additional_notice, "IP options error");
2031 break;
2032 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
2033 strcpy(additional_notice, "urgent flag error");
2034 break;
2035 default:
2036 printk(KERN_ALERT "iscsi_err - unknown err %x\n",
2037 iscsi_err->completion_status);
2038 }
2039
2040 if (need_recovery) {
2041 iscsi_conn_printk(KERN_ALERT,
2042 bnx2i_conn->cls_conn->dd_data,
2043 "bnx2i: %s - %s\n",
2044 message, additional_notice);
2045
2046 iscsi_conn_printk(KERN_ALERT,
2047 bnx2i_conn->cls_conn->dd_data,
2048 "conn_err - hostno %d conn %p, "
2049 "iscsi_cid %x cid %x\n",
2050 bnx2i_conn->hba->shost->host_no,
2051 bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
2052 bnx2i_conn->ep->ep_cid);
2053 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
2054 } else
2055 if (!test_and_set_bit(iscsi_err->completion_status,
2056 (void *) &bnx2i_conn->violation_notified))
2057 iscsi_conn_printk(KERN_ALERT,
2058 bnx2i_conn->cls_conn->dd_data,
2059 "bnx2i: %s - %s\n",
2060 message, additional_notice);
2061}
2062
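Whether a completion status is treated as a recoverable warning or as a session-fatal error above comes down to one bit test against the 64-bit iscsi_error_mask. A minimal sketch of the classification; the way the mask is assembled from the error_mask1/error_mask2 module parameters is an assumption here, since that setup happens outside this hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Assumed layout: error_mask1 covers completion-status bits 0..31 and
 * error_mask2 covers bits 32..63, mirroring the two module parameters.
 */
static uint64_t build_error_mask(uint32_t error_mask1, uint32_t error_mask2)
{
	return ((uint64_t)error_mask2 << 32) | error_mask1;
}

/* a status whose bit is set in the mask is only warned about; otherwise recover */
static bool status_needs_recovery(uint8_t completion_status, uint64_t iscsi_error_mask)
{
	return !((1ULL << completion_status) & iscsi_error_mask);
}

int main(void)
{
	uint64_t mask = build_error_mask(0x6, 0x0);	/* demote statuses 1 and 2 to warnings */

	for (uint8_t status = 0; status < 4; status++)
		printf("status %u -> %s\n", status,
		       status_needs_recovery(status, mask) ? "recover session" : "warn only");
	return 0;
}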
2063
2064/**
2065 * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
2066 * @hba: adapter structure pointer
2067 * @conn_destroy: conn destroy kcqe pointer
2068 *
2069 * handles connection destroy completion request.
2070 */
2071static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
2072 struct iscsi_kcqe *conn_destroy)
2073{
2074 struct bnx2i_endpoint *ep;
2075
2076 ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
2077 if (!ep) {
2078 printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
2079				  "offload request, unexpected completion\n");
2080 return;
2081 }
2082
2083 if (hba != ep->hba) {
2084 printk(KERN_ALERT "conn destroy- error hba mis-match\n");
2085 return;
2086 }
2087
2088 if (conn_destroy->completion_status) {
2089 printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
2090 ep->state = EP_STATE_CLEANUP_FAILED;
2091 } else
2092 ep->state = EP_STATE_CLEANUP_CMPL;
2093 wake_up_interruptible(&ep->ofld_wait);
2094}
2095
2096
2097/**
2098 * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
2099 * @hba: adapter structure pointer
2100 * @ofld_kcqe: conn offload kcqe pointer
2101 *
2102 * handles initial connection offload completion; the ep_connect() thread is
2103 * woken up to continue with the LLP connect process
2104 */
2105static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
2106 struct iscsi_kcqe *ofld_kcqe)
2107{
2108 u32 cid_addr;
2109 struct bnx2i_endpoint *ep;
2110 u32 cid_num;
2111
2112 ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
2113 if (!ep) {
2114 printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
2115 return;
2116 }
2117
2118 if (hba != ep->hba) {
2119 printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
2120 return;
2121 }
2122
2123 if (ofld_kcqe->completion_status) {
2124 if (ofld_kcqe->completion_status ==
2125 ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
2126 printk(KERN_ALERT "bnx2i: unable to allocate"
2127 " iSCSI context resources\n");
2128 ep->state = EP_STATE_OFLD_FAILED;
2129 } else {
2130 ep->state = EP_STATE_OFLD_COMPL;
2131 cid_addr = ofld_kcqe->iscsi_conn_context_id;
2132 cid_num = bnx2i_get_cid_num(ep);
2133 ep->ep_cid = cid_addr;
2134 ep->qp.ctx_base = NULL;
2135 }
2136 wake_up_interruptible(&ep->ofld_wait);
2137}
2138
2139/**
2140 * bnx2i_indicate_kcqe - process KCQ entries posted by the cnic driver
2141 * @context: adapter structure pointer
2142 * @kcqe: array of pending KCQE entries
2143 * @num_cqe: number of KCQE entries in @kcqe
2144 * Generic KCQ event handler/dispatcher
2145 */
2146static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
2147 u32 num_cqe)
2148{
2149 struct bnx2i_hba *hba = context;
2150 int i = 0;
2151 struct iscsi_kcqe *ikcqe = NULL;
2152
2153 while (i < num_cqe) {
2154 ikcqe = (struct iscsi_kcqe *) kcqe[i++];
2155
2156 if (ikcqe->op_code ==
2157 ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
2158 bnx2i_fastpath_notification(hba, ikcqe);
2159 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
2160 bnx2i_process_ofld_cmpl(hba, ikcqe);
2161 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
2162 bnx2i_process_update_conn_cmpl(hba, ikcqe);
2163 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
2164 if (ikcqe->completion_status !=
2165 ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
2166 bnx2i_iscsi_license_error(hba, ikcqe->\
2167 completion_status);
2168 else {
2169 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2170 bnx2i_get_link_state(hba);
2171 printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
2172 "ISCSI_INIT passed\n",
2173 (u8)hba->pcidev->bus->number,
2174 hba->pci_devno,
2175 (u8)hba->pci_func);
2176
2177
2178 }
2179 } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
2180 bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
2181 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
2182 bnx2i_process_iscsi_error(hba, ikcqe);
2183 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
2184 bnx2i_process_tcp_error(hba, ikcqe);
2185 else
2186 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
2187 ikcqe->op_code);
2188 }
2189}
2190
2191
2192/**
2193 * bnx2i_indicate_netevent - Generic netdev event handler
2194 * @context: adapter structure pointer
2195 * @event: event type
2196 *
2197 * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
2198 * NETDEV_GOING_DOWN and NETDEV_CHANGE
2199 */
2200static void bnx2i_indicate_netevent(void *context, unsigned long event)
2201{
2202 struct bnx2i_hba *hba = context;
2203
2204 switch (event) {
2205 case NETDEV_UP:
2206 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
2207 bnx2i_send_fw_iscsi_init_msg(hba);
2208 break;
2209 case NETDEV_DOWN:
2210 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2211 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2212 break;
2213 case NETDEV_GOING_DOWN:
2214 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2215 iscsi_host_for_each_session(hba->shost,
2216 bnx2i_drop_session);
2217 break;
2218 case NETDEV_CHANGE:
2219 bnx2i_get_link_state(hba);
2220 break;
2221 default:
2222 ;
2223 }
2224}
2225
2226
2227/**
2228 * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
2229 * @cm_sk: cnic sock structure pointer
2230 *
2231 * function callback exported via bnx2i - cnic driver interface to
2232 * indicate completion of option-2 TCP connect request.
2233 */
2234static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
2235{
2236 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2237
2238 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
2239 ep->state = EP_STATE_CONNECT_FAILED;
2240 else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
2241 ep->state = EP_STATE_CONNECT_COMPL;
2242 else
2243 ep->state = EP_STATE_CONNECT_FAILED;
2244
2245 wake_up_interruptible(&ep->ofld_wait);
2246}
2247
2248
2249/**
2250 * bnx2i_cm_close_cmpl - process tcp conn close completion
2251 * @cm_sk: cnic sock structure pointer
2252 *
2253 * function callback exported via bnx2i - cnic driver interface to
2254 * indicate completion of option-2 graceful TCP connection shutdown
2255 */
2256static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
2257{
2258 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2259
2260 ep->state = EP_STATE_DISCONN_COMPL;
2261 wake_up_interruptible(&ep->ofld_wait);
2262}
2263
2264
2265/**
2266 * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
2267 * @cm_sk: cnic sock structure pointer
2268 *
2269 * function callback exported via bnx2i - cnic driver interface to
2270 * indicate completion of option-2 abortive TCP connection termination
2271 */
2272static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
2273{
2274 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2275
2276 ep->state = EP_STATE_DISCONN_COMPL;
2277 wake_up_interruptible(&ep->ofld_wait);
2278}
2279
2280
2281/**
2282 * bnx2i_cm_remote_close - process received TCP FIN
2283 * @cm_sk: cnic sock structure pointer
2285 *
2286 * function callback exported via bnx2i - cnic driver interface to indicate
2287 * async TCP events such as FIN
2288 */
2289static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
2290{
2291 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2292
2293 ep->state = EP_STATE_TCP_FIN_RCVD;
2294 if (ep->conn)
2295 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2296}
2297
2298/**
2299 * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
2300 * @cm_sk: cnic sock structure pointer
2302 *
2303 * function callback exported via bnx2i - cnic driver interface to
2304 * indicate async TCP events (RST) sent by the peer.
2305 */
2306static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
2307{
2308 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2309
2310 ep->state = EP_STATE_TCP_RST_RCVD;
2311 if (ep->conn)
2312 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2313}
2314
2315
2316static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
2317 char *buf, u16 buflen)
2318{
2319 struct bnx2i_hba *hba;
2320
2321 hba = bnx2i_find_hba_for_cnic(dev);
2322 if (!hba)
2323 return;
2324
2325 if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
2326 msg_type, buf, buflen))
2327 printk(KERN_ALERT "bnx2i: private nl message send error\n");
2328
2329}
2330
2331
2332/**
2333 * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
2334 * carrying callback function pointers
2335 *
2336 */
2337struct cnic_ulp_ops bnx2i_cnic_cb = {
2338 .cnic_init = bnx2i_ulp_init,
2339 .cnic_exit = bnx2i_ulp_exit,
2340 .cnic_start = bnx2i_start,
2341 .cnic_stop = bnx2i_stop,
2342 .indicate_kcqes = bnx2i_indicate_kcqe,
2343 .indicate_netevent = bnx2i_indicate_netevent,
2344 .cm_connect_complete = bnx2i_cm_connect_cmpl,
2345 .cm_close_complete = bnx2i_cm_close_cmpl,
2346 .cm_abort_complete = bnx2i_cm_abort_cmpl,
2347 .cm_remote_close = bnx2i_cm_remote_close,
2348 .cm_remote_abort = bnx2i_cm_remote_abort,
2349 .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
2350 .owner = THIS_MODULE
2351};
2352
2353
2354/**
2355 * bnx2i_map_ep_dbell_regs - map connection doorbell registers
2356 * @ep: bnx2i endpoint
2357 *
2358 * maps the connection's SQ and RQ doorbell registers. 5706/5708/5709 host
2359 * these registers in BAR #0, whereas on the 57710 they are accessed by
2360 * mapping BAR #1
2361 */
2362int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2363{
2364 u32 cid_num;
2365 u32 reg_off;
2366 u32 first_l4l5;
2367 u32 ctx_sz;
2368 u32 config2;
2369 resource_size_t reg_base;
2370
2371 cid_num = bnx2i_get_cid_num(ep);
2372
2373 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
2374 reg_base = pci_resource_start(ep->hba->pcidev,
2375 BNX2X_DOORBELL_PCI_BAR);
2376 reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
2377 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
2378 goto arm_cq;
2379 }
2380
2381 reg_base = ep->hba->netdev->base_addr;
2382 if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
2383 (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
2384 config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
2385 first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
2386 ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
2387 if (ctx_sz)
2388 reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
2389 + PAGE_SIZE *
2390 (((cid_num - first_l4l5) / ctx_sz) + 256);
2391 else
2392 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2393 } else
2394		/* 5709 device in normal mode and 5706/5708 devices */
2395 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2396
2397 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
2398 MB_KERNEL_CTX_SIZE);
2399 if (!ep->qp.ctx_base)
2400 return -ENOMEM;
2401
2402arm_cq:
2403 bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
2404 return 0;
2405}
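For a rough feel of the two simpler doorbell layouts handled above: 57710 parts expose one PAGE_SIZE doorbell page per connection in BAR #1 (the DPM trigger offset is ignored here), while 5706/5708 parts and a 5709 in normal mail-queue mode get a fixed MB_KERNEL_CTX_SIZE context slot at CTX_OFFSET in BAR #0; the 5709 binned-mode arithmetic is left out. A minimal sketch with stand-in constants, since the real values live in the bnx2/bnx2x headers:

#include <stdint.h>
#include <stdio.h>

/* stand-in constants; the real values come from the bnx2/bnx2x headers */
#define TOY_PAGE_SIZE		4096
#define TOY_CTX_OFFSET		0x10000
#define TOY_MB_KERNEL_CTX_SIZE	0x100

/* BAR #1: one PAGE_SIZE doorbell page per connection (57710 case) */
static uint64_t dbell_off_57710(uint32_t cid_num)
{
	return (uint64_t)TOY_PAGE_SIZE * (cid_num & 0x1FFFF);
}

/* BAR #0: one fixed-size kernel context slot (5706/5708, 5709 normal mode) */
static uint64_t dbell_off_ctx(uint32_t cid_num)
{
	return TOY_CTX_OFFSET + (uint64_t)TOY_MB_KERNEL_CTX_SIZE * cid_num;
}

int main(void)
{
	uint32_t cid = 17;

	printf("cid %u: BAR1 offset 0x%llx, BAR0 offset 0x%llx\n", cid,
	       (unsigned long long)dbell_off_57710(cid),
	       (unsigned long long)dbell_off_ctx(cid));
	return 0;
}
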
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644
index 000000000000..ae4b2d588fd3
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -0,0 +1,438 @@
1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include "bnx2i.h"
15
16static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count;
18static int bnx2i_reg_device;
19
20#define DRV_MODULE_NAME "bnx2i"
21#define DRV_MODULE_VERSION "2.0.1d"
22#define DRV_MODULE_RELDATE "Mar 25, 2009"
23
24static char version[] __devinitdata =
25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
26 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
27
28
29MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
30MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
31MODULE_LICENSE("GPL");
32MODULE_VERSION(DRV_MODULE_VERSION);
33
34static DEFINE_RWLOCK(bnx2i_dev_lock);
35
36unsigned int event_coal_div = 1;
37module_param(event_coal_div, int, 0664);
38MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
39
40unsigned int en_tcp_dack = 1;
41module_param(en_tcp_dack, int, 0664);
42MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
43
44unsigned int error_mask1 = 0x00;
45module_param(error_mask1, int, 0664);
46MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
47
48unsigned int error_mask2 = 0x00;
49module_param(error_mask2, int, 0664);
50MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
51
52unsigned int sq_size;
53module_param(sq_size, int, 0664);
54MODULE_PARM_DESC(sq_size, "Configure SQ size");
55
56unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
57module_param(rq_size, int, 0664);
58MODULE_PARM_DESC(rq_size, "Configure RQ size");
59
60u64 iscsi_error_mask = 0x00;
61
62static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
63
64
65/**
66 * bnx2i_identify_device - identifies NetXtreme II device type
67 * @hba: Adapter structure pointer
68 *
69 * This function identifies the NX2 device type and sets the appropriate
70 * queue mailbox register access method; the 5709 requires the driver to
71 * access MBOX regs using *bin* mode
72 */
73void bnx2i_identify_device(struct bnx2i_hba *hba)
74{
75 hba->cnic_dev_type = 0;
76 if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
77 (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
78 set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
79 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
80 (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
81 set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
82 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
83 (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
84 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
85 hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
86 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
87 hba->pci_did == PCI_DEVICE_ID_NX2_57711)
88 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
89}
90
91
92/**
93 * get_adapter_list_head - returns the first usable adapter from the adapter list
94 */
95struct bnx2i_hba *get_adapter_list_head(void)
96{
97 struct bnx2i_hba *hba = NULL;
98 struct bnx2i_hba *tmp_hba;
99
100 if (!adapter_count)
101 goto hba_not_found;
102
103 read_lock(&bnx2i_dev_lock);
104 list_for_each_entry(tmp_hba, &adapter_list, link) {
105 if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
106 hba = tmp_hba;
107 break;
108 }
109 }
110 read_unlock(&bnx2i_dev_lock);
111hba_not_found:
112 return hba;
113}
114
115
116/**
117 * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
118 * @cnic: pointer to cnic device instance
119 *
120 */
121struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
122{
123 struct bnx2i_hba *hba, *temp;
124
125 read_lock(&bnx2i_dev_lock);
126 list_for_each_entry_safe(hba, temp, &adapter_list, link) {
127 if (hba->cnic == cnic) {
128 read_unlock(&bnx2i_dev_lock);
129 return hba;
130 }
131 }
132 read_unlock(&bnx2i_dev_lock);
133 return NULL;
134}
135
136
137/**
138 * bnx2i_start - cnic callback to initialize & start adapter instance
139 * @handle: transparent handle pointing to adapter structure
140 *
141 * This function maps the adapter structure to the pcidev structure and
142 * initiates the firmware handshake to enable/initialize on-chip iscsi
143 * components. This bnx2i - cnic interface api callback is issued after the
144 * following 2 conditions are met -
145 * a) underlying network interface is up (marked by event 'NETDEV_UP'
146 *	from netdev)
147 * b) bnx2i adapter instance is registered
148 */
149void bnx2i_start(void *handle)
150{
151#define BNX2I_INIT_POLL_TIME (1000 / HZ)
152 struct bnx2i_hba *hba = handle;
153 int i = HZ;
154
155 bnx2i_send_fw_iscsi_init_msg(hba);
156 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
157 msleep(BNX2I_INIT_POLL_TIME);
158}
159
160
161/**
162 * bnx2i_stop - cnic callback to shutdown adapter instance
163 * @handle: transparent handle pointing to adapter structure
164 *
165 * driver checks if the adapter is already in shutdown mode and, if not,
166 * starts the shutdown process
167 */
168void bnx2i_stop(void *handle)
169{
170 struct bnx2i_hba *hba = handle;
171
172 /* check if cleanup happened in GOING_DOWN context */
173 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
174 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
175 &hba->adapter_state))
176 iscsi_host_for_each_session(hba->shost,
177 bnx2i_drop_session);
178}
179
180/**
181 * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
182 * @hba: Adapter instance to register
183 *
184 * registers bnx2i adapter instance with the cnic driver while holding the
185 * adapter structure lock
186 */
187void bnx2i_register_device(struct bnx2i_hba *hba)
188{
189 if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
190 test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
191 return;
192 }
193
194 hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
195
196 spin_lock(&hba->lock);
197 bnx2i_reg_device++;
198 spin_unlock(&hba->lock);
199
200 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
201}
202
203
204/**
205 * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
206 *
207 * registers all bnx2i adapter instances with the cnic driver while holding
208 * the global resource lock
209 */
210void bnx2i_reg_dev_all(void)
211{
212 struct bnx2i_hba *hba, *temp;
213
214 read_lock(&bnx2i_dev_lock);
215 list_for_each_entry_safe(hba, temp, &adapter_list, link)
216 bnx2i_register_device(hba);
217 read_unlock(&bnx2i_dev_lock);
218}
219
220
221/**
222 * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
223 * @hba: Adapter instance to unregister
224 *
225 * unregisters the bnx2i adapter instance with the cnic driver while holding
226 * the adapter structure lock
227 */
228static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
229{
230 if (hba->ofld_conns_active ||
231 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
232 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
233 return;
234
235 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
236
237 spin_lock(&hba->lock);
238 bnx2i_reg_device--;
239 spin_unlock(&hba->lock);
240
241 /* ep_disconnect could come before NETDEV_DOWN, driver won't
242 * see NETDEV_DOWN as it already unregistered itself.
243 */
244 hba->adapter_state = 0;
245 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
246}
247
248/**
249 * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
250 *
251 * unregisters all bnx2i adapter instances with the cnic driver while holding
252 * the global resource lock
253 */
254void bnx2i_unreg_dev_all(void)
255{
256 struct bnx2i_hba *hba, *temp;
257
258 read_lock(&bnx2i_dev_lock);
259 list_for_each_entry_safe(hba, temp, &adapter_list, link)
260 bnx2i_unreg_one_device(hba);
261 read_unlock(&bnx2i_dev_lock);
262}
263
264
265/**
266 * bnx2i_init_one - initialize an adapter instance and allocate memory resources
267 * @hba: bnx2i adapter instance
268 * @cnic: cnic device handle
269 *
270 * Global resource lock and host adapter lock are held during critical
271 * sections below. This routine is called from cnic_register_driver() context
272 * and from the workhorse thread that does most device-specific initialization
273 */
274static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
275{
276 int rc;
277
278 read_lock(&bnx2i_dev_lock);
279 if (bnx2i_reg_device &&
280 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
281 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
282 if (rc) /* duplicate registration */
283 printk(KERN_ERR "bnx2i- dev reg failed\n");
284
285 spin_lock(&hba->lock);
286 bnx2i_reg_device++;
287 hba->age++;
288 spin_unlock(&hba->lock);
289
290 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
291 }
292 read_unlock(&bnx2i_dev_lock);
293
294 write_lock(&bnx2i_dev_lock);
295 list_add_tail(&hba->link, &adapter_list);
296 adapter_count++;
297 write_unlock(&bnx2i_dev_lock);
298 return 0;
299}
300
301
302/**
303 * bnx2i_ulp_init - initialize an adapter instance
304 * @dev: cnic device handle
305 *
306 * Called from cnic_register_driver() context to initialize all enumerated
307 * cnic devices. This routine allocates the adapter structure and other
308 * device-specific resources.
309 */
310void bnx2i_ulp_init(struct cnic_dev *dev)
311{
312 struct bnx2i_hba *hba;
313
314 /* Allocate a HBA structure for this device */
315 hba = bnx2i_alloc_hba(dev);
316 if (!hba) {
317 printk(KERN_ERR "bnx2i init: hba initialization failed\n");
318 return;
319 }
320
321 /* Get PCI related information and update hba struct members */
322 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
323 if (bnx2i_init_one(hba, dev)) {
324 printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
325 bnx2i_free_hba(hba);
326 } else
327 hba->cnic = dev;
328}
329
330
331/**
332 * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
333 * @dev: cnic device handle
334 *
335 */
336void bnx2i_ulp_exit(struct cnic_dev *dev)
337{
338 struct bnx2i_hba *hba;
339
340 hba = bnx2i_find_hba_for_cnic(dev);
341 if (!hba) {
342 printk(KERN_INFO "bnx2i_ulp_exit: hba not "
343 "found, dev 0x%p\n", dev);
344 return;
345 }
346 write_lock(&bnx2i_dev_lock);
347 list_del_init(&hba->link);
348 adapter_count--;
349
350 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
351 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
352 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
353
354 spin_lock(&hba->lock);
355 bnx2i_reg_device--;
356 spin_unlock(&hba->lock);
357 }
358 write_unlock(&bnx2i_dev_lock);
359
360 bnx2i_free_hba(hba);
361}
362
363
364/**
365 * bnx2i_mod_init - module init entry point
366 *
367 * initializes driver-wide global data structures such as the endpoint pool,
368 * tcp port manager/queue and sysfs. Finally the driver registers itself
369 * with the cnic module
370 */
371static int __init bnx2i_mod_init(void)
372{
373 int err;
374
375 printk(KERN_INFO "%s", version);
376
377 if (!is_power_of_2(sq_size))
378 sq_size = roundup_pow_of_two(sq_size);
379
380 bnx2i_scsi_xport_template =
381 iscsi_register_transport(&bnx2i_iscsi_transport);
382 if (!bnx2i_scsi_xport_template) {
383 printk(KERN_ERR "Could not register bnx2i transport.\n");
384 err = -ENOMEM;
385 goto out;
386 }
387
388 err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
389 if (err) {
390 printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
391 goto unreg_xport;
392 }
393
394 return 0;
395
396unreg_xport:
397 iscsi_unregister_transport(&bnx2i_iscsi_transport);
398out:
399 return err;
400}
401
402
403/**
404 * bnx2i_mod_exit - module cleanup/exit entry point
405 *
406 * Global resource lock and host adapter lock are held during critical
407 * sections in this function. The driver walks the adapter list, cleans up
408 * each instance, unregisters the iscsi transport name and finally
409 * unregisters itself with the cnic module
410 */
411static void __exit bnx2i_mod_exit(void)
412{
413 struct bnx2i_hba *hba;
414
415 write_lock(&bnx2i_dev_lock);
416 while (!list_empty(&adapter_list)) {
417 hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
418 list_del(&hba->link);
419 adapter_count--;
420
421 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
422 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
423 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
424 bnx2i_reg_device--;
425 }
426
427 write_unlock(&bnx2i_dev_lock);
428 bnx2i_free_hba(hba);
429 write_lock(&bnx2i_dev_lock);
430 }
431 write_unlock(&bnx2i_dev_lock);
432
433 iscsi_unregister_transport(&bnx2i_iscsi_transport);
434 cnic_unregister_driver(CNIC_ULP_ISCSI);
435}
436
437module_init(bnx2i_mod_init);
438module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644
index 000000000000..f7412196f2f8
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -0,0 +1,2064 @@
1/*
2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
3 *
4 * Copyright (c) 2006 - 2009 Broadcom Corporation
5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6 * Copyright (c) 2007, 2008 Mike Christie
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
13 */
14
15#include <scsi/scsi_tcq.h>
16#include <scsi/libiscsi.h>
17#include "bnx2i.h"
18
19struct scsi_transport_template *bnx2i_scsi_xport_template;
20struct iscsi_transport bnx2i_iscsi_transport;
21static struct scsi_host_template bnx2i_host_template;
22
23/*
24 * Global endpoint resource info
25 */
26static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
27
28
29static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
30{
31 int retval = 0;
32
33 if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
34 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
35 test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
36 retval = -EPERM;
37 return retval;
38}
39
40/**
41 * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
42 * @cmd: iscsi cmd struct pointer
43 * @buf_off: absolute buffer offset
44 * @start_bd_off: u32 pointer to return the offset within the BD
45 * indicated by 'start_bd_idx' on which 'buf_off' falls
46 * @start_bd_idx: index of the BD on which 'buf_off' falls
47 *
48 * identifies & marks various bd info for scsi command's imm data,
49 * unsolicited data and the first solicited data seq.
50 */
51static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
52 u32 *start_bd_off, u32 *start_bd_idx)
53{
54 struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
55 u32 cur_offset = 0;
56 u32 cur_bd_idx = 0;
57
58 if (buf_off) {
59 while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
60 cur_offset += bd_tbl->buffer_length;
61 cur_bd_idx++;
62 bd_tbl++;
63 }
64 }
65
66 *start_bd_off = buf_off - cur_offset;
67 *start_bd_idx = cur_bd_idx;
68}
69
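As a concrete example of the bookmark computation above: with buffer descriptors of 4096, 4096 and 2048 bytes and a buffer offset of 5120, the walk lands on BD index 1 with 1024 bytes already consumed inside it. A minimal sketch of the same walk, using a plain length array in place of struct iscsi_bd:

#include <stdint.h>
#include <stdio.h>

/* same walk as bnx2i_get_write_cmd_bd_idx(), over an array of BD lengths */
static void find_bd_bookmark(const uint32_t *bd_len, uint32_t buf_off,
			     uint32_t *start_bd_off, uint32_t *start_bd_idx)
{
	uint32_t cur_offset = 0;
	uint32_t cur_bd_idx = 0;

	while (buf_off >= cur_offset + bd_len[cur_bd_idx]) {
		cur_offset += bd_len[cur_bd_idx];
		cur_bd_idx++;
	}

	*start_bd_off = buf_off - cur_offset;	/* offset inside the BD we stopped on */
	*start_bd_idx = cur_bd_idx;
}

int main(void)
{
	const uint32_t bd_len[] = { 4096, 4096, 2048 };
	uint32_t bd_off, bd_idx;

	find_bd_bookmark(bd_len, 5120, &bd_off, &bd_idx);
	printf("buf_off 5120 -> bd idx %u, offset %u\n", bd_idx, bd_off);	/* 1, 1024 */
	return 0;
}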
70/**
71 * bnx2i_setup_write_cmd_bd_info - sets up various BD information
72 * @task: transport layer's cmd struct pointer
73 *
74 * identifies & marks various bd info for the scsi command's immediate data,
75 * unsolicited data and first solicited data seq, which includes BD start
76 * index & BD buf offset. This function takes into account iscsi parameters
77 * such as immediate data and unsolicited data support on this connection.
78 */
79static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
80{
81 struct bnx2i_cmd *cmd = task->dd_data;
82 u32 start_bd_offset;
83 u32 start_bd_idx;
84 u32 buffer_offset = 0;
85 u32 cmd_len = cmd->req.total_data_transfer_length;
86
87	/* if ImmediateData is turned off & InitialR2T is turned on,
88 * there will be no immediate or unsolicited data, just return.
89 */
90 if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
91 return;
92
93 /* Immediate data */
94 buffer_offset += task->imm_count;
95 if (task->imm_count == cmd_len)
96 return;
97
98 if (iscsi_task_has_unsol_data(task)) {
99 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
100 &start_bd_offset, &start_bd_idx);
101 cmd->req.ud_buffer_offset = start_bd_offset;
102 cmd->req.ud_start_bd_index = start_bd_idx;
103 buffer_offset += task->unsol_r2t.data_length;
104 }
105
106 if (buffer_offset != cmd_len) {
107 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
108 &start_bd_offset, &start_bd_idx);
109 if ((start_bd_offset > task->conn->session->first_burst) ||
110 (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
111 int i = 0;
112
113 iscsi_conn_printk(KERN_ALERT, task->conn,
114 "bnx2i- error, buf offset 0x%x "
115 "bd_valid %d use_sg %d\n",
116 buffer_offset, cmd->io_tbl.bd_valid,
117 scsi_sg_count(cmd->scsi_cmd));
118 for (i = 0; i < cmd->io_tbl.bd_valid; i++)
119 iscsi_conn_printk(KERN_ALERT, task->conn,
120 "bnx2i err, bd[%d]: len %x\n",
121 i, cmd->io_tbl.bd_tbl[i].\
122 buffer_length);
123 }
124 cmd->req.sd_buffer_offset = start_bd_offset;
125 cmd->req.sd_start_bd_index = start_bd_idx;
126 }
127}
128
129
130
131/**
132 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
133 * @hba: adapter instance
134 * @cmd: iscsi cmd struct pointer
135 *
136 * map SG list
137 */
138static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
139{
140 struct scsi_cmnd *sc = cmd->scsi_cmd;
141 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
142 struct scatterlist *sg;
143 int byte_count = 0;
144 int bd_count = 0;
145 int sg_count;
146 int sg_len;
147 u64 addr;
148 int i;
149
150 BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
151
152 sg_count = scsi_dma_map(sc);
153
154 scsi_for_each_sg(sc, sg, sg_count, i) {
155 sg_len = sg_dma_len(sg);
156 addr = (u64) sg_dma_address(sg);
157 bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
158 bd[bd_count].buffer_addr_hi = addr >> 32;
159 bd[bd_count].buffer_length = sg_len;
160 bd[bd_count].flags = 0;
161 if (bd_count == 0)
162 bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
163
164 byte_count += sg_len;
165 bd_count++;
166 }
167
168 if (bd_count)
169 bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
170
171 BUG_ON(byte_count != scsi_bufflen(sc));
172 return bd_count;
173}
174
175/**
176 * bnx2i_iscsi_map_sg_list - maps SG list
177 * @cmd: iscsi cmd struct pointer
178 *
179 * creates BD list table for the command
180 */
181static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
182{
183 int bd_count;
184
185 bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
186 if (!bd_count) {
187 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
188
189 bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
190 bd[0].buffer_length = bd[0].flags = 0;
191 }
192 cmd->io_tbl.bd_valid = bd_count;
193}
194
195
196/**
197 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
198 * @cmd: iscsi cmd struct pointer
199 *
200 * unmap IO buffers and invalidate the BD table
201 */
202void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
203{
204 struct scsi_cmnd *sc = cmd->scsi_cmd;
205
206 if (cmd->io_tbl.bd_valid && sc) {
207 scsi_dma_unmap(sc);
208 cmd->io_tbl.bd_valid = 0;
209 }
210}
211
212static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
213{
214 memset(&cmd->req, 0x00, sizeof(cmd->req));
215 cmd->req.op_code = 0xFF;
216 cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
217 cmd->req.bd_list_addr_hi =
218 (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
219
220}
221
222
223/**
224 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
225 * @hba: pointer to adapter instance
226 * @bnx2i_conn: pointer to iscsi connection
227 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
228 *
229 * update iscsi cid table entry with connection pointer. This enables
230 * driver to quickly get hold of connection structure pointer in
231 * completion/interrupt thread using iscsi context ID
232 */
233static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
234 struct bnx2i_conn *bnx2i_conn,
235 u32 iscsi_cid)
236{
237 if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
238 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
239 "conn bind - entry #%d not free\n", iscsi_cid);
240 return -EBUSY;
241 }
242
243 hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
244 return 0;
245}
246
247
248/**
249 * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
250 * @hba: pointer to adapter instance
251 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
252 */
253struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
254 u16 iscsi_cid)
255{
256 if (!hba->cid_que.conn_cid_tbl) {
257 printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
258 return NULL;
259
260 } else if (iscsi_cid >= hba->max_active_conns) {
261 printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
262 return NULL;
263 }
264 return hba->cid_que.conn_cid_tbl[iscsi_cid];
265}
266
267
268/**
269 * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
270 * @hba: pointer to adapter instance
271 */
272static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
273{
274 int idx;
275
276 if (!hba->cid_que.cid_free_cnt)
277 return -1;
278
279 idx = hba->cid_que.cid_q_cons_idx;
280 hba->cid_que.cid_q_cons_idx++;
281 if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
282 hba->cid_que.cid_q_cons_idx = 0;
283
284 hba->cid_que.cid_free_cnt--;
285 return hba->cid_que.cid_que[idx];
286}
287
288
289/**
290 * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free pool
291 * @hba: pointer to adapter instance
292 * @iscsi_cid: iscsi context ID to free
293 */
294static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
295{
296 int idx;
297
298 if (iscsi_cid == (u16) -1)
299 return;
300
301 hba->cid_que.cid_free_cnt++;
302
303 idx = hba->cid_que.cid_q_prod_idx;
304 hba->cid_que.cid_que[idx] = iscsi_cid;
305 hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
306 hba->cid_que.cid_q_prod_idx++;
307 if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
308 hba->cid_que.cid_q_prod_idx = 0;
309}
310
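The allocation/free pair above treats cid_que as a circular free list: allocation pops at the consumer index, free pushes at the producer index, both wrap at cid_q_max_idx, and cid_free_cnt tracks how many entries remain. A minimal self-contained sketch of the same scheme with a toy connection count:

#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_CONNS	4

struct toy_cid_que {
	uint32_t que[TOY_MAX_CONNS];	/* ring of free cids */
	uint32_t prod_idx;
	uint32_t cons_idx;
	uint32_t free_cnt;
};

static void cid_que_init(struct toy_cid_que *q)
{
	for (uint32_t i = 0; i < TOY_MAX_CONNS; i++)
		q->que[i] = i;		/* every cid starts out free */
	q->prod_idx = q->cons_idx = 0;
	q->free_cnt = TOY_MAX_CONNS;
}

static int cid_alloc(struct toy_cid_que *q)
{
	if (!q->free_cnt)
		return -1;		/* pool exhausted */

	uint32_t cid = q->que[q->cons_idx];

	q->cons_idx = (q->cons_idx + 1) % TOY_MAX_CONNS;
	q->free_cnt--;
	return cid;
}

static void cid_free(struct toy_cid_que *q, uint32_t cid)
{
	q->que[q->prod_idx] = cid;
	q->prod_idx = (q->prod_idx + 1) % TOY_MAX_CONNS;
	q->free_cnt++;
}

int main(void)
{
	struct toy_cid_que q;

	cid_que_init(&q);
	int a = cid_alloc(&q), b = cid_alloc(&q);
	printf("allocated %d and %d, %u left\n", a, b, q.free_cnt);
	cid_free(&q, a);
	printf("freed %d, %u left\n", a, q.free_cnt);
	return 0;
}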
311
312/**
313 * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
314 * @hba: pointer to adapter instance
315 *
316 * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
317 * and initializes table attributes
318 */
319static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
320{
321 int mem_size;
322 int i;
323
324 mem_size = hba->max_active_conns * sizeof(u32);
325 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
326
327 hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
328 if (!hba->cid_que.cid_que_base)
329 return -ENOMEM;
330
331 mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
332 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
333 hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
334 if (!hba->cid_que.conn_cid_tbl) {
335 kfree(hba->cid_que.cid_que_base);
336 hba->cid_que.cid_que_base = NULL;
337 return -ENOMEM;
338 }
339
340 hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
341 hba->cid_que.cid_q_prod_idx = 0;
342 hba->cid_que.cid_q_cons_idx = 0;
343 hba->cid_que.cid_q_max_idx = hba->max_active_conns;
344 hba->cid_que.cid_free_cnt = hba->max_active_conns;
345
346 for (i = 0; i < hba->max_active_conns; i++) {
347 hba->cid_que.cid_que[i] = i;
348 hba->cid_que.conn_cid_tbl[i] = NULL;
349 }
350 return 0;
351}
352
353
354/**
355 * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
356 * @hba: pointer to adapter instance
357 */
358static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
359{
360 kfree(hba->cid_que.cid_que_base);
361 hba->cid_que.cid_que_base = NULL;
362
363 kfree(hba->cid_que.conn_cid_tbl);
364 hba->cid_que.conn_cid_tbl = NULL;
365}
366
367
368/**
369 * bnx2i_alloc_ep - allocates ep structure from global pool
370 * @hba: pointer to adapter instance
371 *
372 * routine allocates a free endpoint structure from global pool and
373 * a tcp port to be used for this connection. Global resource lock,
374 * 'bnx2i_resc_lock' is held while accessing shared global data structures
375 */
376static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
377{
378 struct iscsi_endpoint *ep;
379 struct bnx2i_endpoint *bnx2i_ep;
380
381 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
382 if (!ep) {
383 printk(KERN_ERR "bnx2i: Could not allocate ep\n");
384 return NULL;
385 }
386
387 bnx2i_ep = ep->dd_data;
388 INIT_LIST_HEAD(&bnx2i_ep->link);
389 bnx2i_ep->state = EP_STATE_IDLE;
390 bnx2i_ep->hba = hba;
391 bnx2i_ep->hba_age = hba->age;
392 hba->ofld_conns_active++;
393 init_waitqueue_head(&bnx2i_ep->ofld_wait);
394 return ep;
395}
396
397
398/**
399 * bnx2i_free_ep - free endpoint
400 * @ep: pointer to iscsi endpoint structure
401 */
402static void bnx2i_free_ep(struct iscsi_endpoint *ep)
403{
404 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
405 unsigned long flags;
406
407 spin_lock_irqsave(&bnx2i_resc_lock, flags);
408 bnx2i_ep->state = EP_STATE_IDLE;
409 bnx2i_ep->hba->ofld_conns_active--;
410
411 bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
412 if (bnx2i_ep->conn) {
413 bnx2i_ep->conn->ep = NULL;
414 bnx2i_ep->conn = NULL;
415 }
416
417 bnx2i_ep->hba = NULL;
418 spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
419 iscsi_destroy_endpoint(ep);
420}
421
422
423/**
424 * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
425 * @hba: adapter instance pointer
426 * @session: iscsi session pointer
427 * @cmd: iscsi command structure
428 */
429static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
430 struct bnx2i_cmd *cmd)
431{
432 struct io_bdt *io = &cmd->io_tbl;
433 struct iscsi_bd *bd;
434
435 io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
436 ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
437 &io->bd_tbl_dma, GFP_KERNEL);
438 if (!io->bd_tbl) {
439 iscsi_session_printk(KERN_ERR, session, "Could not "
440 "allocate bdt.\n");
441 return -ENOMEM;
442 }
443 io->bd_valid = 0;
444 return 0;
445}
446
447/**
448 * bnx2i_destroy_cmd_pool - destroys iscsi command pool and releases BD tables
449 * @hba: adapter instance pointer
450 * @session: iscsi session pointer
452 */
453static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
454 struct iscsi_session *session)
455{
456 int i;
457
458 for (i = 0; i < session->cmds_max; i++) {
459 struct iscsi_task *task = session->cmds[i];
460 struct bnx2i_cmd *cmd = task->dd_data;
461
462 if (cmd->io_tbl.bd_tbl)
463 dma_free_coherent(&hba->pcidev->dev,
464 ISCSI_MAX_BDS_PER_CMD *
465 sizeof(struct iscsi_bd),
466 cmd->io_tbl.bd_tbl,
467 cmd->io_tbl.bd_tbl_dma);
468 }
469
470}
471
472
473/**
474 * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
475 * @hba: adapter instance pointer
476 * @session: iscsi session pointer
477 */
478static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
479 struct iscsi_session *session)
480{
481 int i;
482
483 for (i = 0; i < session->cmds_max; i++) {
484 struct iscsi_task *task = session->cmds[i];
485 struct bnx2i_cmd *cmd = task->dd_data;
486
487 /* Anil */
488 task->hdr = &cmd->hdr;
489 task->hdr_max = sizeof(struct iscsi_hdr);
490
491 if (bnx2i_alloc_bdt(hba, session, cmd))
492 goto free_bdts;
493 }
494
495 return 0;
496
497free_bdts:
498 bnx2i_destroy_cmd_pool(hba, session);
499 return -ENOMEM;
500}
501
502
503/**
504 * bnx2i_setup_mp_bdt - allocate BD table resources
505 * @hba: pointer to adapter structure
506 *
507 * Allocate memory for dummy buffer and associated BD
508 * table to be used by middle path (MP) requests
509 */
510static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
511{
512 int rc = 0;
513 struct iscsi_bd *mp_bdt;
514 u64 addr;
515
516 hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
517 &hba->mp_bd_dma, GFP_KERNEL);
518 if (!hba->mp_bd_tbl) {
519 printk(KERN_ERR "unable to allocate Middle Path BDT\n");
520 rc = -1;
521 goto out;
522 }
523
524 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
525 &hba->dummy_buf_dma, GFP_KERNEL);
526 if (!hba->dummy_buffer) {
527 printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
528 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
529 hba->mp_bd_tbl, hba->mp_bd_dma);
530 hba->mp_bd_tbl = NULL;
531 rc = -1;
532 goto out;
533 }
534
535 mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
536 addr = (unsigned long) hba->dummy_buf_dma;
537 mp_bdt->buffer_addr_lo = addr & 0xffffffff;
538 mp_bdt->buffer_addr_hi = addr >> 32;
539 mp_bdt->buffer_length = PAGE_SIZE;
540 mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
541 ISCSI_BD_FIRST_IN_BD_CHAIN;
542out:
543 return rc;
544}
545
546
547/**
548 * bnx2i_free_mp_bdt - frees middle path (MP) resources
549 * @hba: pointer to adapter instance
550 *
551 * free MP dummy buffer and associated BD table
552 */
553static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
554{
555 if (hba->mp_bd_tbl) {
556 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
557 hba->mp_bd_tbl, hba->mp_bd_dma);
558 hba->mp_bd_tbl = NULL;
559 }
560 if (hba->dummy_buffer) {
561 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
562 hba->dummy_buffer, hba->dummy_buf_dma);
563 hba->dummy_buffer = NULL;
564 }
565 return;
566}
567
568/**
569 * bnx2i_drop_session - notifies iscsid of connection error.
570 * @cls_session: pointer to iscsi cls session
571 *
572 * This notifies iscsid that there is an error, so it can initiate
573 * recovery.
574 *
575 * This relies on the caller using the iscsi class iterator so
576 * the object is refcounted and does not disappear from
577 * under us.
578 */
579void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
580{
581 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
582}
583
584/**
585 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
586 * @hba: pointer to adapter instance
587 * @ep: pointer to endpoint (transport identifier) structure
588 *
589 * EP destroy queue manager
590 */
591static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
592 struct bnx2i_endpoint *ep)
593{
594 write_lock_bh(&hba->ep_rdwr_lock);
595 list_add_tail(&ep->link, &hba->ep_destroy_list);
596 write_unlock_bh(&hba->ep_rdwr_lock);
597 return 0;
598}
599
600/**
601 * bnx2i_ep_destroy_list_del - removes an entry from EP destroy list
602 *
603 * @hba: pointer to adapter instance
604 * @ep: pointer to endpoint (transport identifier) structure
605 *
606 * EP destroy queue manager
607 */
608static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
609 struct bnx2i_endpoint *ep)
610{
611 write_lock_bh(&hba->ep_rdwr_lock);
612 list_del_init(&ep->link);
613 write_unlock_bh(&hba->ep_rdwr_lock);
614
615 return 0;
616}
617
618/**
619 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
620 * @hba: pointer to adapter instance
621 * @ep: pointer to endpoint (transport identifier) structure
622 *
623 * pending conn offload completion queue manager
624 */
625static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
626 struct bnx2i_endpoint *ep)
627{
628 write_lock_bh(&hba->ep_rdwr_lock);
629 list_add_tail(&ep->link, &hba->ep_ofld_list);
630 write_unlock_bh(&hba->ep_rdwr_lock);
631 return 0;
632}
633
634/**
635 * bnx2i_ep_ofld_list_del - removes an entry from ep offload pending list
636 * @hba: pointer to adapter instance
637 * @ep: pointer to endpoint (transport identifier) structure
638 *
639 * pending conn offload completion queue manager
640 */
641static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
642 struct bnx2i_endpoint *ep)
643{
644 write_lock_bh(&hba->ep_rdwr_lock);
645 list_del_init(&ep->link);
646 write_unlock_bh(&hba->ep_rdwr_lock);
647 return 0;
648}
649
650
651/**
652 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
653 *
654 * @hba: pointer to adapter instance
655 * @iscsi_cid: iscsi context ID to find
656 *
657 */
658struct bnx2i_endpoint *
659bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
660{
661 struct list_head *list;
662 struct list_head *tmp;
663 struct bnx2i_endpoint *ep;
664
665 read_lock_bh(&hba->ep_rdwr_lock);
666 list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
667 ep = (struct bnx2i_endpoint *)list;
668
669 if (ep->ep_iscsi_cid == iscsi_cid)
670 break;
671 ep = NULL;
672 }
673 read_unlock_bh(&hba->ep_rdwr_lock);
674
675 if (!ep)
676 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
677 return ep;
678}
679
680
681/**
682 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
683 * @hba: pointer to adapter instance
684 * @iscsi_cid: iscsi context ID to find
685 *
686 */
687struct bnx2i_endpoint *
688bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
689{
690 struct list_head *list;
691 struct list_head *tmp;
692 struct bnx2i_endpoint *ep;
693
694 read_lock_bh(&hba->ep_rdwr_lock);
695 list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
696 ep = (struct bnx2i_endpoint *)list;
697
698 if (ep->ep_iscsi_cid == iscsi_cid)
699 break;
700 ep = NULL;
701 }
702 read_unlock_bh(&hba->ep_rdwr_lock);
703
704 if (!ep)
705 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
706
707 return ep;
708}
709
710/**
711 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
712 * @hba: pointer to adapter instance
713 * @shost: scsi host pointer
714 *
715 * Initializes 'can_queue' parameter based on how many outstanding commands
716 * the device can handle. Each device 5708/5709/57710 has different
717 * capabilities
718 */
719static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
720 struct Scsi_Host *shost)
721{
722 if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
723 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
724 else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
725 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
726 else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
727 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
728 else
729 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
730}
731
732
733/**
734 * bnx2i_alloc_hba - allocate and init adapter instance
735 * @cnic: cnic device pointer
736 *
737 * allocate & initialize adapter structure and call other
738 * support routines to do per adapter initialization
739 */
740struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
741{
742 struct Scsi_Host *shost;
743 struct bnx2i_hba *hba;
744
745 shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
746 if (!shost)
747 return NULL;
748 shost->dma_boundary = cnic->pcidev->dma_mask;
749 shost->transportt = bnx2i_scsi_xport_template;
750 shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
751 shost->max_channel = 0;
752 shost->max_lun = 512;
753 shost->max_cmd_len = 16;
754
755 hba = iscsi_host_priv(shost);
756 hba->shost = shost;
757 hba->netdev = cnic->netdev;
758 /* Get PCI related information and update hba struct members */
759 hba->pcidev = cnic->pcidev;
760 pci_dev_get(hba->pcidev);
761 hba->pci_did = hba->pcidev->device;
762 hba->pci_vid = hba->pcidev->vendor;
763 hba->pci_sdid = hba->pcidev->subsystem_device;
764 hba->pci_svid = hba->pcidev->subsystem_vendor;
765 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
766 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
767 bnx2i_identify_device(hba);
768
769
770 bnx2i_setup_host_queue_size(hba, shost);
771
772 if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
773 hba->regview = ioremap_nocache(hba->netdev->base_addr,
774 BNX2_MQ_CONFIG2);
775 if (!hba->regview)
776 goto ioreg_map_err;
777 } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
778 hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
779 if (!hba->regview)
780 goto ioreg_map_err;
781 }
782
783 if (bnx2i_setup_mp_bdt(hba))
784 goto mp_bdt_mem_err;
785
786 INIT_LIST_HEAD(&hba->ep_ofld_list);
787 INIT_LIST_HEAD(&hba->ep_destroy_list);
788 rwlock_init(&hba->ep_rdwr_lock);
789
790 hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
791
792 /* different values for 5708/5709/57710 */
793 hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
794
795 if (bnx2i_setup_free_cid_que(hba))
796 goto cid_que_err;
797
798 /* SQ/RQ/CQ size can be changed via sysfs interface */
799 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
800 if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
801 hba->max_sqes = sq_size;
802 else
803 hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
804 } else { /* 5706/5708/5709 */
805 if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
806 hba->max_sqes = sq_size;
807 else
808 hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
809 }
810
811 hba->max_rqes = rq_size;
812 hba->max_cqes = hba->max_sqes + rq_size;
813 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
814 if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
815 hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
816 } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
817 hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
818
819 hba->num_ccell = hba->max_sqes / 2;
820
821 spin_lock_init(&hba->lock);
822 mutex_init(&hba->net_dev_lock);
823
824 if (iscsi_host_add(shost, &hba->pcidev->dev))
825 goto free_dump_mem;
826 return hba;
827
828free_dump_mem:
829 bnx2i_release_free_cid_que(hba);
830cid_que_err:
831 bnx2i_free_mp_bdt(hba);
832mp_bdt_mem_err:
833 if (hba->regview) {
834 iounmap(hba->regview);
835 hba->regview = NULL;
836 }
837ioreg_map_err:
838 pci_dev_put(hba->pcidev);
839 scsi_host_put(shost);
840 return NULL;
841}
842
843/**
844 * bnx2i_free_hba- releases hba structure and resources held by the adapter
845 * @hba: pointer to adapter instance
846 *
847 * free adapter structure and call various cleanup routines.
848 */
849void bnx2i_free_hba(struct bnx2i_hba *hba)
850{
851 struct Scsi_Host *shost = hba->shost;
852
853 iscsi_host_remove(shost);
854 INIT_LIST_HEAD(&hba->ep_ofld_list);
855 INIT_LIST_HEAD(&hba->ep_destroy_list);
856 pci_dev_put(hba->pcidev);
857
858 if (hba->regview) {
859 iounmap(hba->regview);
860 hba->regview = NULL;
861 }
862 bnx2i_free_mp_bdt(hba);
863 bnx2i_release_free_cid_que(hba);
864 iscsi_host_free(shost);
865}
866
867/**
868 * bnx2i_conn_free_login_resources - free DMA resources used for login process
869 * @hba: pointer to adapter instance
870 * @bnx2i_conn: iscsi connection pointer
871 *
872 * Login related resources, mostly BDT & payload DMA memory, are freed
873 */
874static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
875 struct bnx2i_conn *bnx2i_conn)
876{
877 if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
878 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
879 bnx2i_conn->gen_pdu.resp_bd_tbl,
880 bnx2i_conn->gen_pdu.resp_bd_dma);
881 bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
882 }
883
884 if (bnx2i_conn->gen_pdu.req_bd_tbl) {
885 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
886 bnx2i_conn->gen_pdu.req_bd_tbl,
887 bnx2i_conn->gen_pdu.req_bd_dma);
888 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
889 }
890
891 if (bnx2i_conn->gen_pdu.resp_buf) {
892 dma_free_coherent(&hba->pcidev->dev,
893 ISCSI_DEF_MAX_RECV_SEG_LEN,
894 bnx2i_conn->gen_pdu.resp_buf,
895 bnx2i_conn->gen_pdu.resp_dma_addr);
896 bnx2i_conn->gen_pdu.resp_buf = NULL;
897 }
898
899 if (bnx2i_conn->gen_pdu.req_buf) {
900 dma_free_coherent(&hba->pcidev->dev,
901 ISCSI_DEF_MAX_RECV_SEG_LEN,
902 bnx2i_conn->gen_pdu.req_buf,
903 bnx2i_conn->gen_pdu.req_dma_addr);
904 bnx2i_conn->gen_pdu.req_buf = NULL;
905 }
906}
907
908/**
909 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
910 * @hba: pointer to adapter instance
911 * @bnx2i_conn: iscsi connection pointer
912 *
913 * Mgmt task DMA resources are allocated in this routine.
914 */
915static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
916 struct bnx2i_conn *bnx2i_conn)
917{
918 /* Allocate memory for login request/response buffers */
919 bnx2i_conn->gen_pdu.req_buf =
920 dma_alloc_coherent(&hba->pcidev->dev,
921 ISCSI_DEF_MAX_RECV_SEG_LEN,
922 &bnx2i_conn->gen_pdu.req_dma_addr,
923 GFP_KERNEL);
924 if (bnx2i_conn->gen_pdu.req_buf == NULL)
925 goto login_req_buf_failure;
926
927 bnx2i_conn->gen_pdu.req_buf_size = 0;
928 bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
929
930 bnx2i_conn->gen_pdu.resp_buf =
931 dma_alloc_coherent(&hba->pcidev->dev,
932 ISCSI_DEF_MAX_RECV_SEG_LEN,
933 &bnx2i_conn->gen_pdu.resp_dma_addr,
934 GFP_KERNEL);
935 if (bnx2i_conn->gen_pdu.resp_buf == NULL)
936 goto login_resp_buf_failure;
937
938 bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
939 bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
940
941 bnx2i_conn->gen_pdu.req_bd_tbl =
942 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
943 &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
944 if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
945 goto login_req_bd_tbl_failure;
946
947 bnx2i_conn->gen_pdu.resp_bd_tbl =
948 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
949 &bnx2i_conn->gen_pdu.resp_bd_dma,
950 GFP_KERNEL);
951 if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
952 goto login_resp_bd_tbl_failure;
953
954 return 0;
955
956login_resp_bd_tbl_failure:
957 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
958 bnx2i_conn->gen_pdu.req_bd_tbl,
959 bnx2i_conn->gen_pdu.req_bd_dma);
960 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
961
962login_req_bd_tbl_failure:
963 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
964 bnx2i_conn->gen_pdu.resp_buf,
965 bnx2i_conn->gen_pdu.resp_dma_addr);
966 bnx2i_conn->gen_pdu.resp_buf = NULL;
967login_resp_buf_failure:
968 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
969 bnx2i_conn->gen_pdu.req_buf,
970 bnx2i_conn->gen_pdu.req_dma_addr);
971 bnx2i_conn->gen_pdu.req_buf = NULL;
972login_req_buf_failure:
973 iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
974 "login resource alloc failed!!\n");
975 return -ENOMEM;
976
977}
978
979
980/**
981 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
982 * @bnx2i_conn: iscsi connection pointer
983 *
984 * Prepares BD table entries for PDUs built by the 'iscsid' daemon
985 * before shipping such requests to cnic
986 */
987static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
988{
989 struct iscsi_bd *bd_tbl;
990
991 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
992
993 bd_tbl->buffer_addr_hi =
994 (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
995 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
996 bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
997 bnx2i_conn->gen_pdu.req_buf;
998 bd_tbl->reserved0 = 0;
999 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1000 ISCSI_BD_FIRST_IN_BD_CHAIN;
1001
1002 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
1003 bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
1004 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
1005 bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
1006 bd_tbl->reserved0 = 0;
1007 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1008 ISCSI_BD_FIRST_IN_BD_CHAIN;
1009}
1010
1011
1012/**
1013 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
1014 * @task: transport layer task pointer
1015 *
1016 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
1017 * Nop-out and Logout requests flow through this path.
1018 */
1019static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1020{
1021 struct bnx2i_cmd *cmd = task->dd_data;
1022 struct bnx2i_conn *bnx2i_conn = cmd->conn;
1023 int rc = 0;
1024 char *buf;
1025 int data_len;
1026
1027 bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
1028 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1029 case ISCSI_OP_LOGIN:
1030 bnx2i_send_iscsi_login(bnx2i_conn, task);
1031 break;
1032 case ISCSI_OP_NOOP_OUT:
1033 data_len = bnx2i_conn->gen_pdu.req_buf_size;
1034 buf = bnx2i_conn->gen_pdu.req_buf;
1035 if (data_len)
1036 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1037 RESERVED_ITT,
1038 buf, data_len, 1);
1039 else
1040 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1041 RESERVED_ITT,
1042 NULL, 0, 1);
1043 break;
1044 case ISCSI_OP_LOGOUT:
1045 rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
1046 break;
1047 case ISCSI_OP_SCSI_TMFUNC:
1048 rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
1049 break;
1050 default:
1051 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1052 "send_gen: unsupported op 0x%x\n",
1053 task->hdr->opcode);
1054 }
1055 return rc;
1056}
1057
1058
1059/**********************************************************************
1060 * SCSI-ML Interface
1061 **********************************************************************/
1062
1063/**
1064 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
1065 * @sc: SCSI-ML command pointer
1066 * @cmd: iscsi cmd pointer
1067 */
1068static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
1069{
1070 u32 dword;
1071 int lpcnt;
1072 u8 *srcp;
1073 u32 *dstp;
1074 u32 scsi_lun[2];
1075
1076 int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
1077 cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
1078 cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
1079
1080 lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
1081 srcp = (u8 *) sc->cmnd;
1082 dstp = (u32 *) cmd->req.cdb;
1083 while (lpcnt--) {
1084 memcpy(&dword, (const void *) srcp, 4);
1085 *dstp = cpu_to_be32(dword);
1086 srcp += 4;
1087 dstp++;
1088 }
1089 if (sc->cmd_len & 0x3) {
1090 dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
1091 *dstp = cpu_to_be32(dword);
1092 }
1093}
1094
1095static void bnx2i_cleanup_task(struct iscsi_task *task)
1096{
1097 struct iscsi_conn *conn = task->conn;
1098 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1099 struct bnx2i_hba *hba = bnx2i_conn->hba;
1100
1101 /*
1102 * mgmt task or cmd was never sent to us to transmit.
1103 */
1104 if (!task->sc || task->state == ISCSI_TASK_PENDING)
1105 return;
1106 /*
1107 * need to clean-up task context to claim dma buffers
1108 */
1109 if (task->state == ISCSI_TASK_ABRT_TMF) {
1110 bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
1111
1112 spin_unlock_bh(&conn->session->lock);
1113 wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
1114 msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
1115 spin_lock_bh(&conn->session->lock);
1116 }
1117 bnx2i_iscsi_unmap_sg_list(task->dd_data);
1118}
1119
1120/**
1121 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
1122 * @conn: transport layer conn structure pointer
1123 * @task: transport layer command structure pointer
1124 */
1125static int
1126bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1127{
1128 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1129 struct bnx2i_cmd *cmd = task->dd_data;
1130
1131 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1132
1133 bnx2i_setup_cmd_wqe_template(cmd);
1134 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1135 if (task->data_count) {
1136 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1137 task->data_count);
1138 bnx2i_conn->gen_pdu.req_wr_ptr =
1139 bnx2i_conn->gen_pdu.req_buf + task->data_count;
1140 }
1141 cmd->conn = conn->dd_data;
1142 cmd->scsi_cmd = NULL;
1143 return bnx2i_iscsi_send_generic_request(task);
1144}
1145
1146/**
1147 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
1148 * @task: transport layer command structure pointer
1149 *
1150 * maps SG buffers and sends the request to chip/firmware in the form of an SQ WQE
1151 */
1152static int bnx2i_task_xmit(struct iscsi_task *task)
1153{
1154 struct iscsi_conn *conn = task->conn;
1155 struct iscsi_session *session = conn->session;
1156 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
1157 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1158 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1159 struct scsi_cmnd *sc = task->sc;
1160 struct bnx2i_cmd *cmd = task->dd_data;
1161 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1162
1163 if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
1164 return -ENOTCONN;
1165
1166 if (!bnx2i_conn->is_bound)
1167 return -ENOTCONN;
1168
1169 /*
1170 * If there is no scsi_cmnd this must be a mgmt task
1171 */
1172 if (!sc)
1173 return bnx2i_mtask_xmit(conn, task);
1174
1175 bnx2i_setup_cmd_wqe_template(cmd);
1176 cmd->req.op_code = ISCSI_OP_SCSI_CMD;
1177 cmd->conn = bnx2i_conn;
1178 cmd->scsi_cmd = sc;
1179 cmd->req.total_data_transfer_length = scsi_bufflen(sc);
1180 cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
1181
1182 bnx2i_iscsi_map_sg_list(cmd);
1183 bnx2i_cpy_scsi_cdb(sc, cmd);
1184
1185 cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
1186 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1187 cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
1188 cmd->req.itt = task->itt |
1189 (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1190 bnx2i_setup_write_cmd_bd_info(task);
1191 } else {
1192 if (scsi_bufflen(sc))
1193 cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
1194 cmd->req.itt = task->itt |
1195 (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1196 }
1197
1198 cmd->req.num_bds = cmd->io_tbl.bd_valid;
1199 if (!cmd->io_tbl.bd_valid) {
1200 cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
1201 cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
1202 cmd->req.num_bds = 1;
1203 }
1204
1205 bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
1206 return 0;
1207}
1208
1209/**
1210 * bnx2i_session_create - create a new iscsi session
1211 * @cmds_max: max commands supported
1212 * @qdepth: scsi queue depth to support
1213 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
1214 *
1215 * Creates a new iSCSI session instance on given device.
1216 */
1217static struct iscsi_cls_session *
1218bnx2i_session_create(struct iscsi_endpoint *ep,
1219 uint16_t cmds_max, uint16_t qdepth,
1220 uint32_t initial_cmdsn)
1221{
1222 struct Scsi_Host *shost;
1223 struct iscsi_cls_session *cls_session;
1224 struct bnx2i_hba *hba;
1225 struct bnx2i_endpoint *bnx2i_ep;
1226
1227 if (!ep) {
1228 printk(KERN_ERR "bnx2i: missing ep.\n");
1229 return NULL;
1230 }
1231
1232 bnx2i_ep = ep->dd_data;
1233 shost = bnx2i_ep->hba->shost;
1234 hba = iscsi_host_priv(shost);
1235 if (bnx2i_adapter_ready(hba))
1236 return NULL;
1237
1238 /*
1239 * user can override hw limit as long as it is within
1240 * the min/max.
1241 */
1242 if (cmds_max > hba->max_sqes)
1243 cmds_max = hba->max_sqes;
1244 else if (cmds_max < BNX2I_SQ_WQES_MIN)
1245 cmds_max = BNX2I_SQ_WQES_MIN;
1246
1247 cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1248 cmds_max, sizeof(struct bnx2i_cmd),
1249 initial_cmdsn, ISCSI_MAX_TARGET);
1250 if (!cls_session)
1251 return NULL;
1252
1253 if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
1254 goto session_teardown;
1255 return cls_session;
1256
1257session_teardown:
1258 iscsi_session_teardown(cls_session);
1259 return NULL;
1260}
1261
1262
1263/**
1264 * bnx2i_session_destroy - destroys iscsi session
1265 * @cls_session: pointer to iscsi cls session
1266 *
1267 * Destroys previously created iSCSI session instance and releases
1268 * all resources held by it
1269 */
1270static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
1271{
1272 struct iscsi_session *session = cls_session->dd_data;
1273 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1274 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1275
1276 bnx2i_destroy_cmd_pool(hba, session);
1277 iscsi_session_teardown(cls_session);
1278}
1279
1280
1281/**
1282 * bnx2i_conn_create - create iscsi connection instance
1283 * @cls_session: pointer to iscsi cls session
1284 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
1285 *
1286 * Creates a new iSCSI connection instance for a given session
1287 */
1288static struct iscsi_cls_conn *
1289bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1290{
1291 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1292 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1293 struct bnx2i_conn *bnx2i_conn;
1294 struct iscsi_cls_conn *cls_conn;
1295 struct iscsi_conn *conn;
1296
1297 cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
1298 cid);
1299 if (!cls_conn)
1300 return NULL;
1301 conn = cls_conn->dd_data;
1302
1303 bnx2i_conn = conn->dd_data;
1304 bnx2i_conn->cls_conn = cls_conn;
1305 bnx2i_conn->hba = hba;
1306 /* 'ep' ptr will be assigned in bind() call */
1307 bnx2i_conn->ep = NULL;
1308 init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
1309
1310 if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
1311 iscsi_conn_printk(KERN_ALERT, conn,
1312 "conn_new: login resc alloc failed!!\n");
1313 goto free_conn;
1314 }
1315
1316 return cls_conn;
1317
1318free_conn:
1319 iscsi_conn_teardown(cls_conn);
1320 return NULL;
1321}
1322
1323/**
1324 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
1325 * @cls_session: pointer to iscsi cls session
1326 * @cls_conn: pointer to iscsi cls conn
1327 * @transport_fd: 64-bit EP handle
1328 * @is_leading: leading connection on this session?
1329 *
1330 * Binds together iSCSI session instance, iSCSI connection instance
1331 * and the TCP connection. This routine returns an error code if the
1332 * TCP connection does not belong to the device the iSCSI sess/conn
1333 * is bound to
1334 */
1335static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1336 struct iscsi_cls_conn *cls_conn,
1337 uint64_t transport_fd, int is_leading)
1338{
1339 struct iscsi_conn *conn = cls_conn->dd_data;
1340 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1341 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1342 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1343 struct bnx2i_endpoint *bnx2i_ep;
1344 struct iscsi_endpoint *ep;
1345 int ret_code;
1346
1347 ep = iscsi_lookup_endpoint(transport_fd);
1348 if (!ep)
1349 return -EINVAL;
1350
1351 bnx2i_ep = ep->dd_data;
1352 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
1353 (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
1354 /* Peer disconnected via FIN or RST */
1355 return -EINVAL;
1356
1357 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1358 return -EINVAL;
1359
1360 if (bnx2i_ep->hba != hba) {
1361 /* Error - TCP connection does not belong to this device
1362 */
1363 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1364 "conn bind, ep=0x%p (%s) does not",
1365 bnx2i_ep, bnx2i_ep->hba->netdev->name);
1366 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1367 "belong to hba (%s)\n",
1368 hba->netdev->name);
1369 return -EEXIST;
1370 }
1371
1372 bnx2i_ep->conn = bnx2i_conn;
1373 bnx2i_conn->ep = bnx2i_ep;
1374 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1375 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1376 bnx2i_conn->is_bound = 1;
1377
1378 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1379 bnx2i_ep->ep_iscsi_cid);
1380
1381 /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
1382 * driver needs to explicitly replenish RQ index during setup.
1383 */
1384 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1385 bnx2i_put_rq_buf(bnx2i_conn, 0);
1386
1387 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1388 return ret_code;
1389}
1390
1391
1392/**
1393 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
1394 * @cls_conn: pointer to iscsi cls conn
1395 *
1396 * Destroy an iSCSI connection instance and release memory resources held by
1397 * this connection
1398 */
1399static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1400{
1401 struct iscsi_conn *conn = cls_conn->dd_data;
1402 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1403 struct Scsi_Host *shost;
1404 struct bnx2i_hba *hba;
1405
1406 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1407 hba = iscsi_host_priv(shost);
1408
1409 bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1410 iscsi_conn_teardown(cls_conn);
1411}
1412
1413
1414/**
1415 * bnx2i_conn_get_param - return iscsi connection parameter to caller
1416 * @cls_conn: pointer to iscsi cls conn
1417 * @param: parameter type identifier
1418 * @buf: buffer pointer
1419 *
1420 * returns iSCSI connection parameters
1421 */
1422static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
1423 enum iscsi_param param, char *buf)
1424{
1425 struct iscsi_conn *conn = cls_conn->dd_data;
1426 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1427 int len = 0;
1428
1429 switch (param) {
1430 case ISCSI_PARAM_CONN_PORT:
1431 if (bnx2i_conn->ep)
1432 len = sprintf(buf, "%hu\n",
1433 bnx2i_conn->ep->cm_sk->dst_port);
1434 break;
1435 case ISCSI_PARAM_CONN_ADDRESS:
1436 if (bnx2i_conn->ep)
1437 len = sprintf(buf, NIPQUAD_FMT "\n",
1438 NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
1439 break;
1440 default:
1441 return iscsi_conn_get_param(cls_conn, param, buf);
1442 }
1443
1444 return len;
1445}
1446
1447/**
1448 * bnx2i_host_get_param - returns host (adapter) related parameters
1449 * @shost: scsi host pointer
1450 * @param: parameter type identifier
1451 * @buf: buffer pointer
1452 */
1453static int bnx2i_host_get_param(struct Scsi_Host *shost,
1454 enum iscsi_host_param param, char *buf)
1455{
1456 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1457 int len = 0;
1458
1459 switch (param) {
1460 case ISCSI_HOST_PARAM_HWADDRESS:
1461 len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
1462 break;
1463 case ISCSI_HOST_PARAM_NETDEV_NAME:
1464 len = sprintf(buf, "%s\n", hba->netdev->name);
1465 break;
1466 default:
1467 return iscsi_host_get_param(shost, param, buf);
1468 }
1469 return len;
1470}
1471
1472/**
1473 * bnx2i_conn_start - completes iscsi connection migration to FFP
1474 * @cls_conn: pointer to iscsi cls conn
1475 *
1476 * last call in FFP migration to hand over the iscsi conn to the driver
1477 */
1478static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
1479{
1480 struct iscsi_conn *conn = cls_conn->dd_data;
1481 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1482
1483 bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
1484 bnx2i_update_iscsi_conn(conn);
1485
1486 /*
1487 * this should normally not sleep for a long time so it should
1488 * not disrupt the caller.
1489 */
1490 bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
1491 bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1492 bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
1493 add_timer(&bnx2i_conn->ep->ofld_timer);
1494 /* update iSCSI context for this conn, wait for CNIC to complete */
1495 wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
1496 bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
1497
1498 if (signal_pending(current))
1499 flush_signals(current);
1500 del_timer_sync(&bnx2i_conn->ep->ofld_timer);
1501
1502 iscsi_conn_start(cls_conn);
1503 return 0;
1504}
1505
1506
1507/**
1508 * bnx2i_conn_get_stats - returns iSCSI stats
1509 * @cls_conn: pointer to iscsi cls conn
1510 * @stats: pointer to iscsi statistic struct
1511 */
1512static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1513 struct iscsi_stats *stats)
1514{
1515 struct iscsi_conn *conn = cls_conn->dd_data;
1516
1517 stats->txdata_octets = conn->txdata_octets;
1518 stats->rxdata_octets = conn->rxdata_octets;
1519 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1520 stats->dataout_pdus = conn->dataout_pdus_cnt;
1521 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1522 stats->datain_pdus = conn->datain_pdus_cnt;
1523 stats->r2t_pdus = conn->r2t_pdus_cnt;
1524 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1525 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1526 stats->digest_err = 0;
1527 stats->timeout_err = 0;
1528 strcpy(stats->custom[0].desc, "eh_abort_cnt");
1529 stats->custom[0].value = conn->eh_abort_cnt;
1530 stats->custom_length = 1;
1531
1532}
1533
1534
1535/**
1536 * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
1537 * @dst_addr: target IP address
1538 *
1539 * check if route resolves to BNX2 device
1540 */
1541static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1542{
1543 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1544 struct bnx2i_hba *hba;
1545 struct cnic_dev *cnic = NULL;
1546
1547 bnx2i_reg_dev_all();
1548
1549 hba = get_adapter_list_head();
1550 if (hba && hba->cnic)
1551 cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
1552 if (!cnic) {
1553 printk(KERN_ALERT "bnx2i: no route, "
1554 "can't connect using cnic\n");
1555 goto no_nx2_route;
1556 }
1557 hba = bnx2i_find_hba_for_cnic(cnic);
1558 if (!hba)
1559 goto no_nx2_route;
1560
1561 if (bnx2i_adapter_ready(hba)) {
1562 printk(KERN_ALERT "bnx2i: check route, hba not found\n");
1563 goto no_nx2_route;
1564 }
1565 if (hba->netdev->mtu > hba->mtu_supported) {
1566 printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
1567 hba->netdev->name, hba->netdev->mtu);
1568 printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
1569 hba->mtu_supported);
1570 goto no_nx2_route;
1571 }
1572 return hba;
1573no_nx2_route:
1574 return NULL;
1575}
1576
1577
1578/**
1579 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
1580 * @hba: pointer to adapter instance
1581 * @ep: endpoint (transport identifier) structure
1582 *
1583 * destroys cm_sock structure and on chip iscsi context
1584 */
1585static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1586 struct bnx2i_endpoint *ep)
1587{
1588 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
1589 hba->cnic->cm_destroy(ep->cm_sk);
1590
1591 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
1592 ep->state = EP_STATE_DISCONN_COMPL;
1593
1594 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
1595 ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1596 printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
1597 " NW/PCIe trace, driver msgs to developers"
1598 " for analysis\n");
1599 return 1;
1600 }
1601
1602 ep->state = EP_STATE_CLEANUP_START;
1603 init_timer(&ep->ofld_timer);
1604 ep->ofld_timer.expires = 10*HZ + jiffies;
1605 ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1606 ep->ofld_timer.data = (unsigned long) ep;
1607 add_timer(&ep->ofld_timer);
1608
1609 bnx2i_ep_destroy_list_add(hba, ep);
1610
1611 /* destroy iSCSI context, wait for it to complete */
1612 bnx2i_send_conn_destroy(hba, ep);
1613 wait_event_interruptible(ep->ofld_wait,
1614 (ep->state != EP_STATE_CLEANUP_START));
1615
1616 if (signal_pending(current))
1617 flush_signals(current);
1618 del_timer_sync(&ep->ofld_timer);
1619
1620 bnx2i_ep_destroy_list_del(hba, ep);
1621
1622 if (ep->state != EP_STATE_CLEANUP_CMPL)
1623 /* should never happen */
1624 printk(KERN_ALERT "bnx2i - conn destroy failed\n");
1625
1626 return 0;
1627}
1628
1629
1630/**
1631 * bnx2i_ep_connect - establish TCP connection to target portal
1632 * @shost: scsi host
1633 * @dst_addr: target IP address
1634 * @non_blocking: blocking or non-blocking call
1635 *
1636 * this routine initiates the TCP/IP connection by invoking Option-2 i/f
1637 * with l5_core and the CNIC. This is a multi-step process of resolving
1638 * route to target, creating an iscsi connection context, handshaking with
1639 * CNIC module to create/initialize the socket struct and finally
1640 * sending down option-2 request to complete TCP 3-way handshake
1641 */
1642static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1643 struct sockaddr *dst_addr,
1644 int non_blocking)
1645{
1646 u32 iscsi_cid = BNX2I_CID_RESERVED;
1647 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1648 struct sockaddr_in6 *desti6;
1649 struct bnx2i_endpoint *bnx2i_ep;
1650 struct bnx2i_hba *hba;
1651 struct cnic_dev *cnic;
1652 struct cnic_sockaddr saddr;
1653 struct iscsi_endpoint *ep;
1654 int rc = 0;
1655
1656 if (shost)
1657 /* driver is given scsi host to work with */
1658 hba = iscsi_host_priv(shost);
1659 else
1660 /*
1661 * check if the given destination can be reached through
1662 * an iscsi capable NetXtreme2 device
1663 */
1664 hba = bnx2i_check_route(dst_addr);
1665 if (!hba) {
1666 rc = -ENOMEM;
1667 goto check_busy;
1668 }
1669
1670 cnic = hba->cnic;
1671 ep = bnx2i_alloc_ep(hba);
1672 if (!ep) {
1673 rc = -ENOMEM;
1674 goto check_busy;
1675 }
1676 bnx2i_ep = ep->dd_data;
1677
1678 mutex_lock(&hba->net_dev_lock);
1679 if (bnx2i_adapter_ready(hba)) {
1680 rc = -EPERM;
1681 goto net_if_down;
1682 }
1683
1684 bnx2i_ep->state = EP_STATE_IDLE;
1685 bnx2i_ep->ep_iscsi_cid = (u16) -1;
1686 bnx2i_ep->num_active_cmds = 0;
1687 iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1688 if (iscsi_cid == -1) {
1689 printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
1690 rc = -ENOMEM;
1691 goto iscsi_cid_err;
1692 }
1693 bnx2i_ep->hba_age = hba->age;
1694
1695 rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
1696 if (rc != 0) {
1697 printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
1698 rc = -ENOMEM;
1699 goto qp_resc_err;
1700 }
1701
1702 bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
1703 bnx2i_ep->state = EP_STATE_OFLD_START;
1704 bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
1705
1706 init_timer(&bnx2i_ep->ofld_timer);
1707 bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
1708 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1709 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1710 add_timer(&bnx2i_ep->ofld_timer);
1711
1712 bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
1713
1714 /* Wait for CNIC hardware to setup conn context and return 'cid' */
1715 wait_event_interruptible(bnx2i_ep->ofld_wait,
1716 bnx2i_ep->state != EP_STATE_OFLD_START);
1717
1718 if (signal_pending(current))
1719 flush_signals(current);
1720 del_timer_sync(&bnx2i_ep->ofld_timer);
1721
1722 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1723
1724 if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
1725 rc = -ENOSPC;
1726 goto conn_failed;
1727 }
1728
1729 rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
1730 iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
1731 if (rc) {
1732 rc = -EINVAL;
1733 goto conn_failed;
1734 }
1735
1736 bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
1737 bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
1738 clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
1739
1740 memset(&saddr, 0, sizeof(saddr));
1741 if (dst_addr->sa_family == AF_INET) {
1742 desti = (struct sockaddr_in *) dst_addr;
1743 saddr.remote.v4 = *desti;
1744 saddr.local.v4.sin_family = desti->sin_family;
1745 } else if (dst_addr->sa_family == AF_INET6) {
1746 desti6 = (struct sockaddr_in6 *) dst_addr;
1747 saddr.remote.v6 = *desti6;
1748 saddr.local.v6.sin6_family = desti6->sin6_family;
1749 }
1750
1751 bnx2i_ep->timestamp = jiffies;
1752 bnx2i_ep->state = EP_STATE_CONNECT_START;
1753 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1754 rc = -EINVAL;
1755 goto conn_failed;
1756 } else
1757 rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
1758
1759 if (rc)
1760 goto release_ep;
1761
1762 if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
1763 goto release_ep;
1764 mutex_unlock(&hba->net_dev_lock);
1765 return ep;
1766
1767release_ep:
1768 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1769 mutex_unlock(&hba->net_dev_lock);
1770 return ERR_PTR(rc);
1771 }
1772conn_failed:
1773net_if_down:
1774iscsi_cid_err:
1775 bnx2i_free_qp_resc(hba, bnx2i_ep);
1776qp_resc_err:
1777 bnx2i_free_ep(ep);
1778 mutex_unlock(&hba->net_dev_lock);
1779check_busy:
1780 bnx2i_unreg_dev_all();
1781 return ERR_PTR(rc);
1782}
1783
1784
1785/**
1786 * bnx2i_ep_poll - polls for TCP connection establishment
1787 * @ep: TCP connection (endpoint) handle
1788 * @timeout_ms: timeout value in milli secs
1789 *
1790 * polls for TCP connect request to complete
1791 */
1792static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1793{
1794 struct bnx2i_endpoint *bnx2i_ep;
1795 int rc = 0;
1796
1797 bnx2i_ep = ep->dd_data;
1798 if ((bnx2i_ep->state == EP_STATE_IDLE) ||
1799 (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
1800 (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1801 return -1;
1802 if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
1803 return 1;
1804
1805 rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
1806 ((bnx2i_ep->state ==
1807 EP_STATE_OFLD_FAILED) ||
1808 (bnx2i_ep->state ==
1809 EP_STATE_CONNECT_FAILED) ||
1810 (bnx2i_ep->state ==
1811 EP_STATE_CONNECT_COMPL)),
1812 msecs_to_jiffies(timeout_ms));
1813 if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1814 rc = -1;
1815
1816 if (rc > 0)
1817 return 1;
1818 else if (!rc)
1819 return 0; /* timeout */
1820 else
1821 return rc;
1822}
1823
1824
1825/**
1826 * bnx2i_ep_tcp_conn_active - check EP state transition
1827 * @bnx2i_ep: endpoint pointer
1828 *
1829 * check if underlying TCP connection is active
1830 */
1831static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1832{
1833 int ret;
1834 int cnic_dev_10g = 0;
1835
1836 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1837 cnic_dev_10g = 1;
1838
1839 switch (bnx2i_ep->state) {
1840 case EP_STATE_CONNECT_START:
1841 case EP_STATE_CLEANUP_FAILED:
1842 case EP_STATE_OFLD_FAILED:
1843 case EP_STATE_DISCONN_TIMEDOUT:
1844 ret = 0;
1845 break;
1846 case EP_STATE_CONNECT_COMPL:
1847 case EP_STATE_ULP_UPDATE_START:
1848 case EP_STATE_ULP_UPDATE_COMPL:
1849 case EP_STATE_TCP_FIN_RCVD:
1850 case EP_STATE_ULP_UPDATE_FAILED:
1851 ret = 1;
1852 break;
1853 case EP_STATE_TCP_RST_RCVD:
1854 ret = 0;
1855 break;
1856 case EP_STATE_CONNECT_FAILED:
1857 if (cnic_dev_10g)
1858 ret = 1;
1859 else
1860 ret = 0;
1861 break;
1862 default:
1863 ret = 0;
1864 }
1865
1866 return ret;
1867}
1868
1869
1870/**
1871 * bnx2i_ep_disconnect - executes TCP connection teardown process
1872 * @ep: TCP connection (endpoint) handle
1873 *
1874 * executes TCP connection teardown process
1875 */
1876static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
1877{
1878 struct bnx2i_endpoint *bnx2i_ep;
1879 struct bnx2i_conn *bnx2i_conn = NULL;
1880 struct iscsi_session *session = NULL;
1881 struct iscsi_conn *conn;
1882 struct cnic_dev *cnic;
1883 struct bnx2i_hba *hba;
1884
1885 bnx2i_ep = ep->dd_data;
1886
1887 /* driver should not attempt connection cleanup until TCP_CONNECT
1888 * completes either successfully or fails. Timeout is 9-secs, so
1889 * wait for it to complete
1890 */
1891 while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
1892 !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
1893 msleep(250);
1894
1895 if (bnx2i_ep->conn) {
1896 bnx2i_conn = bnx2i_ep->conn;
1897 conn = bnx2i_conn->cls_conn->dd_data;
1898 session = conn->session;
1899
1900 spin_lock_bh(&session->lock);
1901 bnx2i_conn->is_bound = 0;
1902 spin_unlock_bh(&session->lock);
1903 }
1904
1905 hba = bnx2i_ep->hba;
1906 if (bnx2i_ep->state == EP_STATE_IDLE)
1907 goto return_bnx2i_ep;
1908 cnic = hba->cnic;
1909
1910 mutex_lock(&hba->net_dev_lock);
1911
1912 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
1913 goto free_resc;
1914 if (bnx2i_ep->hba_age != hba->age)
1915 goto free_resc;
1916
1917 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
1918 goto destroy_conn;
1919
1920 bnx2i_ep->state = EP_STATE_DISCONN_START;
1921
1922 init_timer(&bnx2i_ep->ofld_timer);
1923 bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
1924 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1925 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1926 add_timer(&bnx2i_ep->ofld_timer);
1927
1928 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1929 int close = 0;
1930
1931 if (session) {
1932 spin_lock_bh(&session->lock);
1933 if (session->state == ISCSI_STATE_LOGGING_OUT)
1934 close = 1;
1935 spin_unlock_bh(&session->lock);
1936 }
1937 if (close)
1938 cnic->cm_close(bnx2i_ep->cm_sk);
1939 else
1940 cnic->cm_abort(bnx2i_ep->cm_sk);
1941 } else
1942 goto free_resc;
1943
1944 /* wait for option-2 conn teardown */
1945 wait_event_interruptible(bnx2i_ep->ofld_wait,
1946 bnx2i_ep->state != EP_STATE_DISCONN_START);
1947
1948 if (signal_pending(current))
1949 flush_signals(current);
1950 del_timer_sync(&bnx2i_ep->ofld_timer);
1951
1952destroy_conn:
1953 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1954 mutex_unlock(&hba->net_dev_lock);
1955 return;
1956 }
1957free_resc:
1958 mutex_unlock(&hba->net_dev_lock);
1959 bnx2i_free_qp_resc(hba, bnx2i_ep);
1960return_bnx2i_ep:
1961 if (bnx2i_conn)
1962 bnx2i_conn->ep = NULL;
1963
1964 bnx2i_free_ep(ep);
1965
1966 if (!hba->ofld_conns_active)
1967 bnx2i_unreg_dev_all();
1968}
1969
1970
1971/**
1972 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
1973 * @shost: scsi host pointer
1974 * @params: pointer to buffer containing iscsi path message
1975 */
1976static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
1977{
1978 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1979 char *buf = (char *) params;
1980 u16 len = sizeof(*params);
1981
1982 /* handled by cnic driver */
1983 hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
1984 len);
1985
1986 return 0;
1987}
1988
1989
1990/*
1991 * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
1992 * used while registering with the scsi host and iSCSI transport module.
1993 */
1994static struct scsi_host_template bnx2i_host_template = {
1995 .module = THIS_MODULE,
1996 .name = "Broadcom Offload iSCSI Initiator",
1997 .proc_name = "bnx2i",
1998 .queuecommand = iscsi_queuecommand,
1999 .eh_abort_handler = iscsi_eh_abort,
2000 .eh_device_reset_handler = iscsi_eh_device_reset,
2001 .eh_target_reset_handler = iscsi_eh_target_reset,
2002 .can_queue = 1024,
2003 .max_sectors = 127,
2004 .cmd_per_lun = 32,
2005 .this_id = -1,
2006 .use_clustering = ENABLE_CLUSTERING,
2007 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
2008 .shost_attrs = bnx2i_dev_attributes,
2009};
2010
2011struct iscsi_transport bnx2i_iscsi_transport = {
2012 .owner = THIS_MODULE,
2013 .name = "bnx2i",
2014 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
2015 CAP_MULTI_R2T | CAP_DATADGST |
2016 CAP_DATA_PATH_OFFLOAD,
2017 .param_mask = ISCSI_MAX_RECV_DLENGTH |
2018 ISCSI_MAX_XMIT_DLENGTH |
2019 ISCSI_HDRDGST_EN |
2020 ISCSI_DATADGST_EN |
2021 ISCSI_INITIAL_R2T_EN |
2022 ISCSI_MAX_R2T |
2023 ISCSI_IMM_DATA_EN |
2024 ISCSI_FIRST_BURST |
2025 ISCSI_MAX_BURST |
2026 ISCSI_PDU_INORDER_EN |
2027 ISCSI_DATASEQ_INORDER_EN |
2028 ISCSI_ERL |
2029 ISCSI_CONN_PORT |
2030 ISCSI_CONN_ADDRESS |
2031 ISCSI_EXP_STATSN |
2032 ISCSI_PERSISTENT_PORT |
2033 ISCSI_PERSISTENT_ADDRESS |
2034 ISCSI_TARGET_NAME | ISCSI_TPGT |
2035 ISCSI_USERNAME | ISCSI_PASSWORD |
2036 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
2037 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
2038 ISCSI_LU_RESET_TMO |
2039 ISCSI_PING_TMO | ISCSI_RECV_TMO |
2040 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
2041 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
2042 .create_session = bnx2i_session_create,
2043 .destroy_session = bnx2i_session_destroy,
2044 .create_conn = bnx2i_conn_create,
2045 .bind_conn = bnx2i_conn_bind,
2046 .destroy_conn = bnx2i_conn_destroy,
2047 .set_param = iscsi_set_param,
2048 .get_conn_param = bnx2i_conn_get_param,
2049 .get_session_param = iscsi_session_get_param,
2050 .get_host_param = bnx2i_host_get_param,
2051 .start_conn = bnx2i_conn_start,
2052 .stop_conn = iscsi_conn_stop,
2053 .send_pdu = iscsi_conn_send_pdu,
2054 .xmit_task = bnx2i_task_xmit,
2055 .get_stats = bnx2i_conn_get_stats,
2056 /* TCP connect - disconnect - option-2 interface calls */
2057 .ep_connect = bnx2i_ep_connect,
2058 .ep_poll = bnx2i_ep_poll,
2059 .ep_disconnect = bnx2i_ep_disconnect,
2060 .set_path = bnx2i_nl_set_path,
2061 /* Error recovery timeout call */
2062 .session_recovery_timedout = iscsi_session_recovery_timedout,
2063 .cleanup_task = bnx2i_cleanup_task,
2064};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 000000000000..96426b751eb2
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,142 @@
1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2004 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11
12#include "bnx2i.h"
13
14/**
15 * bnx2i_dev_to_hba - maps dev pointer to adapter struct
16 * @dev: device pointer
17 *
18 * Map device to hba structure
19 */
20static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
21{
22 struct Scsi_Host *shost = class_to_shost(dev);
23 return iscsi_host_priv(shost);
24}
25
26
27/**
28 * bnx2i_show_sq_info - returns currently configured send queue (SQ) size
29 * @dev: device pointer
30 * @buf: buffer to return current SQ size parameter
31 *
32 * Returns current SQ size parameter; this parameter determines the number
33 * of outstanding iSCSI commands supported on a connection
34 */
35static ssize_t bnx2i_show_sq_info(struct device *dev,
36 struct device_attribute *attr, char *buf)
37{
38 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
39
40 return sprintf(buf, "0x%x\n", hba->max_sqes);
41}
42
43
44/**
45 * bnx2i_set_sq_info - update send queue (SQ) size parameter
46 * @dev: device pointer
47 * @buf: buffer containing the new SQ size
48 * @count: parameter buffer size
49 *
50 * Interface for user to change shared queue size allocated for each conn.
51 * Must be within SQ limits and a power of 2. For the latter this is needed
52 * because of how libiscsi preallocates tasks.
53 */
54static ssize_t bnx2i_set_sq_info(struct device *dev,
55 struct device_attribute *attr,
56 const char *buf, size_t count)
57{
58 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
59 u32 val;
60 int max_sq_size;
61
62 if (hba->ofld_conns_active)
63 goto skip_config;
64
65 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
66 max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
67 else
68 max_sq_size = BNX2I_570X_SQ_WQES_MAX;
69
70 if (sscanf(buf, " 0x%x ", &val) > 0) {
71 if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
72 (is_power_of_2(val)))
73 hba->max_sqes = val;
74 }
75
76 return count;
77
78skip_config:
79 printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
80 return 0;
81}
82
83
84/**
85 * bnx2i_show_ccell_info - returns command cell (HQ) size
86 * @dev: device pointer
87 * @buf: buffer to return current CCELL size parameter
88 *
89 * returns per-connection TCP history queue size parameter
90 */
91static ssize_t bnx2i_show_ccell_info(struct device *dev,
92 struct device_attribute *attr, char *buf)
93{
94 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
95
96 return sprintf(buf, "0x%x\n", hba->num_ccell);
97}
98
99
100/**
101 * bnx2i_set_ccell_info - sets command cell (HQ) size
102 * @dev: device pointer
103 * @buf: buffer containing the new CCELL size
104 * @count: parameter buffer size
105 *
106 * updates per-connection TCP history queue size parameter
107 */
108static ssize_t bnx2i_set_ccell_info(struct device *dev,
109 struct device_attribute *attr,
110 const char *buf, size_t count)
111{
112 u32 val;
113 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
114
115 if (hba->ofld_conns_active)
116 goto skip_config;
117
118 if (sscanf(buf, " 0x%x ", &val) > 0) {
119 if ((val >= BNX2I_CCELLS_MIN) &&
120 (val <= BNX2I_CCELLS_MAX)) {
121 hba->num_ccell = val;
122 }
123 }
124
125 return count;
126
127skip_config:
128 printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
129 return 0;
130}
131
132
133static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
134 bnx2i_show_sq_info, bnx2i_set_sq_info);
135static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
136 bnx2i_show_ccell_info, bnx2i_set_ccell_info);
137
138struct device_attribute *bnx2i_dev_attributes[] = {
139 &dev_attr_sq_size,
140 &dev_attr_num_ccell,
141 NULL
142};
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index 59b0958d2d11..e3133b58e594 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
144void cxgb3i_adapter_open(struct t3cdev *); 144void cxgb3i_adapter_open(struct t3cdev *);
145void cxgb3i_adapter_close(struct t3cdev *); 145void cxgb3i_adapter_close(struct t3cdev *);
146 146
147struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
148struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, 147struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
149 struct net_device *); 148 struct net_device *);
150void cxgb3i_hba_host_remove(struct cxgb3i_hba *); 149void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 9212400b9b13..74369a3f963b 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/inet.h> 14#include <linux/inet.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <net/dst.h>
16#include <net/tcp.h> 17#include <net/tcp.h>
17#include <scsi/scsi_cmnd.h> 18#include <scsi/scsi_cmnd.h>
18#include <scsi/scsi_device.h> 19#include <scsi/scsi_device.h>
@@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev)
178 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device 179 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
179 * @t3dev: t3cdev adapter 180 * @t3dev: t3cdev adapter
180 */ 181 */
181struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) 182static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
182{ 183{
183 struct cxgb3i_adapter *snic; 184 struct cxgb3i_adapter *snic;
184 int i; 185 int i;
@@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
261 262
262/** 263/**
263 * cxgb3i_ep_connect - establish TCP connection to target portal 264 * cxgb3i_ep_connect - establish TCP connection to target portal
265 * @shost: scsi host to use
264 * @dst_addr: target IP address 266 * @dst_addr: target IP address
265 * @non_blocking: blocking or non-blocking call 267 * @non_blocking: blocking or non-blocking call
266 * 268 *
267 * Initiates a TCP/IP connection to the dst_addr 269 * Initiates a TCP/IP connection to the dst_addr
268 */ 270 */
269static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, 271static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
272 struct sockaddr *dst_addr,
270 int non_blocking) 273 int non_blocking)
271{ 274{
272 struct iscsi_endpoint *ep; 275 struct iscsi_endpoint *ep;
273 struct cxgb3i_endpoint *cep; 276 struct cxgb3i_endpoint *cep;
274 struct cxgb3i_hba *hba; 277 struct cxgb3i_hba *hba = NULL;
275 struct s3_conn *c3cn = NULL; 278 struct s3_conn *c3cn = NULL;
276 int err = 0; 279 int err = 0;
277 280
281 if (shost)
282 hba = iscsi_host_priv(shost);
283
284 cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);
285
278 c3cn = cxgb3i_c3cn_create(); 286 c3cn = cxgb3i_c3cn_create();
279 if (!c3cn) { 287 if (!c3cn) {
280 cxgb3i_log_info("ep connect OOM.\n"); 288 cxgb3i_log_info("ep connect OOM.\n");
@@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
282 goto release_conn; 290 goto release_conn;
283 } 291 }
284 292
285 err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr); 293 err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
294 (struct sockaddr_in *)dst_addr);
286 if (err < 0) { 295 if (err < 0) {
287 cxgb3i_log_info("ep connect failed.\n"); 296 cxgb3i_log_info("ep connect failed.\n");
288 goto release_conn; 297 goto release_conn;
289 } 298 }
299
290 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); 300 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
291 if (!hba) { 301 if (!hba) {
292 err = -ENOSPC; 302 err = -ENOSPC;
293 cxgb3i_log_info("NOT going through cxgbi device.\n"); 303 cxgb3i_log_info("NOT going through cxgbi device.\n");
294 goto release_conn; 304 goto release_conn;
295 } 305 }
306
307 if (shost && hba != iscsi_host_priv(shost)) {
308 err = -ENOSPC;
309 cxgb3i_log_info("Could not connect through request host%u\n",
310 shost->host_no);
311 goto release_conn;
312 }
313
296 if (c3cn_is_closing(c3cn)) { 314 if (c3cn_is_closing(c3cn)) {
297 err = -ENOSPC; 315 err = -ENOSPC;
298 cxgb3i_log_info("ep connect unable to connect.\n"); 316 cxgb3i_log_info("ep connect unable to connect.\n");
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index e11c9c180f39..c1d5be4adf9c 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1479 return NULL; 1479 return NULL;
1480} 1480}
1481 1481
1482static struct rtable *find_route(__be32 saddr, __be32 daddr, 1482static struct rtable *find_route(struct net_device *dev,
1483 __be32 saddr, __be32 daddr,
1483 __be16 sport, __be16 dport) 1484 __be16 sport, __be16 dport)
1484{ 1485{
1485 struct rtable *rt; 1486 struct rtable *rt;
1486 struct flowi fl = { 1487 struct flowi fl = {
1487 .oif = 0, 1488 .oif = dev ? dev->ifindex : 0,
1488 .nl_u = { 1489 .nl_u = {
1489 .ip4_u = { 1490 .ip4_u = {
1490 .daddr = daddr, 1491 .daddr = daddr,
@@ -1573,36 +1574,40 @@ out_err:
1573 * 1574 *
1574 * return 0 if active open request is sent, < 0 otherwise. 1575 * return 0 if active open request is sent, < 0 otherwise.
1575 */ 1576 */
1576int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin) 1577int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1578 struct sockaddr_in *usin)
1577{ 1579{
1578 struct rtable *rt; 1580 struct rtable *rt;
1579 struct net_device *dev;
1580 struct cxgb3i_sdev_data *cdata; 1581 struct cxgb3i_sdev_data *cdata;
1581 struct t3cdev *cdev; 1582 struct t3cdev *cdev;
1582 __be32 sipv4; 1583 __be32 sipv4;
1583 int err; 1584 int err;
1584 1585
1586 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
1587
1585 if (usin->sin_family != AF_INET) 1588 if (usin->sin_family != AF_INET)
1586 return -EAFNOSUPPORT; 1589 return -EAFNOSUPPORT;
1587 1590
1588 c3cn->daddr.sin_port = usin->sin_port; 1591 c3cn->daddr.sin_port = usin->sin_port;
1589 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; 1592 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1590 1593
1591 rt = find_route(c3cn->saddr.sin_addr.s_addr, 1594 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
1592 c3cn->daddr.sin_addr.s_addr, 1595 c3cn->daddr.sin_addr.s_addr,
1593 c3cn->saddr.sin_port, 1596 c3cn->saddr.sin_port,
1594 c3cn->daddr.sin_port); 1597 c3cn->daddr.sin_port);
1595 if (rt == NULL) { 1598 if (rt == NULL) {
1596 c3cn_conn_debug("NO route to 0x%x, port %u.\n", 1599 c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
1597 c3cn->daddr.sin_addr.s_addr, 1600 c3cn->daddr.sin_addr.s_addr,
1598 ntohs(c3cn->daddr.sin_port)); 1601 ntohs(c3cn->daddr.sin_port),
1602 dev ? dev->name : "any");
1599 return -ENETUNREACH; 1603 return -ENETUNREACH;
1600 } 1604 }
1601 1605
1602 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 1606 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
1603 c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n", 1607 c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
1604 c3cn->daddr.sin_addr.s_addr, 1608 c3cn->daddr.sin_addr.s_addr,
1605 ntohs(c3cn->daddr.sin_port)); 1609 ntohs(c3cn->daddr.sin_port),
1610 dev ? dev->name : "any");
1606 ip_rt_put(rt); 1611 ip_rt_put(rt);
1607 return -ENETUNREACH; 1612 return -ENETUNREACH;
1608 } 1613 }
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index ebfca960c0a9..6a1d86b1fafe 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
169void cxgb3i_sdev_remove(struct t3cdev *); 169void cxgb3i_sdev_remove(struct t3cdev *);
170 170
171struct s3_conn *cxgb3i_c3cn_create(void); 171struct s3_conn *cxgb3i_c3cn_create(void);
172int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *); 172int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
173 struct sockaddr_in *);
173void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); 174void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
174int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); 175int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
175void cxgb3i_c3cn_release(struct s3_conn *); 176void cxgb3i_c3cn_release(struct s3_conn *);
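The cxgb3i changes above thread the requesting host's net_device down into the connect path so the route lookup is pinned to that interface (flowi.oif) and the endpoint is rejected if it would not egress through the requested host. A minimal sketch of an interface-bound IPv4 route lookup, assuming the 2.6.30-era routing API (struct flowi with nl_u.ip4_u and ip_route_output_flow()); names here are illustrative, not from the driver:

static struct rtable *route_via_dev(struct net_device *dev,
				    __be32 saddr, __be32 daddr,
				    __be16 sport, __be16 dport)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = dev ? dev->ifindex : 0,	/* 0: let the kernel choose the device */
		.nl_u = {
			.ip4_u = {
				.daddr = daddr,
				.saddr = saddr,
				.tos = 0,
			},
		},
		.proto = IPPROTO_TCP,
		.uli_u = {
			.ports = {
				.sport = sport,
				.dport = dport,
			},
		},
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;	/* no usable route through the requested device */
	return rt;
}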
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 43b8c51e98d0..fd0544f7da81 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
561 struct rdac_dh_data *h = get_rdac_data(sdev); 561 struct rdac_dh_data *h = get_rdac_data(sdev);
562 switch (sense_hdr->sense_key) { 562 switch (sense_hdr->sense_key) {
563 case NOT_READY: 563 case NOT_READY:
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
565 /* LUN Not Ready - Logical Unit Not Ready and is in
566 * the process of becoming ready
567 * Just retry.
568 */
569 return ADD_TO_MLQUEUE;
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) 570 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
565 /* LUN Not Ready - Storage firmware incompatible 571 /* LUN Not Ready - Storage firmware incompatible
566 * Manual code synchonisation required. 572 * Manual code synchonisation required.
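The rdac change above treats sense data NOT_READY / ASC 0x04 / ASCQ 0x01 ("logical unit is in the process of becoming ready") as a transient condition and requeues the command instead of failing it. A minimal sketch of how a device handler's check_sense() callback can distinguish that case, assuming the scsi_dh conventions visible above (struct scsi_sense_hdr decoded by the midlayer, ADD_TO_MLQUEUE / SCSI_RETURN_NOT_HANDLED return codes); the function name is illustrative:

static int example_check_sense(struct scsi_device *sdev,
			       struct scsi_sense_hdr *sense_hdr)
{
	if (sense_hdr->sense_key == NOT_READY &&
	    sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
		return ADD_TO_MLQUEUE;	/* becoming ready: requeue and retry */

	return SCSI_RETURN_NOT_HANDLED;	/* defer to default error handling */
}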
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 03e1926f40b5..e606b4829d44 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2");
54/* fcoe host list */ 54/* fcoe host list */
55LIST_HEAD(fcoe_hostlist); 55LIST_HEAD(fcoe_hostlist);
56DEFINE_RWLOCK(fcoe_hostlist_lock); 56DEFINE_RWLOCK(fcoe_hostlist_lock);
57DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
58DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 57DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
59 58
60/* Function Prototypes */ 59/* Function Prototypes */
@@ -71,7 +70,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
71static int fcoe_hostlist_add(const struct fc_lport *); 70static int fcoe_hostlist_add(const struct fc_lport *);
72static int fcoe_hostlist_remove(const struct fc_lport *); 71static int fcoe_hostlist_remove(const struct fc_lport *);
73 72
74static int fcoe_check_wait_queue(struct fc_lport *); 73static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
75static int fcoe_device_notification(struct notifier_block *, ulong, void *); 74static int fcoe_device_notification(struct notifier_block *, ulong, void *);
76static void fcoe_dev_setup(void); 75static void fcoe_dev_setup(void);
77static void fcoe_dev_cleanup(void); 76static void fcoe_dev_cleanup(void);
@@ -146,6 +145,7 @@ static int fcoe_lport_config(struct fc_lport *lp)
146 lp->link_up = 0; 145 lp->link_up = 0;
147 lp->qfull = 0; 146 lp->qfull = 0;
148 lp->max_retry_count = 3; 147 lp->max_retry_count = 3;
148 lp->max_rport_retry_count = 3;
149 lp->e_d_tov = 2 * 1000; /* FC-FS default */ 149 lp->e_d_tov = 2 * 1000; /* FC-FS default */
150 lp->r_a_tov = 2 * 2 * 1000; 150 lp->r_a_tov = 2 * 2 * 1000;
151 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 151 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -167,6 +167,18 @@ static int fcoe_lport_config(struct fc_lport *lp)
167} 167}
168 168
169/** 169/**
170 * fcoe_queue_timer() - fcoe queue timer
171 * @lp: the fc_lport pointer
172 *
173 * Calls fcoe_check_wait_queue on timeout
174 *
175 */
176static void fcoe_queue_timer(ulong lp)
177{
178 fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
179}
180
181/**
170 * fcoe_netdev_config() - Set up netdev for SW FCoE 182 * fcoe_netdev_config() - Set up netdev for SW FCoE
171 * @lp : ptr to the fc_lport 183 * @lp : ptr to the fc_lport
172 * @netdev : ptr to the associated netdevice struct 184 * @netdev : ptr to the associated netdevice struct
@@ -236,6 +248,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
236 } 248 }
237 skb_queue_head_init(&fc->fcoe_pending_queue); 249 skb_queue_head_init(&fc->fcoe_pending_queue);
238 fc->fcoe_pending_queue_active = 0; 250 fc->fcoe_pending_queue_active = 0;
251 setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
239 252
240 /* setup Source Mac Address */ 253 /* setup Source Mac Address */
241 memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr, 254 memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
@@ -386,6 +399,9 @@ static int fcoe_if_destroy(struct net_device *netdev)
386 /* Free existing skbs */ 399 /* Free existing skbs */
387 fcoe_clean_pending_queue(lp); 400 fcoe_clean_pending_queue(lp);
388 401
402 /* Stop the timer */
403 del_timer_sync(&fc->timer);
404
389 /* Free memory used by statistical counters */ 405 /* Free memory used by statistical counters */
390 fc_lport_free_stats(lp); 406 fc_lport_free_stats(lp);
391 407
@@ -988,7 +1004,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
988 */ 1004 */
989int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) 1005int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
990{ 1006{
991 int wlen, rc = 0; 1007 int wlen;
992 u32 crc; 1008 u32 crc;
993 struct ethhdr *eh; 1009 struct ethhdr *eh;
994 struct fcoe_crc_eof *cp; 1010 struct fcoe_crc_eof *cp;
@@ -1021,8 +1037,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1021 sof = fr_sof(fp); 1037 sof = fr_sof(fp);
1022 eof = fr_eof(fp); 1038 eof = fr_eof(fp);
1023 1039
1024 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ? 1040 elen = sizeof(struct ethhdr);
1025 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
1026 hlen = sizeof(struct fcoe_hdr); 1041 hlen = sizeof(struct fcoe_hdr);
1027 tlen = sizeof(struct fcoe_crc_eof); 1042 tlen = sizeof(struct fcoe_crc_eof);
1028 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; 1043 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
@@ -1107,18 +1122,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1107 /* send down to lld */ 1122 /* send down to lld */
1108 fr_dev(fp) = lp; 1123 fr_dev(fp) = lp;
1109 if (fc->fcoe_pending_queue.qlen) 1124 if (fc->fcoe_pending_queue.qlen)
1110 rc = fcoe_check_wait_queue(lp); 1125 fcoe_check_wait_queue(lp, skb);
1111 1126 else if (fcoe_start_io(skb))
1112 if (rc == 0) 1127 fcoe_check_wait_queue(lp, skb);
1113 rc = fcoe_start_io(skb);
1114
1115 if (rc) {
1116 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1117 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1118 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1119 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1120 lp->qfull = 1;
1121 }
1122 1128
1123 return 0; 1129 return 0;
1124} 1130}
@@ -1268,32 +1274,6 @@ int fcoe_percpu_receive_thread(void *arg)
1268} 1274}
1269 1275
1270/** 1276/**
1271 * fcoe_watchdog() - fcoe timer callback
1272 * @vp:
1273 *
1274 * This checks the pending queue length for fcoe and set lport qfull
1275 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
1276 * fcoe_hostlist.
1277 *
1278 * Returns: 0 for success
1279 */
1280void fcoe_watchdog(ulong vp)
1281{
1282 struct fcoe_softc *fc;
1283
1284 read_lock(&fcoe_hostlist_lock);
1285 list_for_each_entry(fc, &fcoe_hostlist, list) {
1286 if (fc->ctlr.lp)
1287 fcoe_check_wait_queue(fc->ctlr.lp);
1288 }
1289 read_unlock(&fcoe_hostlist_lock);
1290
1291 fcoe_timer.expires = jiffies + (1 * HZ);
1292 add_timer(&fcoe_timer);
1293}
1294
1295
1296/**
1297 * fcoe_check_wait_queue() - attempt to clear the transmit backlog 1277 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1298 * @lp: the fc_lport 1278 * @lp: the fc_lport
1299 * 1279 *
@@ -1305,16 +1285,17 @@ void fcoe_watchdog(ulong vp)
1305 * The wait_queue is used when the skb transmit fails. skb will go 1285 * The wait_queue is used when the skb transmit fails. skb will go
1306 * in the wait_queue which will be emptied by the timer function or 1286 * in the wait_queue which will be emptied by the timer function or
1307 * by the next skb transmit. 1287 * by the next skb transmit.
1308 *
1309 * Returns: 0 for success
1310 */ 1288 */
1311static int fcoe_check_wait_queue(struct fc_lport *lp) 1289static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1312{ 1290{
1313 struct fcoe_softc *fc = lport_priv(lp); 1291 struct fcoe_softc *fc = lport_priv(lp);
1314 struct sk_buff *skb; 1292 int rc;
1315 int rc = -1;
1316 1293
1317 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1294 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1295
1296 if (skb)
1297 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1298
1318 if (fc->fcoe_pending_queue_active) 1299 if (fc->fcoe_pending_queue_active)
1319 goto out; 1300 goto out;
1320 fc->fcoe_pending_queue_active = 1; 1301 fc->fcoe_pending_queue_active = 1;
@@ -1340,23 +1321,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
1340 1321
1341 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) 1322 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1342 lp->qfull = 0; 1323 lp->qfull = 0;
1324 if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
1325 mod_timer(&fc->timer, jiffies + 2);
1343 fc->fcoe_pending_queue_active = 0; 1326 fc->fcoe_pending_queue_active = 0;
1344 rc = fc->fcoe_pending_queue.qlen;
1345out: 1327out:
1328 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1329 lp->qfull = 1;
1346 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1330 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1347 return rc; 1331 return;
1348} 1332}
1349 1333
1350/** 1334/**
1351 * fcoe_dev_setup() - setup link change notification interface 1335 * fcoe_dev_setup() - setup link change notification interface
1352 */ 1336 */
1353static void fcoe_dev_setup() 1337static void fcoe_dev_setup(void)
1354{ 1338{
1355 register_netdevice_notifier(&fcoe_notifier); 1339 register_netdevice_notifier(&fcoe_notifier);
1356} 1340}
1357 1341
1358/** 1342/**
1359 * fcoe_dev_setup() - cleanup link change notification interface 1343 * fcoe_dev_cleanup() - cleanup link change notification interface
1360 */ 1344 */
1361static void fcoe_dev_cleanup(void) 1345static void fcoe_dev_cleanup(void)
1362{ 1346{
@@ -1815,10 +1799,6 @@ static int __init fcoe_init(void)
1815 /* Setup link change notification */ 1799 /* Setup link change notification */
1816 fcoe_dev_setup(); 1800 fcoe_dev_setup();
1817 1801
1818 setup_timer(&fcoe_timer, fcoe_watchdog, 0);
1819
1820 mod_timer(&fcoe_timer, jiffies + (10 * HZ));
1821
1822 fcoe_if_init(); 1802 fcoe_if_init();
1823 1803
1824 return 0; 1804 return 0;
@@ -1844,9 +1824,6 @@ static void __exit fcoe_exit(void)
1844 1824
1845 fcoe_dev_cleanup(); 1825 fcoe_dev_cleanup();
1846 1826
1847 /* Stop the timer */
1848 del_timer_sync(&fcoe_timer);
1849
1850 /* releases the associated fcoe hosts */ 1827 /* releases the associated fcoe hosts */
1851 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) 1828 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1852 fcoe_if_destroy(fc->real_dev); 1829 fcoe_if_destroy(fc->real_dev);
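The fcoe changes above replace the single global fcoe_timer/fcoe_watchdog pair, which polled every lport once a second under fcoe_hostlist_lock, with a per-lport timer that is armed only while the pending queue has backlog and is stopped in fcoe_if_destroy(). A minimal sketch of that per-object deferred-flush pattern, assuming the 2.6.30-era timer API (setup_timer(), mod_timer(), del_timer_sync()); the names are illustrative, not from the driver:

struct example_port {
	struct sk_buff_head pending;
	struct timer_list timer;
};

static void example_flush(unsigned long data)
{
	struct example_port *port = (struct example_port *)data;

	/* try to drain port->pending here ... */

	/* re-arm only if work is still outstanding */
	if (!skb_queue_empty(&port->pending))
		mod_timer(&port->timer, jiffies + 2);
}

static void example_port_init(struct example_port *port)
{
	skb_queue_head_init(&port->pending);
	setup_timer(&port->timer, example_flush, (unsigned long)port);
}

static void example_port_destroy(struct example_port *port)
{
	del_timer_sync(&port->timer);	/* must be stopped before the port is freed */
}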
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 917aae886897..a1eb8c1988b0 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -61,6 +61,7 @@ struct fcoe_softc {
61 struct packet_type fip_packet_type; 61 struct packet_type fip_packet_type;
62 struct sk_buff_head fcoe_pending_queue; 62 struct sk_buff_head fcoe_pending_queue;
63 u8 fcoe_pending_queue_active; 63 u8 fcoe_pending_queue_active;
64 struct timer_list timer; /* queue timer */
64 struct fcoe_ctlr ctlr; 65 struct fcoe_ctlr ctlr;
65}; 66};
66 67
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 62ba0f39c6bd..929411880e4b 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -213,7 +213,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
213 sol->desc.size.fd_size = htons(fcoe_size); 213 sol->desc.size.fd_size = htons(fcoe_size);
214 214
215 skb_put(skb, sizeof(*sol)); 215 skb_put(skb, sizeof(*sol));
216 skb->protocol = htons(ETH_P_802_3); 216 skb->protocol = htons(ETH_P_FIP);
217 skb_reset_mac_header(skb); 217 skb_reset_mac_header(skb);
218 skb_reset_network_header(skb); 218 skb_reset_network_header(skb);
219 fip->send(fip, skb); 219 fip->send(fip, skb);
@@ -365,7 +365,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
365 } 365 }
366 366
367 skb_put(skb, len); 367 skb_put(skb, len);
368 skb->protocol = htons(ETH_P_802_3); 368 skb->protocol = htons(ETH_P_FIP);
369 skb_reset_mac_header(skb); 369 skb_reset_mac_header(skb);
370 skb_reset_network_header(skb); 370 skb_reset_network_header(skb);
371 fip->send(fip, skb); 371 fip->send(fip, skb);
@@ -424,7 +424,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
424 if (dtype != ELS_FLOGI) 424 if (dtype != ELS_FLOGI)
425 memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); 425 memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
426 426
427 skb->protocol = htons(ETH_P_802_3); 427 skb->protocol = htons(ETH_P_FIP);
428 skb_reset_mac_header(skb); 428 skb_reset_mac_header(skb);
429 skb_reset_network_header(skb); 429 skb_reset_network_header(skb);
430 return 0; 430 return 0;
@@ -447,14 +447,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
447 u16 old_xid; 447 u16 old_xid;
448 u8 op; 448 u8 op;
449 449
450 if (fip->state == FIP_ST_NON_FIP)
451 return 0;
452
453 fh = (struct fc_frame_header *)skb->data; 450 fh = (struct fc_frame_header *)skb->data;
454 op = *(u8 *)(fh + 1); 451 op = *(u8 *)(fh + 1);
455 452
456 switch (op) { 453 if (op == ELS_FLOGI) {
457 case ELS_FLOGI:
458 old_xid = fip->flogi_oxid; 454 old_xid = fip->flogi_oxid;
459 fip->flogi_oxid = ntohs(fh->fh_ox_id); 455 fip->flogi_oxid = ntohs(fh->fh_ox_id);
460 if (fip->state == FIP_ST_AUTO) { 456 if (fip->state == FIP_ST_AUTO) {
@@ -466,6 +462,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
466 fip->map_dest = 1; 462 fip->map_dest = 1;
467 return 0; 463 return 0;
468 } 464 }
465 if (fip->state == FIP_ST_NON_FIP)
466 fip->map_dest = 1;
467 }
468
469 if (fip->state == FIP_ST_NON_FIP)
470 return 0;
471
472 switch (op) {
473 case ELS_FLOGI:
469 op = FIP_DT_FLOGI; 474 op = FIP_DT_FLOGI;
470 break; 475 break;
471 case ELS_FDISC: 476 case ELS_FDISC:
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 32ef6b87d895..a84072865fc2 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -680,6 +680,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
680 } 680 }
681 681
682 lp->max_retry_count = fnic->config.flogi_retries; 682 lp->max_retry_count = fnic->config.flogi_retries;
683 lp->max_rport_retry_count = fnic->config.plogi_retries;
683 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 684 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
684 FCP_SPPF_CONF_COMPL); 685 FCP_SPPF_CONF_COMPL);
685 if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) 686 if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 59349a316e13..1258da34fbc2 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
152 struct Scsi_Host *host, gdth_ha_str *ha) 152 struct Scsi_Host *host, gdth_ha_str *ha)
153{ 153{
154 int size = 0,len = 0; 154 int size = 0,len = 0;
155 int hlen;
155 off_t begin = 0,pos = 0; 156 off_t begin = 0,pos = 0;
156 int id, i, j, k, sec, flag; 157 int id, i, j, k, sec, flag;
157 int no_mdrv = 0, drv_no, is_mirr; 158 int no_mdrv = 0, drv_no, is_mirr;
@@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
192 if (reserve_list[0] == 0xff) 193 if (reserve_list[0] == 0xff)
193 strcpy(hrec, "--"); 194 strcpy(hrec, "--");
194 else { 195 else {
195 sprintf(hrec, "%d", reserve_list[0]); 196 hlen = sprintf(hrec, "%d", reserve_list[0]);
196 for (i = 1; i < MAX_RES_ARGS; i++) { 197 for (i = 1; i < MAX_RES_ARGS; i++) {
197 if (reserve_list[i] == 0xff) 198 if (reserve_list[i] == 0xff)
198 break; 199 break;
199 sprintf(hrec,"%s,%d", hrec, reserve_list[i]); 200 hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
200 } 201 }
201 } 202 }
202 size = sprintf(buffer+len, 203 size = sprintf(buffer+len,
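The gdth change above replaces sprintf(hrec, "%s,%d", hrec, ...) — which passes the destination buffer as its own source and has no length bound — with a running offset and snprintf() against the remaining space. A small self-contained sketch of that bounded-append idiom in plain C; the buffer size and names are illustrative:

#include <stdio.h>

static int append_id_list(char *buf, size_t size,
			  const unsigned char *ids, int count)
{
	int len = 0;
	int i;

	for (i = 0; i < count && ids[i] != 0xff; i++) {
		int n = snprintf(buf + len, size - len,
				 i ? ",%d" : "%d", ids[i]);
		if (n < 0 || (size_t)n >= size - len)
			break;		/* out of room: stop appending */
		len += n;
	}
	return len;			/* number of characters written */
}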
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ea4abee7a2a9..b4b805e8d7db 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -110,7 +110,7 @@ static const struct {
110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, 110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, 111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, 112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, 113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, 114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
115 115
116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, 116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
@@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
143static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); 143static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
144static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); 144static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
145static void ibmvfc_tgt_query_target(struct ibmvfc_target *); 145static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
146static void ibmvfc_npiv_logout(struct ibmvfc_host *);
146 147
147static const char *unknown_error = "unknown error"; 148static const char *unknown_error = "unknown error";
148 149
@@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
275 int fc_rsp_len = rsp->fcp_rsp_len; 276 int fc_rsp_len = rsp->fcp_rsp_len;
276 277
277 if ((rsp->flags & FCP_RSP_LEN_VALID) && 278 if ((rsp->flags & FCP_RSP_LEN_VALID) &&
278 ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || 279 ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
279 rsp->data.info.rsp_code)) 280 rsp->data.info.rsp_code))
280 return DID_ERROR << 16; 281 return DID_ERROR << 16;
281 282
@@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
431 case IBMVFC_TGT_ACTION_DEL_RPORT: 432 case IBMVFC_TGT_ACTION_DEL_RPORT:
432 break; 433 break;
433 default: 434 default:
435 if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
436 tgt->add_rport = 0;
434 tgt->action = action; 437 tgt->action = action;
435 break; 438 break;
436 } 439 }
@@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
475 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) 478 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
476 vhost->action = action; 479 vhost->action = action;
477 break; 480 break;
481 case IBMVFC_HOST_ACTION_LOGO_WAIT:
482 if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
483 vhost->action = action;
484 break;
478 case IBMVFC_HOST_ACTION_INIT_WAIT: 485 case IBMVFC_HOST_ACTION_INIT_WAIT:
479 if (vhost->action == IBMVFC_HOST_ACTION_INIT) 486 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
480 vhost->action = action; 487 vhost->action = action;
@@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
483 switch (vhost->action) { 490 switch (vhost->action) {
484 case IBMVFC_HOST_ACTION_INIT_WAIT: 491 case IBMVFC_HOST_ACTION_INIT_WAIT:
485 case IBMVFC_HOST_ACTION_NONE: 492 case IBMVFC_HOST_ACTION_NONE:
486 case IBMVFC_HOST_ACTION_TGT_ADD: 493 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
487 vhost->action = action; 494 vhost->action = action;
488 break; 495 break;
489 default: 496 default:
@@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
494 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) 501 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
495 vhost->action = action; 502 vhost->action = action;
496 break; 503 break;
504 case IBMVFC_HOST_ACTION_LOGO:
497 case IBMVFC_HOST_ACTION_INIT: 505 case IBMVFC_HOST_ACTION_INIT:
498 case IBMVFC_HOST_ACTION_TGT_DEL: 506 case IBMVFC_HOST_ACTION_TGT_DEL:
499 case IBMVFC_HOST_ACTION_QUERY_TGTS: 507 case IBMVFC_HOST_ACTION_QUERY_TGTS:
500 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 508 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
501 case IBMVFC_HOST_ACTION_TGT_ADD:
502 case IBMVFC_HOST_ACTION_NONE: 509 case IBMVFC_HOST_ACTION_NONE:
503 default: 510 default:
504 vhost->action = action; 511 vhost->action = action;
@@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
576 } 583 }
577 584
578 list_for_each_entry(tgt, &vhost->targets, queue) 585 list_for_each_entry(tgt, &vhost->targets, queue)
579 tgt->need_login = 1; 586 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
580 scsi_block_requests(vhost->host); 587 scsi_block_requests(vhost->host);
581 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 588 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
582 vhost->job_step = ibmvfc_npiv_login; 589 vhost->job_step = ibmvfc_npiv_login;
@@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
646 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 653 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
647 654
648 vhost->state = IBMVFC_NO_CRQ; 655 vhost->state = IBMVFC_NO_CRQ;
656 vhost->logged_in = 0;
649 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 657 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
650 free_page((unsigned long)crq->msgs); 658 free_page((unsigned long)crq->msgs);
651} 659}
@@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
692 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 700 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
693 701
694 vhost->state = IBMVFC_NO_CRQ; 702 vhost->state = IBMVFC_NO_CRQ;
703 vhost->logged_in = 0;
695 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 704 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
696 705
697 /* Clean out the queue */ 706 /* Clean out the queue */
@@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
807} 816}
808 817
809/** 818/**
810 * __ibmvfc_reset_host - Reset the connection to the server (no locking) 819 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
811 * @vhost: struct ibmvfc host to reset 820 * @vhost: struct ibmvfc host to reset
812 **/ 821 **/
813static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) 822static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
814{ 823{
815 int rc; 824 int rc;
816 825
@@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
826} 835}
827 836
828/** 837/**
829 * ibmvfc_reset_host - Reset the connection to the server 838 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
830 * @vhost: struct ibmvfc host to reset 839 * @vhost: struct ibmvfc host to reset
831 **/ 840 **/
841static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
842{
843 if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
844 !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
845 scsi_block_requests(vhost->host);
846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
847 vhost->job_step = ibmvfc_npiv_logout;
848 wake_up(&vhost->work_wait_q);
849 } else
850 ibmvfc_hard_reset_host(vhost);
851}
852
853/**
854 * ibmvfc_reset_host - Reset the connection to the server
855 * @vhost: ibmvfc host struct
856 **/
832static void ibmvfc_reset_host(struct ibmvfc_host *vhost) 857static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
833{ 858{
834 unsigned long flags; 859 unsigned long flags;
@@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
842 * ibmvfc_retry_host_init - Retry host initialization if allowed 867 * ibmvfc_retry_host_init - Retry host initialization if allowed
843 * @vhost: ibmvfc host struct 868 * @vhost: ibmvfc host struct
844 * 869 *
870 * Returns: 1 if init will be retried / 0 if not
871 *
845 **/ 872 **/
846static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) 873static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
847{ 874{
875 int retry = 0;
876
848 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { 877 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
849 vhost->delay_init = 1; 878 vhost->delay_init = 1;
850 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { 879 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
@@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
853 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); 882 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
854 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) 883 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
855 __ibmvfc_reset_host(vhost); 884 __ibmvfc_reset_host(vhost);
856 else 885 else {
857 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 886 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
887 retry = 1;
888 }
858 } 889 }
859 890
860 wake_up(&vhost->work_wait_q); 891 wake_up(&vhost->work_wait_q);
892 return retry;
861} 893}
862 894
863/** 895/**
@@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1137 login_info->partition_num = vhost->partition_number; 1169 login_info->partition_num = vhost->partition_number;
1138 login_info->vfc_frame_version = 1; 1170 login_info->vfc_frame_version = 1;
1139 login_info->fcp_version = 3; 1171 login_info->fcp_version = 3;
1172 login_info->flags = IBMVFC_FLUSH_ON_HALT;
1140 if (vhost->client_migrated) 1173 if (vhost->client_migrated)
1141 login_info->flags = IBMVFC_CLIENT_MIGRATED; 1174 login_info->flags |= IBMVFC_CLIENT_MIGRATED;
1142 1175
1143 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; 1176 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1144 login_info->capabilities = IBMVFC_CAN_MIGRATE; 1177 login_info->capabilities = IBMVFC_CAN_MIGRATE;
@@ -1452,6 +1485,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1452} 1485}
1453 1486
1454/** 1487/**
1488 * ibmvfc_relogin - Log back into the specified device
1489 * @sdev: scsi device struct
1490 *
1491 **/
1492static void ibmvfc_relogin(struct scsi_device *sdev)
1493{
1494 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1495 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1496 struct ibmvfc_target *tgt;
1497
1498 list_for_each_entry(tgt, &vhost->targets, queue) {
1499 if (rport == tgt->rport) {
1500 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
1501 break;
1502 }
1503 }
1504
1505 ibmvfc_reinit_host(vhost);
1506}
1507
1508/**
1455 * ibmvfc_scsi_done - Handle responses from commands 1509 * ibmvfc_scsi_done - Handle responses from commands
1456 * @evt: ibmvfc event to be handled 1510 * @evt: ibmvfc event to be handled
1457 * 1511 *
@@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1483 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) 1537 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1484 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); 1538 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1485 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) 1539 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
1486 ibmvfc_reinit_host(evt->vhost); 1540 ibmvfc_relogin(cmnd->device);
1487 1541
1488 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) 1542 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1489 cmnd->result = (DID_ERROR << 16); 1543 cmnd->result = (DID_ERROR << 16);
@@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2148 struct ibmvfc_host *vhost) 2202 struct ibmvfc_host *vhost)
2149{ 2203{
2150 const char *desc = ibmvfc_get_ae_desc(crq->event); 2204 const char *desc = ibmvfc_get_ae_desc(crq->event);
2205 struct ibmvfc_target *tgt;
2151 2206
2152 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," 2207 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
2153 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); 2208 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
2154 2209
2155 switch (crq->event) { 2210 switch (crq->event) {
2156 case IBMVFC_AE_LINK_UP:
2157 case IBMVFC_AE_RESUME: 2211 case IBMVFC_AE_RESUME:
2212 switch (crq->link_state) {
2213 case IBMVFC_AE_LS_LINK_DOWN:
2214 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2215 break;
2216 case IBMVFC_AE_LS_LINK_DEAD:
2217 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2218 break;
2219 case IBMVFC_AE_LS_LINK_UP:
2220 case IBMVFC_AE_LS_LINK_BOUNCED:
2221 default:
2222 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2223 vhost->delay_init = 1;
2224 __ibmvfc_reset_host(vhost);
2225 break;
2226 };
2227
2228 break;
2229 case IBMVFC_AE_LINK_UP:
2158 vhost->events_to_log |= IBMVFC_AE_LINKUP; 2230 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2159 vhost->delay_init = 1; 2231 vhost->delay_init = 1;
2160 __ibmvfc_reset_host(vhost); 2232 __ibmvfc_reset_host(vhost);
@@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2168 case IBMVFC_AE_SCN_NPORT: 2240 case IBMVFC_AE_SCN_NPORT:
2169 case IBMVFC_AE_SCN_GROUP: 2241 case IBMVFC_AE_SCN_GROUP:
2170 vhost->events_to_log |= IBMVFC_AE_RSCN; 2242 vhost->events_to_log |= IBMVFC_AE_RSCN;
2243 ibmvfc_reinit_host(vhost);
2244 break;
2171 case IBMVFC_AE_ELS_LOGO: 2245 case IBMVFC_AE_ELS_LOGO:
2172 case IBMVFC_AE_ELS_PRLO: 2246 case IBMVFC_AE_ELS_PRLO:
2173 case IBMVFC_AE_ELS_PLOGI: 2247 case IBMVFC_AE_ELS_PLOGI:
2248 list_for_each_entry(tgt, &vhost->targets, queue) {
2249 if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2250 break;
2251 if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
2252 continue;
2253 if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
2254 continue;
2255 if (crq->node_name && tgt->ids.node_name != crq->node_name)
2256 continue;
2257 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2258 }
2259
2174 ibmvfc_reinit_host(vhost); 2260 ibmvfc_reinit_host(vhost);
2175 break; 2261 break;
2176 case IBMVFC_AE_LINK_DOWN: 2262 case IBMVFC_AE_LINK_DOWN:
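For ELS LOGO/PRLO/PLOGI async events, the handler above now marks only the matching target for rport deletion before reinitializing the host: every non-zero scsi_id/wwpn/node_name field in the event must match, and an event that carries no identifiers leaves the target list untouched. A sketch of that matching rule, using kernel-style types and an illustrative function name:

static bool event_matches_target(u64 ev_scsi_id, u64 ev_wwpn, u64 ev_node_name,
				 u64 tgt_scsi_id, u64 tgt_wwpn, u64 tgt_node_name)
{
	/* an event with no identifiers does not single out any target */
	if (!ev_scsi_id && !ev_wwpn && !ev_node_name)
		return false;
	if (ev_scsi_id && ev_scsi_id != tgt_scsi_id)
		return false;
	if (ev_wwpn && ev_wwpn != tgt_wwpn)
		return false;
	if (ev_node_name && ev_node_name != tgt_node_name)
		return false;
	return true;
}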
@@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2222 return; 2308 return;
2223 case IBMVFC_CRQ_XPORT_EVENT: 2309 case IBMVFC_CRQ_XPORT_EVENT:
2224 vhost->state = IBMVFC_NO_CRQ; 2310 vhost->state = IBMVFC_NO_CRQ;
2311 vhost->logged_in = 0;
2225 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 2312 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2226 if (crq->format == IBMVFC_PARTITION_MIGRATED) { 2313 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2227 /* We need to re-setup the interpartition connection */ 2314 /* We need to re-setup the interpartition connection */
@@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2299 done = 1; 2386 done = 1;
2300 } 2387 }
2301 2388
2302 if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) 2389 if (vhost->scan_complete)
2303 done = 1; 2390 done = 1;
2304 spin_unlock_irqrestore(shost->host_lock, flags); 2391 spin_unlock_irqrestore(shost->host_lock, flags);
2305 return done; 2392 return done;
@@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2434 vhost->login_buf->resp.partition_name); 2521 vhost->login_buf->resp.partition_name);
2435} 2522}
2436 2523
2437static struct device_attribute ibmvfc_host_partition_name = {
2438 .attr = {
2439 .name = "partition_name",
2440 .mode = S_IRUGO,
2441 },
2442 .show = ibmvfc_show_host_partition_name,
2443};
2444
2445static ssize_t ibmvfc_show_host_device_name(struct device *dev, 2524static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2446 struct device_attribute *attr, char *buf) 2525 struct device_attribute *attr, char *buf)
2447{ 2526{
@@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2452 vhost->login_buf->resp.device_name); 2531 vhost->login_buf->resp.device_name);
2453} 2532}
2454 2533
2455static struct device_attribute ibmvfc_host_device_name = {
2456 .attr = {
2457 .name = "device_name",
2458 .mode = S_IRUGO,
2459 },
2460 .show = ibmvfc_show_host_device_name,
2461};
2462
2463static ssize_t ibmvfc_show_host_loc_code(struct device *dev, 2534static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2464 struct device_attribute *attr, char *buf) 2535 struct device_attribute *attr, char *buf)
2465{ 2536{
@@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2470 vhost->login_buf->resp.port_loc_code); 2541 vhost->login_buf->resp.port_loc_code);
2471} 2542}
2472 2543
2473static struct device_attribute ibmvfc_host_loc_code = {
2474 .attr = {
2475 .name = "port_loc_code",
2476 .mode = S_IRUGO,
2477 },
2478 .show = ibmvfc_show_host_loc_code,
2479};
2480
2481static ssize_t ibmvfc_show_host_drc_name(struct device *dev, 2544static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2482 struct device_attribute *attr, char *buf) 2545 struct device_attribute *attr, char *buf)
2483{ 2546{
@@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2488 vhost->login_buf->resp.drc_name); 2551 vhost->login_buf->resp.drc_name);
2489} 2552}
2490 2553
2491static struct device_attribute ibmvfc_host_drc_name = {
2492 .attr = {
2493 .name = "drc_name",
2494 .mode = S_IRUGO,
2495 },
2496 .show = ibmvfc_show_host_drc_name,
2497};
2498
2499static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, 2554static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2500 struct device_attribute *attr, char *buf) 2555 struct device_attribute *attr, char *buf)
2501{ 2556{
@@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2504 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); 2559 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2505} 2560}
2506 2561
2507static struct device_attribute ibmvfc_host_npiv_version = { 2562static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
2508 .attr = { 2563 struct device_attribute *attr, char *buf)
2509 .name = "npiv_version", 2564{
2510 .mode = S_IRUGO, 2565 struct Scsi_Host *shost = class_to_shost(dev);
2511 }, 2566 struct ibmvfc_host *vhost = shost_priv(shost);
2512 .show = ibmvfc_show_host_npiv_version, 2567 return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
2513}; 2568}
2514 2569
2515/** 2570/**
2516 * ibmvfc_show_log_level - Show the adapter's error logging level 2571 * ibmvfc_show_log_level - Show the adapter's error logging level
@@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
2556 return strlen(buf); 2611 return strlen(buf);
2557} 2612}
2558 2613
2559static struct device_attribute ibmvfc_log_level_attr = { 2614static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
2560 .attr = { 2615static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
2561 .name = "log_level", 2616static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
2562 .mode = S_IRUGO | S_IWUSR, 2617static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
2563 }, 2618static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
2564 .show = ibmvfc_show_log_level, 2619static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
2565 .store = ibmvfc_store_log_level 2620static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
2566}; 2621 ibmvfc_show_log_level, ibmvfc_store_log_level);
2567 2622
2568#ifdef CONFIG_SCSI_IBMVFC_TRACE 2623#ifdef CONFIG_SCSI_IBMVFC_TRACE
2569/** 2624/**
@@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
2612#endif 2667#endif
2613 2668
2614static struct device_attribute *ibmvfc_attrs[] = { 2669static struct device_attribute *ibmvfc_attrs[] = {
2615 &ibmvfc_host_partition_name, 2670 &dev_attr_partition_name,
2616 &ibmvfc_host_device_name, 2671 &dev_attr_device_name,
2617 &ibmvfc_host_loc_code, 2672 &dev_attr_port_loc_code,
2618 &ibmvfc_host_drc_name, 2673 &dev_attr_drc_name,
2619 &ibmvfc_host_npiv_version, 2674 &dev_attr_npiv_version,
2620 &ibmvfc_log_level_attr, 2675 &dev_attr_capabilities,
2676 &dev_attr_log_level,
2621 NULL 2677 NULL
2622}; 2678};
2623 2679
@@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2774 * @tgt: ibmvfc target struct 2830 * @tgt: ibmvfc target struct
2775 * @job_step: initialization job step 2831 * @job_step: initialization job step
2776 * 2832 *
2833 * Returns: 1 if step will be retried / 0 if not
2834 *
2777 **/ 2835 **/
2778static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, 2836static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2779 void (*job_step) (struct ibmvfc_target *)) 2837 void (*job_step) (struct ibmvfc_target *))
2780{ 2838{
2781 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { 2839 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
2782 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2840 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2783 wake_up(&tgt->vhost->work_wait_q); 2841 wake_up(&tgt->vhost->work_wait_q);
2842 return 0;
2784 } else 2843 } else
2785 ibmvfc_init_tgt(tgt, job_step); 2844 ibmvfc_init_tgt(tgt, job_step);
2845 return 1;
2786} 2846}
2787 2847
2788/* Defined in FC-LS */ 2848/* Defined in FC-LS */
@@ -2831,7 +2891,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2831 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; 2891 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
2832 struct ibmvfc_prli_svc_parms *parms = &rsp->parms; 2892 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
2833 u32 status = rsp->common.status; 2893 u32 status = rsp->common.status;
2834 int index; 2894 int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
2835 2895
2836 vhost->discovery_threads--; 2896 vhost->discovery_threads--;
2837 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2897 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2850 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; 2910 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
2851 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) 2911 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
2852 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; 2912 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
2853 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); 2913 tgt->add_rport = 1;
2854 } else 2914 } else
2855 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2915 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2856 } else if (prli_rsp[index].retry) 2916 } else if (prli_rsp[index].retry)
@@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2867 break; 2927 break;
2868 case IBMVFC_MAD_FAILED: 2928 case IBMVFC_MAD_FAILED:
2869 default: 2929 default:
2870 tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2871 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2872 rsp->status, rsp->error, status);
2873 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 2930 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2874 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); 2931 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2875 else 2932 else
2876 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2933 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2934
2935 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2936 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2937 rsp->status, rsp->error, status);
2877 break; 2938 break;
2878 }; 2939 };
2879 2940
@@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2932 struct ibmvfc_host *vhost = evt->vhost; 2993 struct ibmvfc_host *vhost = evt->vhost;
2933 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; 2994 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
2934 u32 status = rsp->common.status; 2995 u32 status = rsp->common.status;
2996 int level = IBMVFC_DEFAULT_LOG_LEVEL;
2935 2997
2936 vhost->discovery_threads--; 2998 vhost->discovery_threads--;
2937 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2999 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2960 break; 3022 break;
2961 case IBMVFC_MAD_FAILED: 3023 case IBMVFC_MAD_FAILED:
2962 default: 3024 default:
2963 tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2964 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2965 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2966 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2967
2968 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3025 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2969 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); 3026 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2970 else 3027 else
2971 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3028 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3029
3030 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3031 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3032 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3033 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2972 break; 3034 break;
2973 }; 3035 };
2974 3036
@@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3129 case IBMVFC_MAD_SUCCESS: 3191 case IBMVFC_MAD_SUCCESS:
3130 tgt_dbg(tgt, "ADISC succeeded\n"); 3192 tgt_dbg(tgt, "ADISC succeeded\n");
3131 if (ibmvfc_adisc_needs_plogi(mad, tgt)) 3193 if (ibmvfc_adisc_needs_plogi(mad, tgt))
3132 tgt->need_login = 1; 3194 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3133 break; 3195 break;
3134 case IBMVFC_MAD_DRIVER_FAILED: 3196 case IBMVFC_MAD_DRIVER_FAILED:
3135 break; 3197 break;
3136 case IBMVFC_MAD_FAILED: 3198 case IBMVFC_MAD_FAILED:
3137 default: 3199 default:
3138 tgt->need_login = 1; 3200 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3139 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; 3201 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
3140 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; 3202 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
3141 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3203 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3322 struct ibmvfc_host *vhost = evt->vhost; 3384 struct ibmvfc_host *vhost = evt->vhost;
3323 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; 3385 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
3324 u32 status = rsp->common.status; 3386 u32 status = rsp->common.status;
3387 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3325 3388
3326 vhost->discovery_threads--; 3389 vhost->discovery_threads--;
3327 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3390 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3341 break; 3404 break;
3342 case IBMVFC_MAD_FAILED: 3405 case IBMVFC_MAD_FAILED:
3343 default: 3406 default:
3344 tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3345 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3346 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3347 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3348
3349 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && 3407 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
3350 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && 3408 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
3351 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) 3409 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
3352 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3410 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3353 else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3411 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3354 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); 3412 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3355 else 3413 else
3356 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3414 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3415
3416 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3417 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3418 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3419 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3357 break; 3420 break;
3358 }; 3421 };
3359 3422
@@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
3420 } 3483 }
3421 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3484 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3422 3485
3423 tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); 3486 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
3424 if (!tgt) { 3487 if (!tgt) {
3425 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", 3488 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
3426 scsi_id); 3489 scsi_id);
@@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3472 struct ibmvfc_host *vhost = evt->vhost; 3535 struct ibmvfc_host *vhost = evt->vhost;
3473 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; 3536 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
3474 u32 mad_status = rsp->common.status; 3537 u32 mad_status = rsp->common.status;
3538 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3475 3539
3476 switch (mad_status) { 3540 switch (mad_status) {
3477 case IBMVFC_MAD_SUCCESS: 3541 case IBMVFC_MAD_SUCCESS:
@@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3480 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); 3544 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
3481 break; 3545 break;
3482 case IBMVFC_MAD_FAILED: 3546 case IBMVFC_MAD_FAILED:
3483 dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", 3547 level += ibmvfc_retry_host_init(vhost);
3484 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); 3548 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
3485 ibmvfc_retry_host_init(vhost); 3549 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3486 break; 3550 break;
3487 case IBMVFC_MAD_DRIVER_FAILED: 3551 case IBMVFC_MAD_DRIVER_FAILED:
3488 break; 3552 break;
@@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3534 u32 mad_status = evt->xfer_iu->npiv_login.common.status; 3598 u32 mad_status = evt->xfer_iu->npiv_login.common.status;
3535 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; 3599 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
3536 unsigned int npiv_max_sectors; 3600 unsigned int npiv_max_sectors;
3601 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3537 3602
3538 switch (mad_status) { 3603 switch (mad_status) {
3539 case IBMVFC_MAD_SUCCESS: 3604 case IBMVFC_MAD_SUCCESS:
3540 ibmvfc_free_event(evt); 3605 ibmvfc_free_event(evt);
3541 break; 3606 break;
3542 case IBMVFC_MAD_FAILED: 3607 case IBMVFC_MAD_FAILED:
3543 dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
3544 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3545 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3608 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3546 ibmvfc_retry_host_init(vhost); 3609 level += ibmvfc_retry_host_init(vhost);
3547 else 3610 else
3548 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 3611 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3612 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
3613 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3549 ibmvfc_free_event(evt); 3614 ibmvfc_free_event(evt);
3550 return; 3615 return;
3551 case IBMVFC_MAD_CRQ_ERROR: 3616 case IBMVFC_MAD_CRQ_ERROR:
@@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3578 return; 3643 return;
3579 } 3644 }
3580 3645
3646 vhost->logged_in = 1;
3581 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); 3647 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
3582 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", 3648 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
3583 rsp->partition_name, rsp->device_name, rsp->port_loc_code, 3649 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
@@ -3636,6 +3702,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3636}; 3702};
3637 3703
3638/** 3704/**
3705 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
3706 * @vhost: ibmvfc host struct
3707 *
3708 **/
3709static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
3710{
3711 struct ibmvfc_host *vhost = evt->vhost;
3712 u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
3713
3714 ibmvfc_free_event(evt);
3715
3716 switch (mad_status) {
3717 case IBMVFC_MAD_SUCCESS:
3718 if (list_empty(&vhost->sent) &&
3719 vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
3720 ibmvfc_init_host(vhost, 0);
3721 return;
3722 }
3723 break;
3724 case IBMVFC_MAD_FAILED:
3725 case IBMVFC_MAD_NOT_SUPPORTED:
3726 case IBMVFC_MAD_CRQ_ERROR:
3727 case IBMVFC_MAD_DRIVER_FAILED:
3728 default:
3729 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
3730 break;
3731 }
3732
3733 ibmvfc_hard_reset_host(vhost);
3734}
3735
3736/**
3737 * ibmvfc_npiv_logout - Issue an NPIV Logout
3738 * @vhost: ibmvfc host struct
3739 *
3740 **/
3741static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
3742{
3743 struct ibmvfc_npiv_logout_mad *mad;
3744 struct ibmvfc_event *evt;
3745
3746 evt = ibmvfc_get_event(vhost);
3747 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
3748
3749 mad = &evt->iu.npiv_logout;
3750 memset(mad, 0, sizeof(*mad));
3751 mad->common.version = 1;
3752 mad->common.opcode = IBMVFC_NPIV_LOGOUT;
3753 mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
3754
3755 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
3756
3757 if (!ibmvfc_send_event(evt, vhost, default_timeout))
3758 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
3759 else
3760 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3761}
3762
3763/**
3639 * ibmvfc_dev_init_to_do - Is there target initialization work to do? 3764 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
3640 * @vhost: ibmvfc host struct 3765 * @vhost: ibmvfc host struct
3641 * 3766 *
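The new reset path above prefers a soft reset — issue an NPIV Logout, wait in LOGO_WAIT, and re-run NPIV Login when the logout completes — and only breaks the CRQ (ibmvfc_hard_reset_host()) when the adapter is not logged in or the logout fails. A minimal, self-contained sketch of that decision; this is not driver code and the states and helpers are illustrative:

#include <stdbool.h>

enum host_action { HOST_NONE, HOST_INIT, HOST_LOGO, HOST_LOGO_WAIT, HOST_HARD_RESET };

struct host {
	bool logged_in;
	enum host_action action;
};

static void reset_host(struct host *h)
{
	if (h->logged_in && h->action != HOST_LOGO_WAIT)
		h->action = HOST_LOGO;		/* soft path: send NPIV Logout */
	else
		h->action = HOST_HARD_RESET;	/* fall back: tear down the CRQ */
}

static void logout_done(struct host *h, bool success)
{
	if (success && h->action == HOST_LOGO_WAIT)
		h->action = HOST_INIT;		/* re-run NPIV Login */
	else
		h->action = HOST_HARD_RESET;	/* logout failed: hard reset */
}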
@@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3671 switch (vhost->action) { 3796 switch (vhost->action) {
3672 case IBMVFC_HOST_ACTION_NONE: 3797 case IBMVFC_HOST_ACTION_NONE:
3673 case IBMVFC_HOST_ACTION_INIT_WAIT: 3798 case IBMVFC_HOST_ACTION_INIT_WAIT:
3799 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3674 return 0; 3800 return 0;
3675 case IBMVFC_HOST_ACTION_TGT_INIT: 3801 case IBMVFC_HOST_ACTION_TGT_INIT:
3676 case IBMVFC_HOST_ACTION_QUERY_TGTS: 3802 case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3683 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) 3809 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3684 return 0; 3810 return 0;
3685 return 1; 3811 return 1;
3812 case IBMVFC_HOST_ACTION_LOGO:
3686 case IBMVFC_HOST_ACTION_INIT: 3813 case IBMVFC_HOST_ACTION_INIT:
3687 case IBMVFC_HOST_ACTION_ALLOC_TGTS: 3814 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3688 case IBMVFC_HOST_ACTION_TGT_ADD:
3689 case IBMVFC_HOST_ACTION_TGT_DEL: 3815 case IBMVFC_HOST_ACTION_TGT_DEL:
3690 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 3816 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
3691 case IBMVFC_HOST_ACTION_QUERY: 3817 case IBMVFC_HOST_ACTION_QUERY:
@@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3740static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) 3866static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3741{ 3867{
3742 struct ibmvfc_host *vhost = tgt->vhost; 3868 struct ibmvfc_host *vhost = tgt->vhost;
3743 struct fc_rport *rport = tgt->rport; 3869 struct fc_rport *rport;
3744 unsigned long flags; 3870 unsigned long flags;
3745 3871
3746 if (rport) { 3872 tgt_dbg(tgt, "Adding rport\n");
3747 tgt_dbg(tgt, "Setting rport roles\n"); 3873 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3748 fc_remote_port_rolechg(rport, tgt->ids.roles); 3874 spin_lock_irqsave(vhost->host->host_lock, flags);
3749 spin_lock_irqsave(vhost->host->host_lock, flags); 3875
3750 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3876 if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3877 tgt_dbg(tgt, "Deleting rport\n");
3878 list_del(&tgt->queue);
3751 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3879 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3880 fc_remote_port_delete(rport);
3881 del_timer_sync(&tgt->timer);
3882 kref_put(&tgt->kref, ibmvfc_release_tgt);
3752 return; 3883 return;
3753 } 3884 }
3754 3885
3755 tgt_dbg(tgt, "Adding rport\n");
3756 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3757 spin_lock_irqsave(vhost->host->host_lock, flags);
3758 tgt->rport = rport;
3759 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3760 if (rport) { 3886 if (rport) {
3761 tgt_dbg(tgt, "rport add succeeded\n"); 3887 tgt_dbg(tgt, "rport add succeeded\n");
3888 tgt->rport = rport;
3762 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; 3889 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3763 rport->supported_classes = 0; 3890 rport->supported_classes = 0;
3764 tgt->target_id = rport->scsi_target_id; 3891 tgt->target_id = rport->scsi_target_id;
@@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3789 vhost->events_to_log = 0; 3916 vhost->events_to_log = 0;
3790 switch (vhost->action) { 3917 switch (vhost->action) {
3791 case IBMVFC_HOST_ACTION_NONE: 3918 case IBMVFC_HOST_ACTION_NONE:
3919 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3792 case IBMVFC_HOST_ACTION_INIT_WAIT: 3920 case IBMVFC_HOST_ACTION_INIT_WAIT:
3793 break; 3921 break;
3922 case IBMVFC_HOST_ACTION_LOGO:
3923 vhost->job_step(vhost);
3924 break;
3794 case IBMVFC_HOST_ACTION_INIT: 3925 case IBMVFC_HOST_ACTION_INIT:
3795 BUG_ON(vhost->state != IBMVFC_INITIALIZING); 3926 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
3796 if (vhost->delay_init) { 3927 if (vhost->delay_init) {
@@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3836 3967
3837 if (vhost->state == IBMVFC_INITIALIZING) { 3968 if (vhost->state == IBMVFC_INITIALIZING) {
3838 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { 3969 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
3839 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); 3970 if (vhost->reinit) {
3840 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); 3971 vhost->reinit = 0;
3841 vhost->init_retries = 0; 3972 scsi_block_requests(vhost->host);
3842 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3973 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3843 scsi_unblock_requests(vhost->host); 3974 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3975 } else {
3976 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
3977 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3978 wake_up(&vhost->init_wait_q);
3979 schedule_work(&vhost->rport_add_work_q);
3980 vhost->init_retries = 0;
3981 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3982 scsi_unblock_requests(vhost->host);
3983 }
3984
3844 return; 3985 return;
3845 } else { 3986 } else {
3846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 3987 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3871 if (!ibmvfc_dev_init_to_do(vhost)) 4012 if (!ibmvfc_dev_init_to_do(vhost))
3872 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); 4013 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
3873 break; 4014 break;
3874 case IBMVFC_HOST_ACTION_TGT_ADD:
3875 list_for_each_entry(tgt, &vhost->targets, queue) {
3876 if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
3877 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3878 ibmvfc_tgt_add_rport(tgt);
3879 return;
3880 }
3881 }
3882
3883 if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
3884 vhost->reinit = 0;
3885 scsi_block_requests(vhost->host);
3886 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3887 } else {
3888 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3889 wake_up(&vhost->init_wait_q);
3890 }
3891 break;
3892 default: 4015 default:
3893 break; 4016 break;
3894 }; 4017 };
@@ -4118,6 +4241,56 @@ nomem:
4118} 4241}
4119 4242
4120/** 4243/**
4244 * ibmvfc_rport_add_thread - Worker thread for rport adds
4245 * @work: work struct
4246 *
4247 **/
4248static void ibmvfc_rport_add_thread(struct work_struct *work)
4249{
4250 struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
4251 rport_add_work_q);
4252 struct ibmvfc_target *tgt;
4253 struct fc_rport *rport;
4254 unsigned long flags;
4255 int did_work;
4256
4257 ENTER;
4258 spin_lock_irqsave(vhost->host->host_lock, flags);
4259 do {
4260 did_work = 0;
4261 if (vhost->state != IBMVFC_ACTIVE)
4262 break;
4263
4264 list_for_each_entry(tgt, &vhost->targets, queue) {
4265 if (tgt->add_rport) {
4266 did_work = 1;
4267 tgt->add_rport = 0;
4268 kref_get(&tgt->kref);
4269 rport = tgt->rport;
4270 if (!rport) {
4271 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4272 ibmvfc_tgt_add_rport(tgt);
4273 } else if (get_device(&rport->dev)) {
4274 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4275 tgt_dbg(tgt, "Setting rport roles\n");
4276 fc_remote_port_rolechg(rport, tgt->ids.roles);
4277 put_device(&rport->dev);
4278 }
4279
4280 kref_put(&tgt->kref, ibmvfc_release_tgt);
4281 spin_lock_irqsave(vhost->host->host_lock, flags);
4282 break;
4283 }
4284 }
4285 } while(did_work);
4286
4287 if (vhost->state == IBMVFC_ACTIVE)
4288 vhost->scan_complete = 1;
4289 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4290 LEAVE;
4291}
4292
4293/**
4121 * ibmvfc_probe - Adapter hot plug add entry point 4294 * ibmvfc_probe - Adapter hot plug add entry point
4122 * @vdev: vio device struct 4295 * @vdev: vio device struct
4123 * @id: vio device id struct 4296 * @id: vio device id struct
@@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4160 strcpy(vhost->partition_name, "UNKNOWN"); 4333 strcpy(vhost->partition_name, "UNKNOWN");
4161 init_waitqueue_head(&vhost->work_wait_q); 4334 init_waitqueue_head(&vhost->work_wait_q);
4162 init_waitqueue_head(&vhost->init_wait_q); 4335 init_waitqueue_head(&vhost->init_wait_q);
4336 INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
4163 4337
4164 if ((rc = ibmvfc_alloc_mem(vhost))) 4338 if ((rc = ibmvfc_alloc_mem(vhost)))
4165 goto free_scsi_host; 4339 goto free_scsi_host;
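
Editorial note: the ibmvfc.c hunks above move rport additions out of the main work thread into a work_struct: INIT_WORK() wires the handler at probe time, the TGT_DEL_FAILED branch kicks it with schedule_work(), and ibmvfc_rport_add_thread() rescans the target list for add_rport flags, dropping the host lock around the sleeping transport calls. A minimal sketch of that deferred-work pattern under hypothetical names (my_host, my_tgt, my_add_worker), not the driver's own code:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct my_tgt {
		struct list_head queue;
		int add_rport;			/* set by discovery, cleared by the worker */
	};

	struct my_host {
		spinlock_t lock;
		struct list_head targets;
		struct work_struct add_work;	/* INIT_WORK(&h->add_work, my_add_worker) at probe */
	};

	static void my_add_worker(struct work_struct *work)
	{
		struct my_host *h = container_of(work, struct my_host, add_work);
		struct my_tgt *tgt;
		unsigned long flags;
		int did_work;

		spin_lock_irqsave(&h->lock, flags);
		do {
			did_work = 0;
			list_for_each_entry(tgt, &h->targets, queue) {
				if (!tgt->add_rport)
					continue;
				did_work = 1;
				tgt->add_rport = 0;
				/* drop the lock: fc_remote_port_add() and friends may sleep */
				spin_unlock_irqrestore(&h->lock, flags);
				/* ... add or update the rport here ... */
				spin_lock_irqsave(&h->lock, flags);
				break;	/* list may have changed while unlocked; rescan */
			}
		} while (did_work);
		spin_unlock_irqrestore(&h->lock, flags);
	}

Once targets have been flagged, the state machine only needs a single schedule_work(&h->add_work), which is what the new TGT_DEL_FAILED handling above does with rport_add_work_q.
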
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ca1dcf7a7568..c2668d7d67f5 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.5" 32#define IBMVFC_DRIVER_VERSION "1.0.6"
33#define IBMVFC_DRIVER_DATE "(March 19, 2009)" 33#define IBMVFC_DRIVER_DATE "(May 28, 2009)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 60 35#define IBMVFC_DEFAULT_TIMEOUT 60
36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -57,9 +57,10 @@
57 * Ensure we have resources for ERP and initialization: 57 * Ensure we have resources for ERP and initialization:
58 * 1 for ERP 58 * 1 for ERP
59 * 1 for initialization 59 * 1 for initialization
60 * 1 for NPIV Logout
60 * 2 for each discovery thread 61 * 2 for each discovery thread
61 */ 62 */
62#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) 63#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2))
63 64
64#define IBMVFC_MAD_SUCCESS 0x00 65#define IBMVFC_MAD_SUCCESS 0x00
65#define IBMVFC_MAD_NOT_SUPPORTED 0xF1 66#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
@@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
127 IBMVFC_IMPLICIT_LOGOUT = 0x0040, 128 IBMVFC_IMPLICIT_LOGOUT = 0x0040,
128 IBMVFC_PASSTHRU = 0x0200, 129 IBMVFC_PASSTHRU = 0x0200,
129 IBMVFC_TMF_MAD = 0x0100, 130 IBMVFC_TMF_MAD = 0x0100,
131 IBMVFC_NPIV_LOGOUT = 0x0800,
130}; 132};
131 133
132struct ibmvfc_mad_common { 134struct ibmvfc_mad_common {
@@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
143 struct srp_direct_buf buffer; 145 struct srp_direct_buf buffer;
144}__attribute__((packed, aligned (8))); 146}__attribute__((packed, aligned (8)));
145 147
148struct ibmvfc_npiv_logout_mad {
149 struct ibmvfc_mad_common common;
150}__attribute__((packed, aligned (8)));
151
146#define IBMVFC_MAX_NAME 256 152#define IBMVFC_MAX_NAME 256
147 153
148struct ibmvfc_npiv_login { 154struct ibmvfc_npiv_login {
@@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
201#define IBMVFC_NATIVE_FC 0x01 207#define IBMVFC_NATIVE_FC 0x01
202#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 208#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
203 u32 reserved; 209 u32 reserved;
204 u64 capabilites; 210 u64 capabilities;
211#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
205 u32 max_cmds; 212 u32 max_cmds;
206 u32 scsi_id_sz; 213 u32 scsi_id_sz;
207 u64 max_dma_len; 214 u64 max_dma_len;
@@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
541 dma_addr_t msg_token; 548 dma_addr_t msg_token;
542}; 549};
543 550
551enum ibmvfc_ae_link_state {
552 IBMVFC_AE_LS_LINK_UP = 0x01,
553 IBMVFC_AE_LS_LINK_BOUNCED = 0x02,
554 IBMVFC_AE_LS_LINK_DOWN = 0x04,
555 IBMVFC_AE_LS_LINK_DEAD = 0x08,
556};
557
544struct ibmvfc_async_crq { 558struct ibmvfc_async_crq {
545 volatile u8 valid; 559 volatile u8 valid;
546 u8 pad[3]; 560 u8 link_state;
561 u8 pad[2];
547 u32 pad2; 562 u32 pad2;
548 volatile u64 event; 563 volatile u64 event;
549 volatile u64 scsi_id; 564 volatile u64 scsi_id;
@@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue {
561union ibmvfc_iu { 576union ibmvfc_iu {
562 struct ibmvfc_mad_common mad_common; 577 struct ibmvfc_mad_common mad_common;
563 struct ibmvfc_npiv_login_mad npiv_login; 578 struct ibmvfc_npiv_login_mad npiv_login;
579 struct ibmvfc_npiv_logout_mad npiv_logout;
564 struct ibmvfc_discover_targets discover_targets; 580 struct ibmvfc_discover_targets discover_targets;
565 struct ibmvfc_port_login plogi; 581 struct ibmvfc_port_login plogi;
566 struct ibmvfc_process_login prli; 582 struct ibmvfc_process_login prli;
@@ -575,7 +591,6 @@ enum ibmvfc_target_action {
575 IBMVFC_TGT_ACTION_NONE = 0, 591 IBMVFC_TGT_ACTION_NONE = 0,
576 IBMVFC_TGT_ACTION_INIT, 592 IBMVFC_TGT_ACTION_INIT,
577 IBMVFC_TGT_ACTION_INIT_WAIT, 593 IBMVFC_TGT_ACTION_INIT_WAIT,
578 IBMVFC_TGT_ACTION_ADD_RPORT,
579 IBMVFC_TGT_ACTION_DEL_RPORT, 594 IBMVFC_TGT_ACTION_DEL_RPORT,
580}; 595};
581 596
@@ -588,6 +603,7 @@ struct ibmvfc_target {
588 int target_id; 603 int target_id;
589 enum ibmvfc_target_action action; 604 enum ibmvfc_target_action action;
590 int need_login; 605 int need_login;
606 int add_rport;
591 int init_retries; 607 int init_retries;
592 u32 cancel_key; 608 u32 cancel_key;
593 struct ibmvfc_service_parms service_parms; 609 struct ibmvfc_service_parms service_parms;
@@ -627,6 +643,8 @@ struct ibmvfc_event_pool {
627 643
628enum ibmvfc_host_action { 644enum ibmvfc_host_action {
629 IBMVFC_HOST_ACTION_NONE = 0, 645 IBMVFC_HOST_ACTION_NONE = 0,
646 IBMVFC_HOST_ACTION_LOGO,
647 IBMVFC_HOST_ACTION_LOGO_WAIT,
630 IBMVFC_HOST_ACTION_INIT, 648 IBMVFC_HOST_ACTION_INIT,
631 IBMVFC_HOST_ACTION_INIT_WAIT, 649 IBMVFC_HOST_ACTION_INIT_WAIT,
632 IBMVFC_HOST_ACTION_QUERY, 650 IBMVFC_HOST_ACTION_QUERY,
@@ -635,7 +653,6 @@ enum ibmvfc_host_action {
635 IBMVFC_HOST_ACTION_ALLOC_TGTS, 653 IBMVFC_HOST_ACTION_ALLOC_TGTS,
636 IBMVFC_HOST_ACTION_TGT_INIT, 654 IBMVFC_HOST_ACTION_TGT_INIT,
637 IBMVFC_HOST_ACTION_TGT_DEL_FAILED, 655 IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
638 IBMVFC_HOST_ACTION_TGT_ADD,
639}; 656};
640 657
641enum ibmvfc_host_state { 658enum ibmvfc_host_state {
@@ -682,6 +699,8 @@ struct ibmvfc_host {
682 int client_migrated; 699 int client_migrated;
683 int reinit; 700 int reinit;
684 int delay_init; 701 int delay_init;
702 int scan_complete;
703 int logged_in;
685 int events_to_log; 704 int events_to_log;
686#define IBMVFC_AE_LINKUP 0x0001 705#define IBMVFC_AE_LINKUP 0x0001
687#define IBMVFC_AE_LINKDOWN 0x0002 706#define IBMVFC_AE_LINKDOWN 0x0002
@@ -692,6 +711,7 @@ struct ibmvfc_host {
692 void (*job_step) (struct ibmvfc_host *); 711 void (*job_step) (struct ibmvfc_host *);
693 struct task_struct *work_thread; 712 struct task_struct *work_thread;
694 struct tasklet_struct tasklet; 713 struct tasklet_struct tasklet;
714 struct work_struct rport_add_work_q;
695 wait_queue_head_t init_wait_q; 715 wait_queue_head_t init_wait_q;
696 wait_queue_head_t work_wait_q; 716 wait_queue_head_t work_wait_q;
697}; 717};
@@ -707,6 +727,12 @@ struct ibmvfc_host {
707#define tgt_err(t, fmt, ...) \ 727#define tgt_err(t, fmt, ...) \
708 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 728 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
709 729
730#define tgt_log(t, level, fmt, ...) \
731 do { \
732 if ((t)->vhost->log_level >= level) \
733 tgt_err(t, fmt, ##__VA_ARGS__); \
734 } while (0)
735
710#define ibmvfc_dbg(vhost, ...) \ 736#define ibmvfc_dbg(vhost, ...) \
711 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) 737 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
712 738
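
Editorial note: the new tgt_log macro above wraps a guarded tgt_err() call in do { } while (0). A small sketch of why that wrapper matters; log_level, my_log and example() are illustrative stand-ins, not driver code:

	#include <linux/kernel.h>

	static int log_level = 1;	/* illustrative stand-in for vhost->log_level */

	/* The do { } while (0) turns the guarded call into a single statement, so
	 * the macro behaves like a function call in every syntactic position. */
	#define my_log(lvl, fmt, ...)					\
		do {							\
			if ((lvl) <= log_level)				\
				pr_err(fmt, ##__VA_ARGS__);		\
		} while (0)

	static void example(int err)
	{
		if (err)
			my_log(1, "failed: %d\n", err);
		else
			pr_info("ok\n");	/* without the wrapper, this else would
						 * bind to the if inside the macro */
	}
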
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index c9aa7611e408..11d2602ae88e 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -70,6 +70,7 @@
70#include <linux/moduleparam.h> 70#include <linux/moduleparam.h>
71#include <linux/dma-mapping.h> 71#include <linux/dma-mapping.h>
72#include <linux/delay.h> 72#include <linux/delay.h>
73#include <linux/of.h>
73#include <asm/firmware.h> 74#include <asm/firmware.h>
74#include <asm/vio.h> 75#include <asm/vio.h>
75#include <asm/firmware.h> 76#include <asm/firmware.h>
@@ -87,9 +88,15 @@
87 */ 88 */
88static int max_id = 64; 89static int max_id = 64;
89static int max_channel = 3; 90static int max_channel = 3;
90static int init_timeout = 5; 91static int init_timeout = 300;
92static int login_timeout = 60;
93static int info_timeout = 30;
94static int abort_timeout = 60;
95static int reset_timeout = 60;
91static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; 96static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
92static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; 97static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
98static int fast_fail = 1;
99static int client_reserve = 1;
93 100
94static struct scsi_transport_template *ibmvscsi_transport_template; 101static struct scsi_transport_template *ibmvscsi_transport_template;
95 102
@@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
110MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); 117MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
111module_param_named(max_requests, max_requests, int, S_IRUGO); 118module_param_named(max_requests, max_requests, int, S_IRUGO);
112MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); 119MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
120module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
121MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
122module_param_named(client_reserve, client_reserve, int, S_IRUGO );
123MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
113 124
114/* ------------------------------------------------------------ 125/* ------------------------------------------------------------
115 * Routines for the event pool and event structs 126 * Routines for the event pool and event structs
@@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
781/* ------------------------------------------------------------ 792/* ------------------------------------------------------------
782 * Routines for driver initialization 793 * Routines for driver initialization
783 */ 794 */
795
784/** 796/**
785 * adapter_info_rsp: - Handle response to MAD adapter info request 797 * map_persist_bufs: - Pre-map persistent data for adapter logins
786 * @evt_struct: srp_event_struct with the response 798 * @hostdata: ibmvscsi_host_data of host
787 * 799 *
788 * Used as a "done" callback by when sending adapter_info. Gets called 800 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
789 * by ibmvscsi_handle_crq() 801 * Return 1 on error, 0 on success.
790*/ 802 */
791static void adapter_info_rsp(struct srp_event_struct *evt_struct) 803static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
792{ 804{
793 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
794 dma_unmap_single(hostdata->dev,
795 evt_struct->iu.mad.adapter_info.buffer,
796 evt_struct->iu.mad.adapter_info.common.length,
797 DMA_BIDIRECTIONAL);
798 805
799 if (evt_struct->xfer_iu->mad.adapter_info.common.status) { 806 hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
800 dev_err(hostdata->dev, "error %d getting adapter info\n", 807 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
801 evt_struct->xfer_iu->mad.adapter_info.common.status); 808
802 } else { 809 if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
803 dev_info(hostdata->dev, "host srp version: %s, " 810 dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
804 "host partition %s (%d), OS %d, max io %u\n", 811 return 1;
805 hostdata->madapter_info.srp_version,
806 hostdata->madapter_info.partition_name,
807 hostdata->madapter_info.partition_number,
808 hostdata->madapter_info.os_type,
809 hostdata->madapter_info.port_max_txu[0]);
810
811 if (hostdata->madapter_info.port_max_txu[0])
812 hostdata->host->max_sectors =
813 hostdata->madapter_info.port_max_txu[0] >> 9;
814
815 if (hostdata->madapter_info.os_type == 3 &&
816 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
817 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
818 hostdata->madapter_info.srp_version);
819 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
820 MAX_INDIRECT_BUFS);
821 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
822 }
823 } 812 }
813
814 hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
815 &hostdata->madapter_info,
816 sizeof(hostdata->madapter_info),
817 DMA_BIDIRECTIONAL);
818 if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
819 dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
820 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
821 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
822 return 1;
823 }
824
825 return 0;
824} 826}
825 827
826/** 828/**
827 * send_mad_adapter_info: - Sends the mad adapter info request 829 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
828 * and stores the result so it can be retrieved with 830 * @hostdata: ibmvscsi_host_data of host
829 * sysfs. We COULD consider causing a failure if the 831 *
830 * returned SRP version doesn't match ours. 832 * Unmap the capabilities and adapter info DMA buffers
831 * @hostdata: ibmvscsi_host_data of host 833 */
832 * 834static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
833 * Returns zero if successful.
834*/
835static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
836{ 835{
837 struct viosrp_adapter_info *req; 836 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
838 struct srp_event_struct *evt_struct; 837 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
839 unsigned long flags;
840 dma_addr_t addr;
841
842 evt_struct = get_event_struct(&hostdata->pool);
843 if (!evt_struct) {
844 dev_err(hostdata->dev,
845 "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
846 return;
847 }
848
849 init_event_struct(evt_struct,
850 adapter_info_rsp,
851 VIOSRP_MAD_FORMAT,
852 init_timeout);
853
854 req = &evt_struct->iu.mad.adapter_info;
855 memset(req, 0x00, sizeof(*req));
856
857 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
858 req->common.length = sizeof(hostdata->madapter_info);
859 req->buffer = addr = dma_map_single(hostdata->dev,
860 &hostdata->madapter_info,
861 sizeof(hostdata->madapter_info),
862 DMA_BIDIRECTIONAL);
863 838
864 if (dma_mapping_error(hostdata->dev, req->buffer)) { 839 dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
865 if (!firmware_has_feature(FW_FEATURE_CMO)) 840 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
866 dev_err(hostdata->dev, 841}
867 "Unable to map request_buffer for "
868 "adapter_info!\n");
869 free_event_struct(&hostdata->pool, evt_struct);
870 return;
871 }
872
873 spin_lock_irqsave(hostdata->host->host_lock, flags);
874 if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
875 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
876 dma_unmap_single(hostdata->dev,
877 addr,
878 sizeof(hostdata->madapter_info),
879 DMA_BIDIRECTIONAL);
880 }
881 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
882};
883 842
884/** 843/**
885 * login_rsp: - Handle response to SRP login request 844 * login_rsp: - Handle response to SRP login request
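
Editorial note: map_persist_bufs()/unmap_persist_bufs() above replace the old per-request dma_map_single() of the adapter-info buffer with buffers mapped once at probe time and kept for the life of the adapter, so logins can no longer fail on a mapping error. A minimal sketch of that map-once pattern; my_host, my_info and the field names are hypothetical:

	#include <linux/types.h>
	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	struct my_info {
		u32 flags;
		char name[32];
	};

	struct my_host {
		struct device *dev;
		struct my_info info;		/* lives for the lifetime of the adapter */
		dma_addr_t info_addr;
	};

	/* Map once at probe time; every later login just reuses info_addr. */
	static int my_map_bufs(struct my_host *h)
	{
		h->info_addr = dma_map_single(h->dev, &h->info, sizeof(h->info),
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(h->dev, h->info_addr))
			return -ENOMEM;	/* never hand an unchecked address to the device */
		return 0;
	}

	static void my_unmap_bufs(struct my_host *h)
	{
		dma_unmap_single(h->dev, h->info_addr, sizeof(h->info),
				 DMA_BIDIRECTIONAL);
	}
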
@@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
909 } 868 }
910 869
911 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); 870 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
912 871 hostdata->client_migrated = 0;
913 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
914 dev_err(hostdata->dev, "Invalid request_limit.\n");
915 872
916 /* Now we know what the real request-limit is. 873 /* Now we know what the real request-limit is.
917 * This value is set rather than added to request_limit because 874 * This value is set rather than added to request_limit because
@@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
922 879
923 /* If we had any pending I/Os, kick them */ 880 /* If we had any pending I/Os, kick them */
924 scsi_unblock_requests(hostdata->host); 881 scsi_unblock_requests(hostdata->host);
925
926 send_mad_adapter_info(hostdata);
927 return;
928} 882}
929 883
930/** 884/**
931 * send_srp_login: - Sends the srp login 885 * send_srp_login: - Sends the srp login
932 * @hostdata: ibmvscsi_host_data of host 886 * @hostdata: ibmvscsi_host_data of host
933 * 887 *
934 * Returns zero if successful. 888 * Returns zero if successful.
935*/ 889*/
936static int send_srp_login(struct ibmvscsi_host_data *hostdata) 890static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
939 unsigned long flags; 893 unsigned long flags;
940 struct srp_login_req *login; 894 struct srp_login_req *login;
941 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); 895 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
942 if (!evt_struct) {
943 dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
944 return FAILED;
945 }
946 896
947 init_event_struct(evt_struct, 897 BUG_ON(!evt_struct);
948 login_rsp, 898 init_event_struct(evt_struct, login_rsp,
949 VIOSRP_SRP_FORMAT, 899 VIOSRP_SRP_FORMAT, login_timeout);
950 init_timeout);
951 900
952 login = &evt_struct->iu.srp.login_req; 901 login = &evt_struct->iu.srp.login_req;
953 memset(login, 0x00, sizeof(struct srp_login_req)); 902 memset(login, 0, sizeof(*login));
954 login->opcode = SRP_LOGIN_REQ; 903 login->opcode = SRP_LOGIN_REQ;
955 login->req_it_iu_len = sizeof(union srp_iu); 904 login->req_it_iu_len = sizeof(union srp_iu);
956 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; 905 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
957 906
958 spin_lock_irqsave(hostdata->host->host_lock, flags); 907 spin_lock_irqsave(hostdata->host->host_lock, flags);
959 /* Start out with a request limit of 0, since this is negotiated in 908 /* Start out with a request limit of 0, since this is negotiated in
960 * the login request we are just sending and login requests always 909 * the login request we are just sending and login requests always
@@ -962,13 +911,241 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
962 */ 911 */
963 atomic_set(&hostdata->request_limit, 0); 912 atomic_set(&hostdata->request_limit, 0);
964 913
965 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 914 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
966 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 915 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
967 dev_info(hostdata->dev, "sent SRP login\n"); 916 dev_info(hostdata->dev, "sent SRP login\n");
968 return rc; 917 return rc;
969}; 918};
970 919
971/** 920/**
921 * capabilities_rsp: - Handle response to MAD adapter capabilities request
922 * @evt_struct: srp_event_struct with the response
923 *
924 * Used as a "done" callback when sending the capabilities request.
925 */
926static void capabilities_rsp(struct srp_event_struct *evt_struct)
927{
928 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
929
930 if (evt_struct->xfer_iu->mad.capabilities.common.status) {
931 dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
932 evt_struct->xfer_iu->mad.capabilities.common.status);
933 } else {
934 if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
935 dev_info(hostdata->dev, "Partition migration not supported\n");
936
937 if (client_reserve) {
938 if (hostdata->caps.reserve.common.server_support ==
939 SERVER_SUPPORTS_CAP)
940 dev_info(hostdata->dev, "Client reserve enabled\n");
941 else
942 dev_info(hostdata->dev, "Client reserve not supported\n");
943 }
944 }
945
946 send_srp_login(hostdata);
947}
948
949/**
950 * send_mad_capabilities: - Sends the mad capabilities request
951 * and stores the result so it can be retrieved with sysfs.
952 * @hostdata: ibmvscsi_host_data of host
953 */
954static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
955{
956 struct viosrp_capabilities *req;
957 struct srp_event_struct *evt_struct;
958 unsigned long flags;
959 struct device_node *of_node = hostdata->dev->archdata.of_node;
960 const char *location;
961
962 evt_struct = get_event_struct(&hostdata->pool);
963 BUG_ON(!evt_struct);
964
965 init_event_struct(evt_struct, capabilities_rsp,
966 VIOSRP_MAD_FORMAT, info_timeout);
967
968 req = &evt_struct->iu.mad.capabilities;
969 memset(req, 0, sizeof(*req));
970
971 hostdata->caps.flags = CAP_LIST_SUPPORTED;
972 if (hostdata->client_migrated)
973 hostdata->caps.flags |= CLIENT_MIGRATED;
974
975 strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
976 sizeof(hostdata->caps.name));
977 hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
978
979 location = of_get_property(of_node, "ibm,loc-code", NULL);
980 location = location ? location : dev_name(hostdata->dev);
981 strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
982 hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
983
984 req->common.type = VIOSRP_CAPABILITIES_TYPE;
985 req->buffer = hostdata->caps_addr;
986
987 hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
988 hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
989 hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
990 hostdata->caps.migration.ecl = 1;
991
992 if (client_reserve) {
993 hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
994 hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
995 hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
996 hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
997 req->common.length = sizeof(hostdata->caps);
998 } else
999 req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
1000
1001 spin_lock_irqsave(hostdata->host->host_lock, flags);
1002 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1003 dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
1004 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1005};
1006
1007/**
1008 * fast_fail_rsp: - Handle response to MAD enable fast fail
1009 * @evt_struct: srp_event_struct with the response
1010 *
1011 * Used as a "done" callback when sending enable fast fail. Gets called
1012 * by ibmvscsi_handle_crq()
1013 */
1014static void fast_fail_rsp(struct srp_event_struct *evt_struct)
1015{
1016 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1017 u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
1018
1019 if (status == VIOSRP_MAD_NOT_SUPPORTED)
1020 dev_err(hostdata->dev, "fast_fail not supported in server\n");
1021 else if (status == VIOSRP_MAD_FAILED)
1022 dev_err(hostdata->dev, "fast_fail request failed\n");
1023 else if (status != VIOSRP_MAD_SUCCESS)
1024 dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
1025
1026 send_mad_capabilities(hostdata);
1027}
1028
1029/**
1030 * enable_fast_fail - Enable fast fail for the adapter
1031 * @hostdata: ibmvscsi_host_data of host
1032 *
1033 * Returns zero if successful.
1034 */
1035static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
1036{
1037 int rc;
1038 unsigned long flags;
1039 struct viosrp_fast_fail *fast_fail_mad;
1040 struct srp_event_struct *evt_struct;
1041
1042 if (!fast_fail) {
1043 send_mad_capabilities(hostdata);
1044 return 0;
1045 }
1046
1047 evt_struct = get_event_struct(&hostdata->pool);
1048 BUG_ON(!evt_struct);
1049
1050 init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
1051
1052 fast_fail_mad = &evt_struct->iu.mad.fast_fail;
1053 memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
1054 fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
1055 fast_fail_mad->common.length = sizeof(*fast_fail_mad);
1056
1057 spin_lock_irqsave(hostdata->host->host_lock, flags);
1058 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1059 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1060 return rc;
1061}
1062
1063/**
1064 * adapter_info_rsp: - Handle response to MAD adapter info request
1065 * @evt_struct: srp_event_struct with the response
1066 *
1067 * Used as a "done" callback when sending adapter_info. Gets called
1068 * by ibmvscsi_handle_crq()
1069*/
1070static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1071{
1072 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1073
1074 if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
1075 dev_err(hostdata->dev, "error %d getting adapter info\n",
1076 evt_struct->xfer_iu->mad.adapter_info.common.status);
1077 } else {
1078 dev_info(hostdata->dev, "host srp version: %s, "
1079 "host partition %s (%d), OS %d, max io %u\n",
1080 hostdata->madapter_info.srp_version,
1081 hostdata->madapter_info.partition_name,
1082 hostdata->madapter_info.partition_number,
1083 hostdata->madapter_info.os_type,
1084 hostdata->madapter_info.port_max_txu[0]);
1085
1086 if (hostdata->madapter_info.port_max_txu[0])
1087 hostdata->host->max_sectors =
1088 hostdata->madapter_info.port_max_txu[0] >> 9;
1089
1090 if (hostdata->madapter_info.os_type == 3 &&
1091 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
1092 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
1093 hostdata->madapter_info.srp_version);
1094 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
1095 MAX_INDIRECT_BUFS);
1096 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1097 }
1098 }
1099
1100 enable_fast_fail(hostdata);
1101}
1102
1103/**
1104 * send_mad_adapter_info: - Sends the mad adapter info request
1105 * and stores the result so it can be retrieved with
1106 * sysfs. We COULD consider causing a failure if the
1107 * returned SRP version doesn't match ours.
1108 * @hostdata: ibmvscsi_host_data of host
1109 *
1110 * Returns zero if successful.
1111*/
1112static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
1113{
1114 struct viosrp_adapter_info *req;
1115 struct srp_event_struct *evt_struct;
1116 unsigned long flags;
1117
1118 evt_struct = get_event_struct(&hostdata->pool);
1119 BUG_ON(!evt_struct);
1120
1121 init_event_struct(evt_struct,
1122 adapter_info_rsp,
1123 VIOSRP_MAD_FORMAT,
1124 info_timeout);
1125
1126 req = &evt_struct->iu.mad.adapter_info;
1127 memset(req, 0x00, sizeof(*req));
1128
1129 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
1130 req->common.length = sizeof(hostdata->madapter_info);
1131 req->buffer = hostdata->adapter_info_addr;
1132
1133 spin_lock_irqsave(hostdata->host->host_lock, flags);
1134 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1135 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
1136 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1137};
1138
1139/**
1140 * init_adapter: Start virtual adapter initialization sequence
1141 *
1142 */
1143static void init_adapter(struct ibmvscsi_host_data *hostdata)
1144{
1145 send_mad_adapter_info(hostdata);
1146}
1147
1148/**
972 * sync_completion: Signal that a synchronous command has completed 1149 * sync_completion: Signal that a synchronous command has completed
973 * Note that after returning from this call, the evt_struct is freed. 1150 * Note that after returning from this call, the evt_struct is freed.
974 * the caller waiting on this completion shouldn't touch the evt_struct 1151 * the caller waiting on this completion shouldn't touch the evt_struct
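
Editorial note: the functions added above run in the opposite order from their placement in the file: init_adapter() sends the adapter-info MAD, adapter_info_rsp() then calls enable_fast_fail(), fast_fail_rsp() calls send_mad_capabilities(), and capabilities_rsp() finally issues the SRP login. One reusable detail is how send_mad_capabilities() fills the location code from the device tree with a fallback to the device name; a minimal sketch, where fill_loc_code() and its parameters are hypothetical:

	#include <linux/types.h>
	#include <linux/of.h>
	#include <linux/device.h>
	#include <linux/string.h>

	/* Copy a device-tree property into a fixed-size buffer, falling back to
	 * the device name when the property is absent. */
	static void fill_loc_code(struct device *dev, struct device_node *np,
				  char *loc, size_t len)
	{
		const char *src = of_get_property(np, "ibm,loc-code", NULL);

		if (!src)
			src = dev_name(dev);
		strncpy(loc, src, len);
		loc[len - 1] = '\0';	/* strncpy does not guarantee NUL termination */
	}
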
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1029 init_event_struct(evt, 1206 init_event_struct(evt,
1030 sync_completion, 1207 sync_completion,
1031 VIOSRP_SRP_FORMAT, 1208 VIOSRP_SRP_FORMAT,
1032 init_timeout); 1209 abort_timeout);
1033 1210
1034 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1211 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1035 1212
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1043 evt->sync_srp = &srp_rsp; 1220 evt->sync_srp = &srp_rsp;
1044 1221
1045 init_completion(&evt->comp); 1222 init_completion(&evt->comp);
1046 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1223 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
1047 1224
1048 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1225 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1049 break; 1226 break;
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1152 init_event_struct(evt, 1329 init_event_struct(evt,
1153 sync_completion, 1330 sync_completion,
1154 VIOSRP_SRP_FORMAT, 1331 VIOSRP_SRP_FORMAT,
1155 init_timeout); 1332 reset_timeout);
1156 1333
1157 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1334 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1158 1335
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1165 evt->sync_srp = &srp_rsp; 1342 evt->sync_srp = &srp_rsp;
1166 1343
1167 init_completion(&evt->comp); 1344 init_completion(&evt->comp);
1168 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1345 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
1169 1346
1170 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1347 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1171 break; 1348 break;
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1281 if ((rc = ibmvscsi_ops->send_crq(hostdata, 1458 if ((rc = ibmvscsi_ops->send_crq(hostdata,
1282 0xC002000000000000LL, 0)) == 0) { 1459 0xC002000000000000LL, 0)) == 0) {
1283 /* Now login */ 1460 /* Now login */
1284 send_srp_login(hostdata); 1461 init_adapter(hostdata);
1285 } else { 1462 } else {
1286 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); 1463 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
1287 } 1464 }
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1291 dev_info(hostdata->dev, "partner initialization complete\n"); 1468 dev_info(hostdata->dev, "partner initialization complete\n");
1292 1469
1293 /* Now login */ 1470 /* Now login */
1294 send_srp_login(hostdata); 1471 init_adapter(hostdata);
1295 break; 1472 break;
1296 default: 1473 default:
1297 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); 1474 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1303 if (crq->format == 0x06) { 1480 if (crq->format == 0x06) {
1304 /* We need to re-setup the interpartition connection */ 1481 /* We need to re-setup the interpartition connection */
1305 dev_info(hostdata->dev, "Re-enabling adapter!\n"); 1482 dev_info(hostdata->dev, "Re-enabling adapter!\n");
1483 hostdata->client_migrated = 1;
1306 purge_requests(hostdata, DID_REQUEUE); 1484 purge_requests(hostdata, DID_REQUEUE);
1307 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, 1485 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
1308 hostdata)) || 1486 hostdata)) ||
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1397 init_event_struct(evt_struct, 1575 init_event_struct(evt_struct,
1398 sync_completion, 1576 sync_completion,
1399 VIOSRP_MAD_FORMAT, 1577 VIOSRP_MAD_FORMAT,
1400 init_timeout); 1578 info_timeout);
1401 1579
1402 host_config = &evt_struct->iu.mad.host_config; 1580 host_config = &evt_struct->iu.mad.host_config;
1403 1581
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1419 1597
1420 init_completion(&evt_struct->comp); 1598 init_completion(&evt_struct->comp);
1421 spin_lock_irqsave(hostdata->host->host_lock, flags); 1599 spin_lock_irqsave(hostdata->host->host_lock, flags);
1422 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 1600 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1423 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1601 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1424 if (rc == 0) 1602 if (rc == 0)
1425 wait_for_completion(&evt_struct->comp); 1603 wait_for_completion(&evt_struct->comp);
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1444 spin_lock_irqsave(shost->host_lock, lock_flags); 1622 spin_lock_irqsave(shost->host_lock, lock_flags);
1445 if (sdev->type == TYPE_DISK) { 1623 if (sdev->type == TYPE_DISK) {
1446 sdev->allow_restart = 1; 1624 sdev->allow_restart = 1;
1447 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); 1625 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1448 } 1626 }
1449 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); 1627 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1450 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1628 spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1471/* ------------------------------------------------------------ 1649/* ------------------------------------------------------------
1472 * sysfs attributes 1650 * sysfs attributes
1473 */ 1651 */
1652static ssize_t show_host_vhost_loc(struct device *dev,
1653 struct device_attribute *attr, char *buf)
1654{
1655 struct Scsi_Host *shost = class_to_shost(dev);
1656 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1657 int len;
1658
1659 len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
1660 hostdata->caps.loc);
1661 return len;
1662}
1663
1664static struct device_attribute ibmvscsi_host_vhost_loc = {
1665 .attr = {
1666 .name = "vhost_loc",
1667 .mode = S_IRUGO,
1668 },
1669 .show = show_host_vhost_loc,
1670};
1671
1672static ssize_t show_host_vhost_name(struct device *dev,
1673 struct device_attribute *attr, char *buf)
1674{
1675 struct Scsi_Host *shost = class_to_shost(dev);
1676 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1677 int len;
1678
1679 len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
1680 hostdata->caps.name);
1681 return len;
1682}
1683
1684static struct device_attribute ibmvscsi_host_vhost_name = {
1685 .attr = {
1686 .name = "vhost_name",
1687 .mode = S_IRUGO,
1688 },
1689 .show = show_host_vhost_name,
1690};
1691
1474static ssize_t show_host_srp_version(struct device *dev, 1692static ssize_t show_host_srp_version(struct device *dev,
1475 struct device_attribute *attr, char *buf) 1693 struct device_attribute *attr, char *buf)
1476{ 1694{
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
1594}; 1812};
1595 1813
1596static struct device_attribute *ibmvscsi_attrs[] = { 1814static struct device_attribute *ibmvscsi_attrs[] = {
1815 &ibmvscsi_host_vhost_loc,
1816 &ibmvscsi_host_vhost_name,
1597 &ibmvscsi_host_srp_version, 1817 &ibmvscsi_host_srp_version,
1598 &ibmvscsi_host_partition_name, 1818 &ibmvscsi_host_partition_name,
1599 &ibmvscsi_host_partition_number, 1819 &ibmvscsi_host_partition_number,
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1674 atomic_set(&hostdata->request_limit, -1); 1894 atomic_set(&hostdata->request_limit, -1);
1675 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; 1895 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
1676 1896
1897 if (map_persist_bufs(hostdata)) {
1898 dev_err(&vdev->dev, "couldn't map persistent buffers\n");
1899 goto persist_bufs_failed;
1900 }
1901
1677 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); 1902 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
1678 if (rc != 0 && rc != H_RESOURCE) { 1903 if (rc != 0 && rc != H_RESOURCE) {
1679 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); 1904 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1687 host->max_lun = 8; 1912 host->max_lun = 8;
1688 host->max_id = max_id; 1913 host->max_id = max_id;
1689 host->max_channel = max_channel; 1914 host->max_channel = max_channel;
1915 host->max_cmd_len = 16;
1690 1916
1691 if (scsi_add_host(hostdata->host, hostdata->dev)) 1917 if (scsi_add_host(hostdata->host, hostdata->dev))
1692 goto add_host_failed; 1918 goto add_host_failed;
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1733 init_pool_failed: 1959 init_pool_failed:
1734 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); 1960 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
1735 init_crq_failed: 1961 init_crq_failed:
1962 unmap_persist_bufs(hostdata);
1963 persist_bufs_failed:
1736 scsi_host_put(host); 1964 scsi_host_put(host);
1737 scsi_host_alloc_failed: 1965 scsi_host_alloc_failed:
1738 return -1; 1966 return -1;
@@ -1741,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1741static int ibmvscsi_remove(struct vio_dev *vdev) 1969static int ibmvscsi_remove(struct vio_dev *vdev)
1742{ 1970{
1743 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; 1971 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
1972 unmap_persist_bufs(hostdata);
1744 release_event_pool(&hostdata->pool, hostdata); 1973 release_event_pool(&hostdata->pool, hostdata);
1745 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, 1974 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
1746 max_events); 1975 max_events);
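
Editorial note: the vhost_loc/vhost_name attributes added earlier in this file follow the usual sysfs show pattern: buf is a PAGE_SIZE buffer supplied by the core, and the snprintf is bounded by the size of the source field, so output is capped at that field's length (at worst the trailing newline is dropped when the field is completely full). A minimal sketch of an equivalent read-only attribute; struct my_priv and its loc field are illustrative only:

	#include <linux/device.h>
	#include <linux/stat.h>

	struct my_priv {
		char loc[32];			/* fixed-size field exposed read-only */
	};

	/* 'buf' is a PAGE_SIZE buffer supplied by sysfs; bounding by sizeof(loc)
	 * caps the output at the field's length. */
	static ssize_t vhost_loc_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
	{
		struct my_priv *p = dev_get_drvdata(dev);

		return snprintf(buf, sizeof(p->loc), "%s\n", p->loc);
	}

	static DEVICE_ATTR(vhost_loc, S_IRUGO, vhost_loc_show, NULL);
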
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 2d4339d5e16e..76425303def0 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
90/* all driver data associated with a host adapter */ 90/* all driver data associated with a host adapter */
91struct ibmvscsi_host_data { 91struct ibmvscsi_host_data {
92 atomic_t request_limit; 92 atomic_t request_limit;
93 int client_migrated;
93 struct device *dev; 94 struct device *dev;
94 struct event_pool pool; 95 struct event_pool pool;
95 struct crq_queue queue; 96 struct crq_queue queue;
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data {
97 struct list_head sent; 98 struct list_head sent;
98 struct Scsi_Host *host; 99 struct Scsi_Host *host;
99 struct mad_adapter_info_data madapter_info; 100 struct mad_adapter_info_data madapter_info;
101 struct capabilities caps;
102 dma_addr_t caps_addr;
103 dma_addr_t adapter_info_addr;
100}; 104};
101 105
102/* routines for managing a command/response queue */ 106/* routines for managing a command/response queue */
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 204604501ad8..2cd735d1d196 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -37,6 +37,7 @@
37 37
38#define SRP_VERSION "16.a" 38#define SRP_VERSION "16.a"
39#define SRP_MAX_IU_LEN 256 39#define SRP_MAX_IU_LEN 256
40#define SRP_MAX_LOC_LEN 32
40 41
41union srp_iu { 42union srp_iu {
42 struct srp_login_req login_req; 43 struct srp_login_req login_req;
@@ -86,7 +87,37 @@ enum viosrp_mad_types {
86 VIOSRP_EMPTY_IU_TYPE = 0x01, 87 VIOSRP_EMPTY_IU_TYPE = 0x01,
87 VIOSRP_ERROR_LOG_TYPE = 0x02, 88 VIOSRP_ERROR_LOG_TYPE = 0x02,
88 VIOSRP_ADAPTER_INFO_TYPE = 0x03, 89 VIOSRP_ADAPTER_INFO_TYPE = 0x03,
89 VIOSRP_HOST_CONFIG_TYPE = 0x04 90 VIOSRP_HOST_CONFIG_TYPE = 0x04,
91 VIOSRP_CAPABILITIES_TYPE = 0x05,
92 VIOSRP_ENABLE_FAST_FAIL = 0x08,
93};
94
95enum viosrp_mad_status {
96 VIOSRP_MAD_SUCCESS = 0x00,
97 VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
98 VIOSRP_MAD_FAILED = 0xF7,
99};
100
101enum viosrp_capability_type {
102 MIGRATION_CAPABILITIES = 0x01,
103 RESERVATION_CAPABILITIES = 0x02,
104};
105
106enum viosrp_capability_support {
107 SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
108 SERVER_SUPPORTS_CAP = 0x01,
109 SERVER_CAP_DATA = 0x02,
110};
111
112enum viosrp_reserve_type {
113 CLIENT_RESERVE_SCSI_2 = 0x01,
114};
115
116enum viosrp_capability_flag {
117 CLIENT_MIGRATED = 0x01,
118 CLIENT_RECONNECT = 0x02,
119 CAP_LIST_SUPPORTED = 0x04,
120 CAP_LIST_DATA = 0x08,
90}; 121};
91 122
92/* 123/*
@@ -127,11 +158,46 @@ struct viosrp_host_config {
127 u64 buffer; 158 u64 buffer;
128}; 159};
129 160
161struct viosrp_fast_fail {
162 struct mad_common common;
163};
164
165struct viosrp_capabilities {
166 struct mad_common common;
167 u64 buffer;
168};
169
170struct mad_capability_common {
171 u32 cap_type;
172 u16 length;
173 u16 server_support;
174};
175
176struct mad_reserve_cap {
177 struct mad_capability_common common;
178 u32 type;
179};
180
181struct mad_migration_cap {
182 struct mad_capability_common common;
183 u32 ecl;
184};
185
186struct capabilities{
187 u32 flags;
188 char name[SRP_MAX_LOC_LEN];
189 char loc[SRP_MAX_LOC_LEN];
190 struct mad_migration_cap migration;
191 struct mad_reserve_cap reserve;
192};
193
130union mad_iu { 194union mad_iu {
131 struct viosrp_empty_iu empty_iu; 195 struct viosrp_empty_iu empty_iu;
132 struct viosrp_error_log error_log; 196 struct viosrp_error_log error_log;
133 struct viosrp_adapter_info adapter_info; 197 struct viosrp_adapter_info adapter_info;
134 struct viosrp_host_config host_config; 198 struct viosrp_host_config host_config;
199 struct viosrp_fast_fail fast_fail;
200 struct viosrp_capabilities capabilities;
135}; 201};
136 202
137union viosrp_iu { 203union viosrp_iu {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dd689ded8609..0f8bc772b112 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
7003 ioa_cfg->sdt_state = ABORT_DUMP; 7003 ioa_cfg->sdt_state = ABORT_DUMP;
7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; 7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7005 ioa_cfg->in_ioa_bringdown = 1; 7005 ioa_cfg->in_ioa_bringdown = 1;
7006 ioa_cfg->allow_cmds = 0;
7006 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7007 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 7008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7008} 7009}
@@ -7688,7 +7689,7 @@ static void __ipr_remove(struct pci_dev *pdev)
7688 * Return value: 7689 * Return value:
7689 * none 7690 * none
7690 **/ 7691 **/
7691static void ipr_remove(struct pci_dev *pdev) 7692static void __devexit ipr_remove(struct pci_dev *pdev)
7692{ 7693{
7693 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 7694 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7694 7695
@@ -7864,7 +7865,7 @@ static struct pci_driver ipr_driver = {
7864 .name = IPR_NAME, 7865 .name = IPR_NAME,
7865 .id_table = ipr_pci_table, 7866 .id_table = ipr_pci_table,
7866 .probe = ipr_probe, 7867 .probe = ipr_probe,
7867 .remove = ipr_remove, 7868 .remove = __devexit_p(ipr_remove),
7868 .shutdown = ipr_shutdown, 7869 .shutdown = ipr_shutdown,
7869 .err_handler = &ipr_err_handler, 7870 .err_handler = &ipr_err_handler,
7870}; 7871};
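
Editorial note: the ipr change tags the remove callback __devexit and registers it through __devexit_p(). In kernels of this era those annotations allow the function to be discarded when device-removal support is compiled out, with __devexit_p() evaluating to NULL in that case so the driver struct never points at discarded code (the annotations were dropped from later kernels). A minimal sketch of the pattern with a hypothetical driver:

	#include <linux/pci.h>
	#include <linux/init.h>

	static int __devinit my_probe(struct pci_dev *pdev,
				      const struct pci_device_id *id)
	{
		return 0;
	}

	static void __devexit my_remove(struct pci_dev *pdev)
	{
	}

	static struct pci_driver my_driver = {
		.name	= "my_pci_drv",
		.probe	= my_probe,
		/* becomes NULL if __devexit code is discarded, so nothing dangles */
		.remove	= __devexit_p(my_remove),
	};
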
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 992af05aacf1..7af9bceb8aa9 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1159 atomic_inc(&mp->stats.xid_not_found); 1159 atomic_inc(&mp->stats.xid_not_found);
1160 goto out; 1160 goto out;
1161 } 1161 }
1162 if (ep->esb_stat & ESB_ST_COMPLETE) {
1163 atomic_inc(&mp->stats.xid_not_found);
1164 goto out;
1165 }
1162 if (ep->rxid == FC_XID_UNKNOWN) 1166 if (ep->rxid == FC_XID_UNKNOWN)
1163 ep->rxid = ntohs(fh->fh_rx_id); 1167 ep->rxid = ntohs(fh->fh_rx_id);
1164 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { 1168 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 521f996f9b13..ad8b747837b0 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1896 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1896 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1897 break; 1897 break;
1898 case FC_CMD_ABORTED: 1898 case FC_CMD_ABORTED:
1899 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status; 1899 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
1900 break; 1900 break;
1901 case FC_CMD_TIME_OUT: 1901 case FC_CMD_TIME_OUT:
1902 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 1902 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 747d73c5c8af..7bfbff7e0efb 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
478 if (PTR_ERR(fp) == -FC_EX_CLOSED) 478 if (PTR_ERR(fp) == -FC_EX_CLOSED)
479 return fc_rport_error(rport, fp); 479 return fc_rport_error(rport, fp);
480 480
481 if (rdata->retries < rdata->local_port->max_retry_count) { 481 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
482 FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", 482 FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
483 PTR_ERR(fp), fc_rport_state(rport)); 483 PTR_ERR(fp), fc_rport_state(rport));
484 rdata->retries++; 484 rdata->retries++;
@@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport)
1330} 1330}
1331EXPORT_SYMBOL(fc_rport_init); 1331EXPORT_SYMBOL(fc_rport_init);
1332 1332
1333int fc_setup_rport() 1333int fc_setup_rport(void)
1334{ 1334{
1335 rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); 1335 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1336 if (!rport_event_queue) 1336 if (!rport_event_queue)
@@ -1339,7 +1339,7 @@ int fc_setup_rport()
1339} 1339}
1340EXPORT_SYMBOL(fc_setup_rport); 1340EXPORT_SYMBOL(fc_setup_rport);
1341 1341
1342void fc_destroy_rport() 1342void fc_destroy_rport(void)
1343{ 1343{
1344 destroy_workqueue(rport_event_queue); 1344 destroy_workqueue(rport_event_queue);
1345} 1345}
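
Editorial note: the fc_rport.c hunks change the definitions of fc_setup_rport and fc_destroy_rport from an empty parameter list to (void). In C before C23, an empty list is an old-style declaration that creates no prototype, so callers can pass arguments without any compile-time check; (void) states explicitly that the function takes none. A tiny illustration with hypothetical functions:

	/* Old-style declaration: no prototype, so argument checking is disabled. */
	int setup_old();

	/* Prototype: the function takes exactly no arguments. */
	int setup_new(void);

	static void caller(void)
	{
		setup_old(42);		/* accepted silently, argument is discarded */
		setup_new();		/* setup_new(42) here would be a compile error */
	}
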
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e72b4ad47d35..59908aead531 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
81 struct Scsi_Host *shost = conn->session->host; 81 struct Scsi_Host *shost = conn->session->host;
82 struct iscsi_host *ihost = shost_priv(shost); 82 struct iscsi_host *ihost = shost_priv(shost);
83 83
84 queue_work(ihost->workq, &conn->xmitwork); 84 if (ihost->workq)
85 queue_work(ihost->workq, &conn->xmitwork);
85} 86}
86EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); 87EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
87 88
@@ -109,11 +110,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
109 * if the window closed with IO queued, then kick the 110 * if the window closed with IO queued, then kick the
110 * xmit thread 111 * xmit thread
111 */ 112 */
112 if (!list_empty(&session->leadconn->xmitqueue) || 113 if (!list_empty(&session->leadconn->cmdqueue) ||
113 !list_empty(&session->leadconn->mgmtqueue)) { 114 !list_empty(&session->leadconn->mgmtqueue))
114 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 115 iscsi_conn_queue_work(session->leadconn);
115 iscsi_conn_queue_work(session->leadconn);
116 }
117 } 116 }
118} 117}
119EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 118EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -257,9 +256,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
257 itt_t itt; 256 itt_t itt;
258 int rc; 257 int rc;
259 258
260 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); 259 if (conn->session->tt->alloc_pdu) {
261 if (rc) 260 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
262 return rc; 261 if (rc)
262 return rc;
263 }
263 hdr = (struct iscsi_cmd *) task->hdr; 264 hdr = (struct iscsi_cmd *) task->hdr;
264 itt = hdr->itt; 265 itt = hdr->itt;
265 memset(hdr, 0, sizeof(*hdr)); 266 memset(hdr, 0, sizeof(*hdr));
@@ -364,7 +365,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
364 return -EIO; 365 return -EIO;
365 366
366 task->state = ISCSI_TASK_RUNNING; 367 task->state = ISCSI_TASK_RUNNING;
367 list_move_tail(&task->running, &conn->run_list);
368 368
369 conn->scsicmd_pdus_cnt++; 369 conn->scsicmd_pdus_cnt++;
370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " 370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -380,26 +380,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
380} 380}
381 381
382/** 382/**
383 * iscsi_complete_command - finish a task 383 * iscsi_free_task - free a task
384 * @task: iscsi cmd task 384 * @task: iscsi cmd task
385 * 385 *
386 * Must be called with session lock. 386 * Must be called with session lock.
387 * This function returns the scsi command to scsi-ml or cleans 387 * This function returns the scsi command to scsi-ml or cleans
388 * up mgmt tasks then returns the task to the pool. 388 * up mgmt tasks then returns the task to the pool.
389 */ 389 */
390static void iscsi_complete_command(struct iscsi_task *task) 390static void iscsi_free_task(struct iscsi_task *task)
391{ 391{
392 struct iscsi_conn *conn = task->conn; 392 struct iscsi_conn *conn = task->conn;
393 struct iscsi_session *session = conn->session; 393 struct iscsi_session *session = conn->session;
394 struct scsi_cmnd *sc = task->sc; 394 struct scsi_cmnd *sc = task->sc;
395 395
396 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
397 task->itt, task->state, task->sc);
398
396 session->tt->cleanup_task(task); 399 session->tt->cleanup_task(task);
397 list_del_init(&task->running); 400 task->state = ISCSI_TASK_FREE;
398 task->state = ISCSI_TASK_COMPLETED;
399 task->sc = NULL; 401 task->sc = NULL;
400
401 if (conn->task == task)
402 conn->task = NULL;
403 /* 402 /*
404 * login task is preallocated so do not free 403 * login task is preallocated so do not free
405 */ 404 */
@@ -408,9 +407,6 @@ static void iscsi_complete_command(struct iscsi_task *task)
408 407
409 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); 408 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
410 409
411 if (conn->ping_task == task)
412 conn->ping_task = NULL;
413
414 if (sc) { 410 if (sc) {
415 task->sc = NULL; 411 task->sc = NULL;
416 /* SCSI eh reuses commands to verify us */ 412 /* SCSI eh reuses commands to verify us */
@@ -433,7 +429,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task);
433static void __iscsi_put_task(struct iscsi_task *task) 429static void __iscsi_put_task(struct iscsi_task *task)
434{ 430{
435 if (atomic_dec_and_test(&task->refcount)) 431 if (atomic_dec_and_test(&task->refcount))
436 iscsi_complete_command(task); 432 iscsi_free_task(task);
437} 433}
438 434
439void iscsi_put_task(struct iscsi_task *task) 435void iscsi_put_task(struct iscsi_task *task)
@@ -446,26 +442,74 @@ void iscsi_put_task(struct iscsi_task *task)
446} 442}
447EXPORT_SYMBOL_GPL(iscsi_put_task); 443EXPORT_SYMBOL_GPL(iscsi_put_task);
448 444
445/**
446 * iscsi_complete_task - finish a task
447 * @task: iscsi cmd task
448 * @state: state to complete task with
449 *
450 * Must be called with session lock.
451 */
452static void iscsi_complete_task(struct iscsi_task *task, int state)
453{
454 struct iscsi_conn *conn = task->conn;
455
456 ISCSI_DBG_SESSION(conn->session,
457 "complete task itt 0x%x state %d sc %p\n",
458 task->itt, task->state, task->sc);
459 if (task->state == ISCSI_TASK_COMPLETED ||
460 task->state == ISCSI_TASK_ABRT_TMF ||
461 task->state == ISCSI_TASK_ABRT_SESS_RECOV)
462 return;
463 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
464 task->state = state;
465
466 if (!list_empty(&task->running))
467 list_del_init(&task->running);
468
469 if (conn->task == task)
470 conn->task = NULL;
471
472 if (conn->ping_task == task)
473 conn->ping_task = NULL;
474
475 /* release get from queueing */
476 __iscsi_put_task(task);
477}
478
449/* 479/*
450 * session lock must be held 480 * session lock must be held and if not called for a task that is
481 * still pending or from the xmit thread, then xmit thread must
482 * be suspended.
451 */ 483 */
452static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, 484static void fail_scsi_task(struct iscsi_task *task, int err)
453 int err)
454{ 485{
486 struct iscsi_conn *conn = task->conn;
455 struct scsi_cmnd *sc; 487 struct scsi_cmnd *sc;
488 int state;
456 489
490 /*
491 * if a command completes and we get a successful tmf response
492 * we will hit this because the scsi eh abort code does not take
493 * a ref to the task.
494 */
457 sc = task->sc; 495 sc = task->sc;
458 if (!sc) 496 if (!sc)
459 return; 497 return;
460 498
461 if (task->state == ISCSI_TASK_PENDING) 499 if (task->state == ISCSI_TASK_PENDING) {
462 /* 500 /*
463 * cmd never made it to the xmit thread, so we should not count 501 * cmd never made it to the xmit thread, so we should not count
464 * the cmd in the sequencing 502 * the cmd in the sequencing
465 */ 503 */
466 conn->session->queued_cmdsn--; 504 conn->session->queued_cmdsn--;
505 /* it was never sent so just complete like normal */
506 state = ISCSI_TASK_COMPLETED;
507 } else if (err == DID_TRANSPORT_DISRUPTED)
508 state = ISCSI_TASK_ABRT_SESS_RECOV;
509 else
510 state = ISCSI_TASK_ABRT_TMF;
467 511
468 sc->result = err; 512 sc->result = err << 16;
469 if (!scsi_bidi_cmnd(sc)) 513 if (!scsi_bidi_cmnd(sc))
470 scsi_set_resid(sc, scsi_bufflen(sc)); 514 scsi_set_resid(sc, scsi_bufflen(sc));
471 else { 515 else {
@@ -473,10 +517,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
473 scsi_in(sc)->resid = scsi_in(sc)->length; 517 scsi_in(sc)->resid = scsi_in(sc)->length;
474 } 518 }
475 519
476 if (conn->task == task) 520 iscsi_complete_task(task, state);
477 conn->task = NULL;
478 /* release ref from queuecommand */
479 __iscsi_put_task(task);
480} 521}
481 522
482static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, 523static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
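For orientation, the completion rework above separates freeing a pool slot (iscsi_free_task) from marking a task finished or aborted (iscsi_complete_task / fail_scsi_task). The comment block below is a reader's summary of the lifecycle implied by the states used in these hunks, not text from the patch; the enum itself is declared in the libiscsi headers rather than in this file.

/*
 * Reader's sketch of the reworked task lifecycle:
 *
 *   ISCSI_TASK_FREE             slot sits in session->cmdpool, task->sc == NULL
 *   ISCSI_TASK_PENDING          queued on conn->cmdqueue or conn->mgmtqueue
 *   ISCSI_TASK_RUNNING          handed to the transport by the xmit path
 *   ISCSI_TASK_COMPLETED        finished normally via iscsi_complete_task()
 *   ISCSI_TASK_ABRT_TMF         failed by fail_scsi_task() after a TMF/abort
 *   ISCSI_TASK_ABRT_SESS_RECOV  failed during session recovery
 *
 * In every case the final reference put lands in iscsi_free_task(), which
 * returns the slot to the pool and marks it ISCSI_TASK_FREE again.
 */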
@@ -516,7 +557,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
516 session->state = ISCSI_STATE_LOGGING_OUT; 557 session->state = ISCSI_STATE_LOGGING_OUT;
517 558
518 task->state = ISCSI_TASK_RUNNING; 559 task->state = ISCSI_TASK_RUNNING;
519 list_move_tail(&task->running, &conn->mgmt_run_list);
520 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " 560 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
521 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, 561 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
522 hdr->itt, task->data_count); 562 hdr->itt, task->data_count);
@@ -528,6 +568,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
528 char *data, uint32_t data_size) 568 char *data, uint32_t data_size)
529{ 569{
530 struct iscsi_session *session = conn->session; 570 struct iscsi_session *session = conn->session;
571 struct iscsi_host *ihost = shost_priv(session->host);
531 struct iscsi_task *task; 572 struct iscsi_task *task;
532 itt_t itt; 573 itt_t itt;
533 574
@@ -544,6 +585,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
544 */ 585 */
545 task = conn->login_task; 586 task = conn->login_task;
546 else { 587 else {
588 if (session->state != ISCSI_STATE_LOGGED_IN)
589 return NULL;
590
547 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 591 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
548 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 592 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
549 593
@@ -559,6 +603,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
559 atomic_set(&task->refcount, 1); 603 atomic_set(&task->refcount, 1);
560 task->conn = conn; 604 task->conn = conn;
561 task->sc = NULL; 605 task->sc = NULL;
606 INIT_LIST_HEAD(&task->running);
607 task->state = ISCSI_TASK_PENDING;
562 608
563 if (data_size) { 609 if (data_size) {
564 memcpy(task->data, data, data_size); 610 memcpy(task->data, data, data_size);
@@ -566,11 +612,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
566 } else 612 } else
567 task->data_count = 0; 613 task->data_count = 0;
568 614
569 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { 615 if (conn->session->tt->alloc_pdu) {
570 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " 616 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
571 "pdu for mgmt task.\n"); 617 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
572 goto requeue_task; 618 "pdu for mgmt task.\n");
619 goto free_task;
620 }
573 } 621 }
622
574 itt = task->hdr->itt; 623 itt = task->hdr->itt;
575 task->hdr_len = sizeof(struct iscsi_hdr); 624 task->hdr_len = sizeof(struct iscsi_hdr);
576 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); 625 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
@@ -583,30 +632,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
583 task->conn->session->age); 632 task->conn->session->age);
584 } 633 }
585 634
586 INIT_LIST_HEAD(&task->running); 635 if (!ihost->workq) {
587 list_add_tail(&task->running, &conn->mgmtqueue);
588
589 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
590 if (iscsi_prep_mgmt_task(conn, task)) 636 if (iscsi_prep_mgmt_task(conn, task))
591 goto free_task; 637 goto free_task;
592 638
593 if (session->tt->xmit_task(task)) 639 if (session->tt->xmit_task(task))
594 goto free_task; 640 goto free_task;
595 641 } else {
596 } else 642 list_add_tail(&task->running, &conn->mgmtqueue);
597 iscsi_conn_queue_work(conn); 643 iscsi_conn_queue_work(conn);
644 }
598 645
599 return task; 646 return task;
600 647
601free_task: 648free_task:
602 __iscsi_put_task(task); 649 __iscsi_put_task(task);
603 return NULL; 650 return NULL;
604
605requeue_task:
606 if (task != conn->login_task)
607 __kfifo_put(session->cmdpool.queue, (void*)&task,
608 sizeof(void*));
609 return NULL;
610} 651}
611 652
612int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 653int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -701,11 +742,10 @@ invalid_datalen:
701 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 742 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
702 } 743 }
703out: 744out:
704 ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n", 745 ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
705 sc, sc->result, task->itt); 746 sc, sc->result, task->itt);
706 conn->scsirsp_pdus_cnt++; 747 conn->scsirsp_pdus_cnt++;
707 748 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
708 __iscsi_put_task(task);
709} 749}
710 750
711/** 751/**
@@ -724,6 +764,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
724 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) 764 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
725 return; 765 return;
726 766
767 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
727 sc->result = (DID_OK << 16) | rhdr->cmd_status; 768 sc->result = (DID_OK << 16) | rhdr->cmd_status;
728 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 769 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
729 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | 770 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
@@ -738,8 +779,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
738 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 779 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
739 } 780 }
740 781
782 ISCSI_DBG_SESSION(conn->session, "data in with status done "
783 "[sc %p res %d itt 0x%x]\n",
784 sc, sc->result, task->itt);
741 conn->scsirsp_pdus_cnt++; 785 conn->scsirsp_pdus_cnt++;
742 __iscsi_put_task(task); 786 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
743} 787}
744 788
745static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 789static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -823,7 +867,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
823 * 867 *
824 * The session lock must be held. 868 * The session lock must be held.
825 */ 869 */
826static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) 870struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
827{ 871{
828 struct iscsi_session *session = conn->session; 872 struct iscsi_session *session = conn->session;
829 int i; 873 int i;
@@ -840,6 +884,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
840 884
841 return session->cmds[i]; 885 return session->cmds[i];
842} 886}
887EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
843 888
844/** 889/**
845 * __iscsi_complete_pdu - complete pdu 890 * __iscsi_complete_pdu - complete pdu
@@ -959,7 +1004,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
959 } 1004 }
960 1005
961 iscsi_tmf_rsp(conn, hdr); 1006 iscsi_tmf_rsp(conn, hdr);
962 __iscsi_put_task(task); 1007 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
963 break; 1008 break;
964 case ISCSI_OP_NOOP_IN: 1009 case ISCSI_OP_NOOP_IN:
965 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 1010 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -977,7 +1022,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
977 goto recv_pdu; 1022 goto recv_pdu;
978 1023
979 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); 1024 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
980 __iscsi_put_task(task); 1025 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
981 break; 1026 break;
982 default: 1027 default:
983 rc = ISCSI_ERR_BAD_OPCODE; 1028 rc = ISCSI_ERR_BAD_OPCODE;
@@ -989,7 +1034,7 @@ out:
989recv_pdu: 1034recv_pdu:
990 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) 1035 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
991 rc = ISCSI_ERR_CONN_FAILED; 1036 rc = ISCSI_ERR_CONN_FAILED;
992 __iscsi_put_task(task); 1037 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
993 return rc; 1038 return rc;
994} 1039}
995EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); 1040EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -1166,7 +1211,12 @@ void iscsi_requeue_task(struct iscsi_task *task)
1166{ 1211{
1167 struct iscsi_conn *conn = task->conn; 1212 struct iscsi_conn *conn = task->conn;
1168 1213
1169 list_move_tail(&task->running, &conn->requeue); 1214 /*
1215 * this may be on the requeue list already if the xmit_task callout
1216 * is handling the r2ts while we are adding new ones
1217 */
1218 if (list_empty(&task->running))
1219 list_add_tail(&task->running, &conn->requeue);
1170 iscsi_conn_queue_work(conn); 1220 iscsi_conn_queue_work(conn);
1171} 1221}
1172EXPORT_SYMBOL_GPL(iscsi_requeue_task); 1222EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1206,6 +1256,7 @@ check_mgmt:
1206 while (!list_empty(&conn->mgmtqueue)) { 1256 while (!list_empty(&conn->mgmtqueue)) {
1207 conn->task = list_entry(conn->mgmtqueue.next, 1257 conn->task = list_entry(conn->mgmtqueue.next,
1208 struct iscsi_task, running); 1258 struct iscsi_task, running);
1259 list_del_init(&conn->task->running);
1209 if (iscsi_prep_mgmt_task(conn, conn->task)) { 1260 if (iscsi_prep_mgmt_task(conn, conn->task)) {
1210 __iscsi_put_task(conn->task); 1261 __iscsi_put_task(conn->task);
1211 conn->task = NULL; 1262 conn->task = NULL;
@@ -1217,23 +1268,26 @@ check_mgmt:
1217 } 1268 }
1218 1269
1219 /* process pending command queue */ 1270 /* process pending command queue */
1220 while (!list_empty(&conn->xmitqueue)) { 1271 while (!list_empty(&conn->cmdqueue)) {
1221 if (conn->tmf_state == TMF_QUEUED) 1272 if (conn->tmf_state == TMF_QUEUED)
1222 break; 1273 break;
1223 1274
1224 conn->task = list_entry(conn->xmitqueue.next, 1275 conn->task = list_entry(conn->cmdqueue.next,
1225 struct iscsi_task, running); 1276 struct iscsi_task, running);
1277 list_del_init(&conn->task->running);
1226 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { 1278 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1227 fail_command(conn, conn->task, DID_IMM_RETRY << 16); 1279 fail_scsi_task(conn->task, DID_IMM_RETRY);
1228 continue; 1280 continue;
1229 } 1281 }
1230 rc = iscsi_prep_scsi_cmd_pdu(conn->task); 1282 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1231 if (rc) { 1283 if (rc) {
1232 if (rc == -ENOMEM) { 1284 if (rc == -ENOMEM) {
1285 list_add_tail(&conn->task->running,
1286 &conn->cmdqueue);
1233 conn->task = NULL; 1287 conn->task = NULL;
1234 goto again; 1288 goto again;
1235 } else 1289 } else
1236 fail_command(conn, conn->task, DID_ABORT << 16); 1290 fail_scsi_task(conn->task, DID_ABORT);
1237 continue; 1291 continue;
1238 } 1292 }
1239 rc = iscsi_xmit_task(conn); 1293 rc = iscsi_xmit_task(conn);
@@ -1260,8 +1314,8 @@ check_mgmt:
1260 1314
1261 conn->task = list_entry(conn->requeue.next, 1315 conn->task = list_entry(conn->requeue.next,
1262 struct iscsi_task, running); 1316 struct iscsi_task, running);
1317 list_del_init(&conn->task->running);
1263 conn->task->state = ISCSI_TASK_RUNNING; 1318 conn->task->state = ISCSI_TASK_RUNNING;
1264 list_move_tail(conn->requeue.next, &conn->run_list);
1265 rc = iscsi_xmit_task(conn); 1319 rc = iscsi_xmit_task(conn);
1266 if (rc) 1320 if (rc)
1267 goto again; 1321 goto again;
@@ -1328,6 +1382,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1328{ 1382{
1329 struct iscsi_cls_session *cls_session; 1383 struct iscsi_cls_session *cls_session;
1330 struct Scsi_Host *host; 1384 struct Scsi_Host *host;
1385 struct iscsi_host *ihost;
1331 int reason = 0; 1386 int reason = 0;
1332 struct iscsi_session *session; 1387 struct iscsi_session *session;
1333 struct iscsi_conn *conn; 1388 struct iscsi_conn *conn;
@@ -1338,6 +1393,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1338 sc->SCp.ptr = NULL; 1393 sc->SCp.ptr = NULL;
1339 1394
1340 host = sc->device->host; 1395 host = sc->device->host;
1396 ihost = shost_priv(host);
1341 spin_unlock(host->host_lock); 1397 spin_unlock(host->host_lock);
1342 1398
1343 cls_session = starget_to_session(scsi_target(sc->device)); 1399 cls_session = starget_to_session(scsi_target(sc->device));
@@ -1350,13 +1406,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1350 goto fault; 1406 goto fault;
1351 } 1407 }
1352 1408
1353 /* 1409 if (session->state != ISCSI_STATE_LOGGED_IN) {
1354 * ISCSI_STATE_FAILED is a temp. state. The recovery
1355 * code will decide what is best to do with command queued
1356 * during this time
1357 */
1358 if (session->state != ISCSI_STATE_LOGGED_IN &&
1359 session->state != ISCSI_STATE_FAILED) {
1360 /* 1410 /*
1361 * to handle the race between when we set the recovery state 1411 * to handle the race between when we set the recovery state
1362 * and block the session we requeue here (commands could 1412 * and block the session we requeue here (commands could
@@ -1364,12 +1414,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1364 * up because the block code is not locked) 1414 * up because the block code is not locked)
1365 */ 1415 */
1366 switch (session->state) { 1416 switch (session->state) {
1417 case ISCSI_STATE_FAILED:
1367 case ISCSI_STATE_IN_RECOVERY: 1418 case ISCSI_STATE_IN_RECOVERY:
1368 reason = FAILURE_SESSION_IN_RECOVERY; 1419 reason = FAILURE_SESSION_IN_RECOVERY;
1369 goto reject; 1420 sc->result = DID_IMM_RETRY << 16;
1421 break;
1370 case ISCSI_STATE_LOGGING_OUT: 1422 case ISCSI_STATE_LOGGING_OUT:
1371 reason = FAILURE_SESSION_LOGGING_OUT; 1423 reason = FAILURE_SESSION_LOGGING_OUT;
1372 goto reject; 1424 sc->result = DID_IMM_RETRY << 16;
1425 break;
1373 case ISCSI_STATE_RECOVERY_FAILED: 1426 case ISCSI_STATE_RECOVERY_FAILED:
1374 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1427 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1375 sc->result = DID_TRANSPORT_FAILFAST << 16; 1428 sc->result = DID_TRANSPORT_FAILFAST << 16;
@@ -1402,9 +1455,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1402 reason = FAILURE_OOM; 1455 reason = FAILURE_OOM;
1403 goto reject; 1456 goto reject;
1404 } 1457 }
1405 list_add_tail(&task->running, &conn->xmitqueue);
1406 1458
1407 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 1459 if (!ihost->workq) {
1408 reason = iscsi_prep_scsi_cmd_pdu(task); 1460 reason = iscsi_prep_scsi_cmd_pdu(task);
1409 if (reason) { 1461 if (reason) {
1410 if (reason == -ENOMEM) { 1462 if (reason == -ENOMEM) {
@@ -1419,8 +1471,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1419 reason = FAILURE_SESSION_NOT_READY; 1471 reason = FAILURE_SESSION_NOT_READY;
1420 goto prepd_reject; 1472 goto prepd_reject;
1421 } 1473 }
1422 } else 1474 } else {
1475 list_add_tail(&task->running, &conn->cmdqueue);
1423 iscsi_conn_queue_work(conn); 1476 iscsi_conn_queue_work(conn);
1477 }
1424 1478
1425 session->queued_cmdsn++; 1479 session->queued_cmdsn++;
1426 spin_unlock(&session->lock); 1480 spin_unlock(&session->lock);
@@ -1429,7 +1483,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1429 1483
1430prepd_reject: 1484prepd_reject:
1431 sc->scsi_done = NULL; 1485 sc->scsi_done = NULL;
1432 iscsi_complete_command(task); 1486 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1433reject: 1487reject:
1434 spin_unlock(&session->lock); 1488 spin_unlock(&session->lock);
1435 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", 1489 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1439,7 +1493,7 @@ reject:
1439 1493
1440prepd_fault: 1494prepd_fault:
1441 sc->scsi_done = NULL; 1495 sc->scsi_done = NULL;
1442 iscsi_complete_command(task); 1496 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1443fault: 1497fault:
1444 spin_unlock(&session->lock); 1498 spin_unlock(&session->lock);
1445 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", 1499 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -1608,44 +1662,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1608 * Fail commands. session lock held and recv side suspended and xmit 1662 * Fail commands. session lock held and recv side suspended and xmit
1609 * thread flushed 1663 * thread flushed
1610 */ 1664 */
1611static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, 1665static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
1612 int error) 1666 int error)
1613{ 1667{
1614 struct iscsi_task *task, *tmp; 1668 struct iscsi_task *task;
1615 1669 int i;
1616 if (conn->task) {
1617 if (lun == -1 ||
1618 (conn->task->sc && conn->task->sc->device->lun == lun))
1619 conn->task = NULL;
1620 }
1621 1670
1622 /* flush pending */ 1671 for (i = 0; i < conn->session->cmds_max; i++) {
1623 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { 1672 task = conn->session->cmds[i];
1624 if (lun == task->sc->device->lun || lun == -1) { 1673 if (!task->sc || task->state == ISCSI_TASK_FREE)
1625 ISCSI_DBG_SESSION(conn->session, 1674 continue;
1626 "failing pending sc %p itt 0x%x\n",
1627 task->sc, task->itt);
1628 fail_command(conn, task, error << 16);
1629 }
1630 }
1631 1675
1632 list_for_each_entry_safe(task, tmp, &conn->requeue, running) { 1676 if (lun != -1 && lun != task->sc->device->lun)
1633 if (lun == task->sc->device->lun || lun == -1) { 1677 continue;
1634 ISCSI_DBG_SESSION(conn->session,
1635 "failing requeued sc %p itt 0x%x\n",
1636 task->sc, task->itt);
1637 fail_command(conn, task, error << 16);
1638 }
1639 }
1640 1678
1641 /* fail all other running */ 1679 ISCSI_DBG_SESSION(conn->session,
1642 list_for_each_entry_safe(task, tmp, &conn->run_list, running) { 1680 "failing sc %p itt 0x%x state %d\n",
1643 if (lun == task->sc->device->lun || lun == -1) { 1681 task->sc, task->itt, task->state);
1644 ISCSI_DBG_SESSION(conn->session, 1682 fail_scsi_task(task, error);
1645 "failing in progress sc %p itt 0x%x\n",
1646 task->sc, task->itt);
1647 fail_command(conn, task, error << 16);
1648 }
1649 } 1683 }
1650} 1684}
1651 1685
@@ -1655,7 +1689,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
1655 struct iscsi_host *ihost = shost_priv(shost); 1689 struct iscsi_host *ihost = shost_priv(shost);
1656 1690
1657 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1691 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1658 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1692 if (ihost->workq)
1659 flush_workqueue(ihost->workq); 1693 flush_workqueue(ihost->workq);
1660} 1694}
1661EXPORT_SYMBOL_GPL(iscsi_suspend_tx); 1695EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
@@ -1663,8 +1697,23 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1663static void iscsi_start_tx(struct iscsi_conn *conn) 1697static void iscsi_start_tx(struct iscsi_conn *conn)
1664{ 1698{
1665 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1699 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1666 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1700 iscsi_conn_queue_work(conn);
1667 iscsi_conn_queue_work(conn); 1701}
1702
1703/*
1704 * A ping has timed out only if one is actually in flight, its window
1705 * has expired, and we are not simply busy processing a pdu that is
1706 * making progress but got started before the ping and is taking a
1707 * while to complete, leaving the ping stuck in a queue behind it.
1708 */
1709static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1710{
1711 if (conn->ping_task &&
1712 time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1713 (conn->ping_timeout * HZ), jiffies))
1714 return 1;
1715 else
1716 return 0;
1668} 1717}
1669 1718
1670static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1719static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
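As a concrete reading of iscsi_has_ping_timed_out() above: the nop-out is only declared lost once both recv_timeout and ping_timeout have elapsed since the last receive, so a slowly progressing pdu that keeps last_recv moving never trips it. The stand-alone sketch below reproduces that arithmetic for illustration only; the function name, the plain >= comparison (the driver uses the wraparound-safe time_before_eq()), and the example HZ value are assumptions, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: mirrors the window checked by iscsi_has_ping_timed_out(),
 * with seconds-based timeouts scaled by a hypothetical tick rate. */
static bool ping_timed_out(unsigned long now, unsigned long last_recv,
                           unsigned int recv_timeout_s,
                           unsigned int ping_timeout_s,
                           unsigned long hz, bool ping_in_flight)
{
        unsigned long deadline = last_recv +
                                 (recv_timeout_s + ping_timeout_s) * hz;

        return ping_in_flight && now >= deadline;
}

int main(void)
{
        /* recv_timeout=5s, ping_timeout=10s, HZ=1000: a ping sent after 5s
         * of silence is only treated as lost 15s after the last receive. */
        printf("%d\n", ping_timed_out(16000, 1000, 5, 10, 1000, true));
        return 0;
}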
@@ -1702,16 +1751,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1702 * if the ping timedout then we are in the middle of cleaning up 1751 * if the ping timedout then we are in the middle of cleaning up
1703 * and can let the iscsi eh handle it 1752 * and can let the iscsi eh handle it
1704 */ 1753 */
1705 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + 1754 if (iscsi_has_ping_timed_out(conn)) {
1706 (conn->ping_timeout * HZ), jiffies))
1707 rc = BLK_EH_RESET_TIMER; 1755 rc = BLK_EH_RESET_TIMER;
1756 goto done;
1757 }
1708 /* 1758 /*
1709 * if we are about to check the transport then give the command 1759 * if we are about to check the transport then give the command
1710 * more time 1760 * more time
1711 */ 1761 */
1712 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), 1762 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1713 jiffies)) 1763 jiffies)) {
1714 rc = BLK_EH_RESET_TIMER; 1764 rc = BLK_EH_RESET_TIMER;
1765 goto done;
1766 }
1767
1715 /* if in the middle of checking the transport then give us more time */ 1768 /* if in the middle of checking the transport then give us more time */
1716 if (conn->ping_task) 1769 if (conn->ping_task)
1717 rc = BLK_EH_RESET_TIMER; 1770 rc = BLK_EH_RESET_TIMER;
@@ -1738,13 +1791,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1738 1791
1739 recv_timeout *= HZ; 1792 recv_timeout *= HZ;
1740 last_recv = conn->last_recv; 1793 last_recv = conn->last_recv;
1741 if (conn->ping_task && 1794
1742 time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), 1795 if (iscsi_has_ping_timed_out(conn)) {
1743 jiffies)) {
1744 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " 1796 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1745 "expired, last rx %lu, last ping %lu, " 1797 "expired, recv timeout %d, last rx %lu, "
1746 "now %lu\n", conn->ping_timeout, last_recv, 1798 "last ping %lu, now %lu\n",
1747 conn->last_ping, jiffies); 1799 conn->ping_timeout, conn->recv_timeout,
1800 last_recv, conn->last_ping, jiffies);
1748 spin_unlock(&session->lock); 1801 spin_unlock(&session->lock);
1749 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1802 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1750 return; 1803 return;
@@ -1788,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1788 cls_session = starget_to_session(scsi_target(sc->device)); 1841 cls_session = starget_to_session(scsi_target(sc->device));
1789 session = cls_session->dd_data; 1842 session = cls_session->dd_data;
1790 1843
1844 ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
1845
1791 mutex_lock(&session->eh_mutex); 1846 mutex_lock(&session->eh_mutex);
1792 spin_lock_bh(&session->lock); 1847 spin_lock_bh(&session->lock);
1793 /* 1848 /*
@@ -1810,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1810 sc->SCp.phase != session->age) { 1865 sc->SCp.phase != session->age) {
1811 spin_unlock_bh(&session->lock); 1866 spin_unlock_bh(&session->lock);
1812 mutex_unlock(&session->eh_mutex); 1867 mutex_unlock(&session->eh_mutex);
1868 ISCSI_DBG_SESSION(session, "failing abort due to dropped "
1869 "session.\n");
1813 return FAILED; 1870 return FAILED;
1814 } 1871 }
1815 1872
@@ -1829,7 +1886,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1829 } 1886 }
1830 1887
1831 if (task->state == ISCSI_TASK_PENDING) { 1888 if (task->state == ISCSI_TASK_PENDING) {
1832 fail_command(conn, task, DID_ABORT << 16); 1889 fail_scsi_task(task, DID_ABORT);
1833 goto success; 1890 goto success;
1834 } 1891 }
1835 1892
@@ -1860,7 +1917,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1860 * then sent more data for the cmd. 1917 * then sent more data for the cmd.
1861 */ 1918 */
1862 spin_lock(&session->lock); 1919 spin_lock(&session->lock);
1863 fail_command(conn, task, DID_ABORT << 16); 1920 fail_scsi_task(task, DID_ABORT);
1864 conn->tmf_state = TMF_INITIAL; 1921 conn->tmf_state = TMF_INITIAL;
1865 spin_unlock(&session->lock); 1922 spin_unlock(&session->lock);
1866 iscsi_start_tx(conn); 1923 iscsi_start_tx(conn);
@@ -1967,7 +2024,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1967 iscsi_suspend_tx(conn); 2024 iscsi_suspend_tx(conn);
1968 2025
1969 spin_lock_bh(&session->lock); 2026 spin_lock_bh(&session->lock);
1970 fail_all_commands(conn, sc->device->lun, DID_ERROR); 2027 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
1971 conn->tmf_state = TMF_INITIAL; 2028 conn->tmf_state = TMF_INITIAL;
1972 spin_unlock_bh(&session->lock); 2029 spin_unlock_bh(&session->lock);
1973 2030
@@ -2274,6 +2331,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2274 if (cmd_task_size) 2331 if (cmd_task_size)
2275 task->dd_data = &task[1]; 2332 task->dd_data = &task[1];
2276 task->itt = cmd_i; 2333 task->itt = cmd_i;
2334 task->state = ISCSI_TASK_FREE;
2277 INIT_LIST_HEAD(&task->running); 2335 INIT_LIST_HEAD(&task->running);
2278 } 2336 }
2279 2337
@@ -2360,10 +2418,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2360 conn->transport_timer.data = (unsigned long)conn; 2418 conn->transport_timer.data = (unsigned long)conn;
2361 conn->transport_timer.function = iscsi_check_transport_timeouts; 2419 conn->transport_timer.function = iscsi_check_transport_timeouts;
2362 2420
2363 INIT_LIST_HEAD(&conn->run_list);
2364 INIT_LIST_HEAD(&conn->mgmt_run_list);
2365 INIT_LIST_HEAD(&conn->mgmtqueue); 2421 INIT_LIST_HEAD(&conn->mgmtqueue);
2366 INIT_LIST_HEAD(&conn->xmitqueue); 2422 INIT_LIST_HEAD(&conn->cmdqueue);
2367 INIT_LIST_HEAD(&conn->requeue); 2423 INIT_LIST_HEAD(&conn->requeue);
2368 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2424 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2369 2425
@@ -2531,27 +2587,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2531EXPORT_SYMBOL_GPL(iscsi_conn_start); 2587EXPORT_SYMBOL_GPL(iscsi_conn_start);
2532 2588
2533static void 2589static void
2534flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) 2590fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
2535{ 2591{
2536 struct iscsi_task *task, *tmp; 2592 struct iscsi_task *task;
2593 int i, state;
2537 2594
2538 /* handle pending */ 2595 for (i = 0; i < conn->session->cmds_max; i++) {
2539 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { 2596 task = conn->session->cmds[i];
2540 ISCSI_DBG_SESSION(session, "flushing pending mgmt task " 2597 if (task->sc)
2541 "itt 0x%x\n", task->itt); 2598 continue;
2542 /* release ref from prep task */
2543 __iscsi_put_task(task);
2544 }
2545 2599
2546 /* handle running */ 2600 if (task->state == ISCSI_TASK_FREE)
2547 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { 2601 continue;
2548 ISCSI_DBG_SESSION(session, "flushing running mgmt task " 2602
2549 "itt 0x%x\n", task->itt); 2603 ISCSI_DBG_SESSION(conn->session,
2550 /* release ref from prep task */ 2604 "failing mgmt itt 0x%x state %d\n",
2551 __iscsi_put_task(task); 2605 task->itt, task->state);
2552 } 2606 state = ISCSI_TASK_ABRT_SESS_RECOV;
2607 if (task->state == ISCSI_TASK_PENDING)
2608 state = ISCSI_TASK_COMPLETED;
2609 iscsi_complete_task(task, state);
2553 2610
2554 conn->task = NULL; 2611 }
2555} 2612}
2556 2613
2557static void iscsi_start_session_recovery(struct iscsi_session *session, 2614static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2559,8 +2616,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2559{ 2616{
2560 int old_stop_stage; 2617 int old_stop_stage;
2561 2618
2562 del_timer_sync(&conn->transport_timer);
2563
2564 mutex_lock(&session->eh_mutex); 2619 mutex_lock(&session->eh_mutex);
2565 spin_lock_bh(&session->lock); 2620 spin_lock_bh(&session->lock);
2566 if (conn->stop_stage == STOP_CONN_TERM) { 2621 if (conn->stop_stage == STOP_CONN_TERM) {
@@ -2578,13 +2633,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2578 session->state = ISCSI_STATE_TERMINATE; 2633 session->state = ISCSI_STATE_TERMINATE;
2579 else if (conn->stop_stage != STOP_CONN_RECOVER) 2634 else if (conn->stop_stage != STOP_CONN_RECOVER)
2580 session->state = ISCSI_STATE_IN_RECOVERY; 2635 session->state = ISCSI_STATE_IN_RECOVERY;
2636 spin_unlock_bh(&session->lock);
2637
2638 del_timer_sync(&conn->transport_timer);
2639 iscsi_suspend_tx(conn);
2581 2640
2641 spin_lock_bh(&session->lock);
2582 old_stop_stage = conn->stop_stage; 2642 old_stop_stage = conn->stop_stage;
2583 conn->stop_stage = flag; 2643 conn->stop_stage = flag;
2584 conn->c_stage = ISCSI_CONN_STOPPED; 2644 conn->c_stage = ISCSI_CONN_STOPPED;
2585 spin_unlock_bh(&session->lock); 2645 spin_unlock_bh(&session->lock);
2586 2646
2587 iscsi_suspend_tx(conn);
2588 /* 2647 /*
2589 * for connection level recovery we should not calculate 2648 * for connection level recovery we should not calculate
2590 * header digest. conn->hdr_size used for optimization 2649 * header digest. conn->hdr_size used for optimization
@@ -2605,11 +2664,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2605 * flush queues. 2664 * flush queues.
2606 */ 2665 */
2607 spin_lock_bh(&session->lock); 2666 spin_lock_bh(&session->lock);
2608 if (flag == STOP_CONN_RECOVER) 2667 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
2609 fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); 2668 fail_mgmt_tasks(session, conn);
2610 else
2611 fail_all_commands(conn, -1, DID_ERROR);
2612 flush_control_queues(session, conn);
2613 spin_unlock_bh(&session->lock); 2669 spin_unlock_bh(&session->lock);
2614 mutex_unlock(&session->eh_mutex); 2670 mutex_unlock(&session->eh_mutex);
2615} 2671}
@@ -2651,6 +2707,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2651} 2707}
2652EXPORT_SYMBOL_GPL(iscsi_conn_bind); 2708EXPORT_SYMBOL_GPL(iscsi_conn_bind);
2653 2709
2710static int iscsi_switch_str_param(char **param, char *new_val_buf)
2711{
2712 char *new_val;
2713
2714 if (*param) {
2715 if (!strcmp(*param, new_val_buf))
2716 return 0;
2717 }
2718
2719 new_val = kstrdup(new_val_buf, GFP_NOIO);
2720 if (!new_val)
2721 return -ENOMEM;
2722
2723 kfree(*param);
2724 *param = new_val;
2725 return 0;
2726}
2654 2727
2655int iscsi_set_param(struct iscsi_cls_conn *cls_conn, 2728int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2656 enum iscsi_param param, char *buf, int buflen) 2729 enum iscsi_param param, char *buf, int buflen)
@@ -2723,38 +2796,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2723 sscanf(buf, "%u", &conn->exp_statsn); 2796 sscanf(buf, "%u", &conn->exp_statsn);
2724 break; 2797 break;
2725 case ISCSI_PARAM_USERNAME: 2798 case ISCSI_PARAM_USERNAME:
2726 kfree(session->username); 2799 return iscsi_switch_str_param(&session->username, buf);
2727 session->username = kstrdup(buf, GFP_KERNEL);
2728 if (!session->username)
2729 return -ENOMEM;
2730 break;
2731 case ISCSI_PARAM_USERNAME_IN: 2800 case ISCSI_PARAM_USERNAME_IN:
2732 kfree(session->username_in); 2801 return iscsi_switch_str_param(&session->username_in, buf);
2733 session->username_in = kstrdup(buf, GFP_KERNEL);
2734 if (!session->username_in)
2735 return -ENOMEM;
2736 break;
2737 case ISCSI_PARAM_PASSWORD: 2802 case ISCSI_PARAM_PASSWORD:
2738 kfree(session->password); 2803 return iscsi_switch_str_param(&session->password, buf);
2739 session->password = kstrdup(buf, GFP_KERNEL);
2740 if (!session->password)
2741 return -ENOMEM;
2742 break;
2743 case ISCSI_PARAM_PASSWORD_IN: 2804 case ISCSI_PARAM_PASSWORD_IN:
2744 kfree(session->password_in); 2805 return iscsi_switch_str_param(&session->password_in, buf);
2745 session->password_in = kstrdup(buf, GFP_KERNEL);
2746 if (!session->password_in)
2747 return -ENOMEM;
2748 break;
2749 case ISCSI_PARAM_TARGET_NAME: 2806 case ISCSI_PARAM_TARGET_NAME:
2750 /* this should not change between logins */ 2807 return iscsi_switch_str_param(&session->targetname, buf);
2751 if (session->targetname)
2752 break;
2753
2754 session->targetname = kstrdup(buf, GFP_KERNEL);
2755 if (!session->targetname)
2756 return -ENOMEM;
2757 break;
2758 case ISCSI_PARAM_TPGT: 2808 case ISCSI_PARAM_TPGT:
2759 sscanf(buf, "%d", &session->tpgt); 2809 sscanf(buf, "%d", &session->tpgt);
2760 break; 2810 break;
@@ -2762,25 +2812,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2762 sscanf(buf, "%d", &conn->persistent_port); 2812 sscanf(buf, "%d", &conn->persistent_port);
2763 break; 2813 break;
2764 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2814 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2765 /* 2815 return iscsi_switch_str_param(&conn->persistent_address, buf);
2766 * this is the address returned in discovery so it should
2767 * not change between logins.
2768 */
2769 if (conn->persistent_address)
2770 break;
2771
2772 conn->persistent_address = kstrdup(buf, GFP_KERNEL);
2773 if (!conn->persistent_address)
2774 return -ENOMEM;
2775 break;
2776 case ISCSI_PARAM_IFACE_NAME: 2816 case ISCSI_PARAM_IFACE_NAME:
2777 if (!session->ifacename) 2817 return iscsi_switch_str_param(&session->ifacename, buf);
2778 session->ifacename = kstrdup(buf, GFP_KERNEL);
2779 break;
2780 case ISCSI_PARAM_INITIATOR_NAME: 2818 case ISCSI_PARAM_INITIATOR_NAME:
2781 if (!session->initiatorname) 2819 return iscsi_switch_str_param(&session->initiatorname, buf);
2782 session->initiatorname = kstrdup(buf, GFP_KERNEL);
2783 break;
2784 default: 2820 default:
2785 return -ENOSYS; 2821 return -ENOSYS;
2786 } 2822 }
@@ -2851,10 +2887,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2851 len = sprintf(buf, "%s\n", session->ifacename); 2887 len = sprintf(buf, "%s\n", session->ifacename);
2852 break; 2888 break;
2853 case ISCSI_PARAM_INITIATOR_NAME: 2889 case ISCSI_PARAM_INITIATOR_NAME:
2854 if (!session->initiatorname) 2890 len = sprintf(buf, "%s\n", session->initiatorname);
2855 len = sprintf(buf, "%s\n", "unknown");
2856 else
2857 len = sprintf(buf, "%s\n", session->initiatorname);
2858 break; 2891 break;
2859 default: 2892 default:
2860 return -ENOSYS; 2893 return -ENOSYS;
@@ -2920,29 +2953,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2920 2953
2921 switch (param) { 2954 switch (param) {
2922 case ISCSI_HOST_PARAM_NETDEV_NAME: 2955 case ISCSI_HOST_PARAM_NETDEV_NAME:
2923 if (!ihost->netdev) 2956 len = sprintf(buf, "%s\n", ihost->netdev);
2924 len = sprintf(buf, "%s\n", "default");
2925 else
2926 len = sprintf(buf, "%s\n", ihost->netdev);
2927 break; 2957 break;
2928 case ISCSI_HOST_PARAM_HWADDRESS: 2958 case ISCSI_HOST_PARAM_HWADDRESS:
2929 if (!ihost->hwaddress) 2959 len = sprintf(buf, "%s\n", ihost->hwaddress);
2930 len = sprintf(buf, "%s\n", "default");
2931 else
2932 len = sprintf(buf, "%s\n", ihost->hwaddress);
2933 break; 2960 break;
2934 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2961 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2935 if (!ihost->initiatorname) 2962 len = sprintf(buf, "%s\n", ihost->initiatorname);
2936 len = sprintf(buf, "%s\n", "unknown");
2937 else
2938 len = sprintf(buf, "%s\n", ihost->initiatorname);
2939 break; 2963 break;
2940 case ISCSI_HOST_PARAM_IPADDRESS: 2964 case ISCSI_HOST_PARAM_IPADDRESS:
2941 if (!strlen(ihost->local_address)) 2965 len = sprintf(buf, "%s\n", ihost->local_address);
2942 len = sprintf(buf, "%s\n", "unknown");
2943 else
2944 len = sprintf(buf, "%s\n",
2945 ihost->local_address);
2946 break; 2966 break;
2947 default: 2967 default:
2948 return -ENOSYS; 2968 return -ENOSYS;
@@ -2959,17 +2979,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2959 2979
2960 switch (param) { 2980 switch (param) {
2961 case ISCSI_HOST_PARAM_NETDEV_NAME: 2981 case ISCSI_HOST_PARAM_NETDEV_NAME:
2962 if (!ihost->netdev) 2982 return iscsi_switch_str_param(&ihost->netdev, buf);
2963 ihost->netdev = kstrdup(buf, GFP_KERNEL);
2964 break;
2965 case ISCSI_HOST_PARAM_HWADDRESS: 2983 case ISCSI_HOST_PARAM_HWADDRESS:
2966 if (!ihost->hwaddress) 2984 return iscsi_switch_str_param(&ihost->hwaddress, buf);
2967 ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
2968 break;
2969 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2985 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2970 if (!ihost->initiatorname) 2986 return iscsi_switch_str_param(&ihost->initiatorname, buf);
2971 ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
2972 break;
2973 default: 2987 default:
2974 return -ENOSYS; 2988 return -ENOSYS;
2975 } 2989 }
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index b579ca9f4836..2bc07090321d 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
440 struct iscsi_tcp_task *tcp_task = task->dd_data; 440 struct iscsi_tcp_task *tcp_task = task->dd_data;
441 struct iscsi_r2t_info *r2t; 441 struct iscsi_r2t_info *r2t;
442 442
443 /* nothing to do for mgmt or pending tasks */ 443 /* nothing to do for mgmt */
444 if (!task->sc || task->state == ISCSI_TASK_PENDING) 444 if (!task->sc)
445 return; 445 return;
446 446
447 /* flush task's r2t queues */ 447 /* flush task's r2t queues */
@@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
473 int datasn = be32_to_cpu(rhdr->datasn); 473 int datasn = be32_to_cpu(rhdr->datasn);
474 unsigned total_in_length = scsi_in(task->sc)->length; 474 unsigned total_in_length = scsi_in(task->sc)->length;
475 475
476 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); 476 /*
477 * lib iscsi will update this in the completion handling if there
478 * is status.
479 */
480 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
481 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
482
477 if (tcp_conn->in.datalen == 0) 483 if (tcp_conn->in.datalen == 0)
478 return 0; 484 return 0;
479 485
@@ -857,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
857 int rc = 0; 863 int rc = 0;
858 864
859 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); 865 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
866 /*
867 * Update for each skb instead of pdu, because over slow networks a
868 * data_in's data could take a while to read in. We also want to
869 * account for r2ts.
870 */
871 conn->last_recv = jiffies;
860 872
861 if (unlikely(conn->suspend_rx)) { 873 if (unlikely(conn->suspend_rx)) {
862 ISCSI_DBG_TCP(conn, "Rx suspended!\n"); 874 ISCSI_DBG_TCP(conn, "Rx suspended!\n");
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111ba..540569849099 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -23,6 +23,13 @@
23 23
24struct lpfc_sli2_slim; 24struct lpfc_sli2_slim;
25 25
26#define LPFC_PCI_DEV_LP 0x1
27#define LPFC_PCI_DEV_OC 0x2
28
29#define LPFC_SLI_REV2 2
30#define LPFC_SLI_REV3 3
31#define LPFC_SLI_REV4 4
32
26#define LPFC_MAX_TARGET 4096 /* max number of targets supported */ 33#define LPFC_MAX_TARGET 4096 /* max number of targets supported */
27#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els 34#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
28 requests */ 35 requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
98}; 105};
99 106
100struct hbq_dmabuf { 107struct hbq_dmabuf {
108 struct lpfc_dmabuf hbuf;
101 struct lpfc_dmabuf dbuf; 109 struct lpfc_dmabuf dbuf;
102 uint32_t size; 110 uint32_t size;
103 uint32_t tag; 111 uint32_t tag;
112 struct lpfc_rcqe rcqe;
104}; 113};
105 114
106/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ 115/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
134 } rev; 143 } rev;
135 struct { 144 struct {
136#ifdef __BIG_ENDIAN_BITFIELD 145#ifdef __BIG_ENDIAN_BITFIELD
137 uint32_t rsvd2 :24; /* Reserved */ 146 uint32_t rsvd3 :19; /* Reserved */
147 uint32_t cdss : 1; /* Configure Data Security SLI */
148 uint32_t rsvd2 : 3; /* Reserved */
149 uint32_t cbg : 1; /* Configure BlockGuard */
138 uint32_t cmv : 1; /* Configure Max VPIs */ 150 uint32_t cmv : 1; /* Configure Max VPIs */
139 uint32_t ccrp : 1; /* Config Command Ring Polling */ 151 uint32_t ccrp : 1; /* Config Command Ring Polling */
140 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 164 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
153 uint32_t ccrp : 1; /* Config Command Ring Polling */ 165 uint32_t ccrp : 1; /* Config Command Ring Polling */
154 uint32_t cmv : 1; /* Configure Max VPIs */ 166 uint32_t cmv : 1; /* Configure Max VPIs */
155 uint32_t rsvd2 :24; /* Reserved */ 167 uint32_t cbg : 1; /* Configure BlockGuard */
168 uint32_t rsvd2 : 3; /* Reserved */
169 uint32_t cdss : 1; /* Configure Data Security SLI */
170 uint32_t rsvd3 :19; /* Reserved */
156#endif 171#endif
157 } sli3Feat; 172 } sli3Feat;
158} lpfc_vpd_t; 173} lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
264}; 279};
265 280
266struct lpfc_vport { 281struct lpfc_vport {
267 struct list_head listentry;
268 struct lpfc_hba *phba; 282 struct lpfc_hba *phba;
283 struct list_head listentry;
269 uint8_t port_type; 284 uint8_t port_type;
270#define LPFC_PHYSICAL_PORT 1 285#define LPFC_PHYSICAL_PORT 1
271#define LPFC_NPIV_PORT 2 286#define LPFC_NPIV_PORT 2
@@ -273,6 +288,9 @@ struct lpfc_vport {
273 enum discovery_state port_state; 288 enum discovery_state port_state;
274 289
275 uint16_t vpi; 290 uint16_t vpi;
291 uint16_t vfi;
292 uint8_t vfi_state;
293#define LPFC_VFI_REGISTERED 0x1
276 294
277 uint32_t fc_flag; /* FC flags */ 295 uint32_t fc_flag; /* FC flags */
278/* Several of these flags are HBA centric and should be moved to 296/* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
385#endif 403#endif
386 uint8_t stat_data_enabled; 404 uint8_t stat_data_enabled;
387 uint8_t stat_data_blocked; 405 uint8_t stat_data_blocked;
406 struct list_head rcv_buffer_list;
407 uint32_t vport_flag;
408#define STATIC_VPORT 1
388}; 409};
389 410
390struct hbq_s { 411struct hbq_s {
@@ -420,8 +441,66 @@ enum intr_type_t {
420}; 441};
421 442
422struct lpfc_hba { 443struct lpfc_hba {
444 /* SCSI interface function jump table entries */
445 int (*lpfc_new_scsi_buf)
446 (struct lpfc_vport *, int);
447 struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
448 (struct lpfc_hba *);
449 int (*lpfc_scsi_prep_dma_buf)
450 (struct lpfc_hba *, struct lpfc_scsi_buf *);
451 void (*lpfc_scsi_unprep_dma_buf)
452 (struct lpfc_hba *, struct lpfc_scsi_buf *);
453 void (*lpfc_release_scsi_buf)
454 (struct lpfc_hba *, struct lpfc_scsi_buf *);
455 void (*lpfc_rampdown_queue_depth)
456 (struct lpfc_hba *);
457 void (*lpfc_scsi_prep_cmnd)
458 (struct lpfc_vport *, struct lpfc_scsi_buf *,
459 struct lpfc_nodelist *);
460 int (*lpfc_scsi_prep_task_mgmt_cmd)
461 (struct lpfc_vport *, struct lpfc_scsi_buf *,
462 unsigned int, uint8_t);
463
464 /* IOCB interface function jump table entries */
465 int (*__lpfc_sli_issue_iocb)
466 (struct lpfc_hba *, uint32_t,
467 struct lpfc_iocbq *, uint32_t);
468 void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
469 struct lpfc_iocbq *);
470 int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
471
472
473 IOCB_t * (*lpfc_get_iocb_from_iocbq)
474 (struct lpfc_iocbq *);
475 void (*lpfc_scsi_cmd_iocb_cmpl)
476 (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
477
478 /* MBOX interface function jump table entries */
479 int (*lpfc_sli_issue_mbox)
480 (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
481 /* Slow-path IOCB process function jump table entries */
482 void (*lpfc_sli_handle_slow_ring_event)
483 (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
484 uint32_t mask);
485 /* INIT device interface function jump table entries */
486 int (*lpfc_sli_hbq_to_firmware)
487 (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
488 int (*lpfc_sli_brdrestart)
489 (struct lpfc_hba *);
490 int (*lpfc_sli_brdready)
491 (struct lpfc_hba *, uint32_t);
492 void (*lpfc_handle_eratt)
493 (struct lpfc_hba *);
494 void (*lpfc_stop_port)
495 (struct lpfc_hba *);
496
497
498 /* SLI4 specific HBA data structure */
499 struct lpfc_sli4_hba sli4_hba;
500
423 struct lpfc_sli sli; 501 struct lpfc_sli sli;
424 uint32_t sli_rev; /* SLI2 or SLI3 */ 502 uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
503 uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
425 uint32_t sli3_options; /* Mask of enabled SLI3 options */ 504 uint32_t sli3_options; /* Mask of enabled SLI3 options */
426#define LPFC_SLI3_HBQ_ENABLED 0x01 505#define LPFC_SLI3_HBQ_ENABLED 0x01
427#define LPFC_SLI3_NPIV_ENABLED 0x02 506#define LPFC_SLI3_NPIV_ENABLED 0x02
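The block of function pointers added to struct lpfc_hba above is a jump table: the probe path can fill it in once for SLI-3 or SLI-4 hardware, and the rest of the driver then calls through the pointers instead of testing sli_rev on every I/O. The sketch below shows that general pattern in isolation; every identifier in it (dev_ops, setup_ops, the sli3_/sli4_ helpers) is made up for illustration, is not an lpfc symbol, and the real lpfc setup code is not part of this hunk.

#include <stdio.h>

/* Illustrative jump-table pattern: choose the function set once at probe
 * time, then call through the pointers on the hot path. */
struct dev_ops {
        int  (*issue_iocb)(int ring, const char *payload);
        void (*stop_port)(void);
};

static int sli3_issue_iocb(int ring, const char *payload)
{
        printf("SLI-3 issue on ring %d: %s\n", ring, payload);
        return 0;
}

static void sli3_stop_port(void)
{
        printf("SLI-3 stop port\n");
}

static int sli4_issue_iocb(int ring, const char *payload)
{
        printf("SLI-4 issue via work queue %d: %s\n", ring, payload);
        return 0;
}

static void sli4_stop_port(void)
{
        printf("SLI-4 stop port\n");
}

static void setup_ops(struct dev_ops *ops, int sli_rev)
{
        if (sli_rev >= 4) {
                ops->issue_iocb = sli4_issue_iocb;
                ops->stop_port  = sli4_stop_port;
        } else {
                ops->issue_iocb = sli3_issue_iocb;
                ops->stop_port  = sli3_stop_port;
        }
}

int main(void)
{
        struct dev_ops ops;

        setup_ops(&ops, 4);            /* chosen once, e.g. at PCI probe */
        ops.issue_iocb(0, "READ_10");  /* hot path never rechecks sli_rev */
        ops.stop_port();
        return 0;
}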
@@ -429,6 +508,7 @@ struct lpfc_hba {
429#define LPFC_SLI3_CRP_ENABLED 0x08 508#define LPFC_SLI3_CRP_ENABLED 0x08
430#define LPFC_SLI3_INB_ENABLED 0x10 509#define LPFC_SLI3_INB_ENABLED 0x10
431#define LPFC_SLI3_BG_ENABLED 0x20 510#define LPFC_SLI3_BG_ENABLED 0x20
511#define LPFC_SLI3_DSS_ENABLED 0x40
432 uint32_t iocb_cmd_size; 512 uint32_t iocb_cmd_size;
433 uint32_t iocb_rsp_size; 513 uint32_t iocb_rsp_size;
434 514
@@ -442,8 +522,13 @@ struct lpfc_hba {
442 522
443 uint32_t hba_flag; /* hba generic flags */ 523 uint32_t hba_flag; /* hba generic flags */
444#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 524#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
445 525#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
446#define DEFER_ERATT 0x4 /* Deferred error attention in progress */ 526#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */
527#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */
528#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
529#define FCP_XRI_ABORT_EVENT 0x20
530#define ELS_XRI_ABORT_EVENT 0x40
531#define ASYNC_EVENT 0x80
447 struct lpfc_dmabuf slim2p; 532 struct lpfc_dmabuf slim2p;
448 533
449 MAILBOX_t *mbox; 534 MAILBOX_t *mbox;
@@ -502,6 +587,9 @@ struct lpfc_hba {
502 uint32_t cfg_poll; 587 uint32_t cfg_poll;
503 uint32_t cfg_poll_tmo; 588 uint32_t cfg_poll_tmo;
504 uint32_t cfg_use_msi; 589 uint32_t cfg_use_msi;
590 uint32_t cfg_fcp_imax;
591 uint32_t cfg_fcp_wq_count;
592 uint32_t cfg_fcp_eq_count;
505 uint32_t cfg_sg_seg_cnt; 593 uint32_t cfg_sg_seg_cnt;
506 uint32_t cfg_prot_sg_seg_cnt; 594 uint32_t cfg_prot_sg_seg_cnt;
507 uint32_t cfg_sg_dma_buf_size; 595 uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +599,8 @@ struct lpfc_hba {
511 uint32_t cfg_enable_hba_reset; 599 uint32_t cfg_enable_hba_reset;
512 uint32_t cfg_enable_hba_heartbeat; 600 uint32_t cfg_enable_hba_heartbeat;
513 uint32_t cfg_enable_bg; 601 uint32_t cfg_enable_bg;
602 uint32_t cfg_enable_fip;
603 uint32_t cfg_log_verbose;
514 604
515 lpfc_vpd_t vpd; /* vital product data */ 605 lpfc_vpd_t vpd; /* vital product data */
516 606
@@ -526,11 +616,12 @@ struct lpfc_hba {
526 unsigned long data_flags; 616 unsigned long data_flags;
527 617
528 uint32_t hbq_in_use; /* HBQs in use flag */ 618 uint32_t hbq_in_use; /* HBQs in use flag */
529 struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ 619 struct list_head rb_pend_list; /* Received buffers to be processed */
530 uint32_t hbq_count; /* Count of configured HBQs */ 620 uint32_t hbq_count; /* Count of configured HBQs */
531 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices */ 621 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices */
532 622
533 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 623 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
624 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
534 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 625 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
535 void __iomem *slim_memmap_p; /* Kernel memory mapped address for 626 void __iomem *slim_memmap_p; /* Kernel memory mapped address for
536 PCI BAR0 */ 627 PCI BAR0 */
@@ -593,7 +684,8 @@ struct lpfc_hba {
593 /* pci_mem_pools */ 684 /* pci_mem_pools */
594 struct pci_pool *lpfc_scsi_dma_buf_pool; 685 struct pci_pool *lpfc_scsi_dma_buf_pool;
595 struct pci_pool *lpfc_mbuf_pool; 686 struct pci_pool *lpfc_mbuf_pool;
596 struct pci_pool *lpfc_hbq_pool; 687 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
688 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
597 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 689 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
598 690
599 mempool_t *mbox_mem_pool; 691 mempool_t *mbox_mem_pool;
@@ -609,6 +701,14 @@ struct lpfc_hba {
609 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 701 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
610 uint16_t max_vpi; /* Maximum virtual nports */ 702 uint16_t max_vpi; /* Maximum virtual nports */
611#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ 703#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
704 uint16_t max_vports; /*
705 * For IOV HBAs max_vpi can change
706 * after a reset. max_vports is max
707 * number of vports present. This can
708 * be greater than max_vpi.
709 */
710 uint16_t vpi_base;
711 uint16_t vfi_base;
612 unsigned long *vpi_bmask; /* vpi allocation table */ 712 unsigned long *vpi_bmask; /* vpi allocation table */
613 713
614 /* Data structure used by fabric iocb scheduler */ 714 /* Data structure used by fabric iocb scheduler */
@@ -667,6 +767,11 @@ struct lpfc_hba {
667/* Maximum number of events that can be outstanding at any time*/ 767/* Maximum number of events that can be outstanding at any time*/
668#define LPFC_MAX_EVT_COUNT 512 768#define LPFC_MAX_EVT_COUNT 512
669 atomic_t fast_event_count; 769 atomic_t fast_event_count;
770 struct lpfc_fcf fcf;
771 uint8_t fc_map[3];
772 uint8_t valid_vlan;
773 uint16_t vlan_id;
774 struct list_head fcf_conn_rec_list;
670}; 775};
671 776
672static inline struct Scsi_Host * 777static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c14f0cbdb125..d73e677201f8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,8 +30,10 @@
30#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 34#include "lpfc_hw.h"
34#include "lpfc_sli.h" 35#include "lpfc_sli.h"
36#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 37#include "lpfc_nl.h"
36#include "lpfc_disc.h" 38#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 39#include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
505 return -ENOMEM; 507 return -ENOMEM;
506 508
507 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 509 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
508 pmboxq->mb.mbxCommand = MBX_DOWN_LINK; 510 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
509 pmboxq->mb.mbxOwner = OWN_HOST; 511 pmboxq->u.mb.mbxOwner = OWN_HOST;
510 512
511 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); 513 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
512 514
513 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { 515 if ((mbxstatus == MBX_SUCCESS) &&
516 (pmboxq->u.mb.mbxStatus == 0 ||
517 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
514 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 518 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
515 lpfc_init_link(phba, pmboxq, phba->cfg_topology, 519 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
516 phba->cfg_link_speed); 520 phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
789 uint32_t *mrpi, uint32_t *arpi, 793 uint32_t *mrpi, uint32_t *arpi,
790 uint32_t *mvpi, uint32_t *avpi) 794 uint32_t *mvpi, uint32_t *avpi)
791{ 795{
792 struct lpfc_sli *psli = &phba->sli; 796 struct lpfc_sli *psli = &phba->sli;
797 struct lpfc_mbx_read_config *rd_config;
793 LPFC_MBOXQ_t *pmboxq; 798 LPFC_MBOXQ_t *pmboxq;
794 MAILBOX_t *pmb; 799 MAILBOX_t *pmb;
795 int rc = 0; 800 int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
800 */ 805 */
801 if (phba->link_state < LPFC_LINK_DOWN || 806 if (phba->link_state < LPFC_LINK_DOWN ||
802 !phba->mbox_mem_pool || 807 !phba->mbox_mem_pool ||
803 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 808 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
804 return 0; 809 return 0;
805 810
806 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 811 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
811 return 0; 816 return 0;
812 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 817 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
813 818
814 pmb = &pmboxq->mb; 819 pmb = &pmboxq->u.mb;
815 pmb->mbxCommand = MBX_READ_CONFIG; 820 pmb->mbxCommand = MBX_READ_CONFIG;
816 pmb->mbxOwner = OWN_HOST; 821 pmb->mbxOwner = OWN_HOST;
817 pmboxq->context1 = NULL; 822 pmboxq->context1 = NULL;
818 823
819 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || 824 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
820 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 825 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
821 rc = MBX_NOT_FINISHED; 826 rc = MBX_NOT_FINISHED;
822 else 827 else
823 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 828 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
828 return 0; 833 return 0;
829 } 834 }
830 835
831 if (mrpi) 836 if (phba->sli_rev == LPFC_SLI_REV4) {
832 *mrpi = pmb->un.varRdConfig.max_rpi; 837 rd_config = &pmboxq->u.mqe.un.rd_config;
833 if (arpi) 838 if (mrpi)
834 *arpi = pmb->un.varRdConfig.avail_rpi; 839 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
835 if (mxri) 840 if (arpi)
836 *mxri = pmb->un.varRdConfig.max_xri; 841 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
837 if (axri) 842 phba->sli4_hba.max_cfg_param.rpi_used;
838 *axri = pmb->un.varRdConfig.avail_xri; 843 if (mxri)
839 if (mvpi) 844 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
840 *mvpi = pmb->un.varRdConfig.max_vpi; 845 if (axri)
841 if (avpi) 846 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
842 *avpi = pmb->un.varRdConfig.avail_vpi; 847 phba->sli4_hba.max_cfg_param.xri_used;
848 if (mvpi)
849 *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
850 if (avpi)
851 *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
852 phba->sli4_hba.max_cfg_param.vpi_used;
853 } else {
854 if (mrpi)
855 *mrpi = pmb->un.varRdConfig.max_rpi;
856 if (arpi)
857 *arpi = pmb->un.varRdConfig.avail_rpi;
858 if (mxri)
859 *mxri = pmb->un.varRdConfig.max_xri;
860 if (axri)
861 *axri = pmb->un.varRdConfig.avail_xri;
862 if (mvpi)
863 *mvpi = pmb->un.varRdConfig.max_vpi;
864 if (avpi)
865 *avpi = pmb->un.varRdConfig.avail_vpi;
866 }
843 867
844 mempool_free(pmboxq, phba->mbox_mem_pool); 868 mempool_free(pmboxq, phba->mbox_mem_pool);
845 return 1; 869 return 1;
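The SLI4 branch above pulls the RPI/XRI/VPI limits out of the READ_CONFIG reply with bit-field accessor macros rather than named structure members. As a rough sketch of that accessor convention (the authoritative macros and the real field layouts live in lpfc_hw4.h, which is not part of this hunk; the struct and field names below are illustrative assumptions only):

/*
 * Sketch of the convention: every field X contributes X_SHIFT, X_MASK and
 * X_WORD tokens, and bf_get()/bf_set() splice the value out of or into the
 * containing 32-bit word.
 */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Hypothetical field, not the real READ_CONFIG layout */
struct example_rd_config {
	unsigned int word13;
};
#define example_rpi_count_SHIFT	0
#define example_rpi_count_MASK	0x0000ffff
#define example_rpi_count_WORD	word13

/* bf_get(example_rpi_count, cfg) then evaluates to bits 15:0 of word13. */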
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
2021# lpfc_log_verbose: Only turn this flag on if you are willing to risk being 2045# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
2022# deluged with LOTS of information. 2046# deluged with LOTS of information.
2023# You can set a bit mask to record specific types of verbose messages: 2047# You can set a bit mask to record specific types of verbose messages:
2024# 2048# See lpfc_logmsg.h for definitions.
2025# LOG_ELS 0x1 ELS events
2026# LOG_DISCOVERY 0x2 Link discovery events
2027# LOG_MBOX 0x4 Mailbox events
2028# LOG_INIT 0x8 Initialization events
2029# LOG_LINK_EVENT 0x10 Link events
2030# LOG_FCP 0x40 FCP traffic history
2031# LOG_NODE 0x80 Node table events
2032# LOG_BG 0x200 BlockGuard events
2033# LOG_MISC 0x400 Miscellaneous events
2034# LOG_SLI 0x800 SLI events
2035# LOG_FCP_ERROR 0x1000 Only log FCP errors
2036# LOG_LIBDFC 0x2000 LIBDFC events
2037# LOG_ALL_MSG 0xffff LOG all messages
2038*/ 2049*/
2039LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, 2050LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
2040 "Verbose logging bit-mask"); 2051 "Verbose logging bit-mask");
2041 2052
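For orientation, the verbose value is simply a bitwise OR of the LOG_* flags that are now documented only in lpfc_logmsg.h; a minimal illustration using the three values quoted in the comment block removed above:

/* Values taken from the removed comment; see lpfc_logmsg.h for the full set */
#define LOG_ELS		0x1	/* ELS events */
#define LOG_DISCOVERY	0x2	/* Link discovery events */
#define LOG_MBOX	0x4	/* Mailbox events */

/* A mask of 0x7 records ELS, discovery and mailbox messages */
static const unsigned int example_log_verbose = LOG_ELS | LOG_DISCOVERY | LOG_MBOX;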
2042/* 2053/*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
2266static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, 2277static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
2267 lpfc_topology_show, lpfc_topology_store); 2278 lpfc_topology_show, lpfc_topology_store);
2268 2279
2280/**
2281 * lpfc_static_vport_show - Read callback function for
2282 * lpfc_static_vport sysfs file.
2283 * @dev: Pointer to class device object.
2284 * @attr: device attribute structure.
2285 * @buf: Data buffer.
2286 *
2287 * This function is the read callback function for the
2288 * lpfc_static_vport sysfs file. The lpfc_static_vport
2289 * sysfs file reports the manageability of the vport.
2290 **/
2291static ssize_t
2292lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
2293 char *buf)
2294{
2295 struct Scsi_Host *shost = class_to_shost(dev);
2296 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2297 if (vport->vport_flag & STATIC_VPORT)
2298 sprintf(buf, "1\n");
2299 else
2300 sprintf(buf, "0\n");
2301
2302 return strlen(buf);
2303}
2304
2305/*
2306 * Sysfs attribute to control the statistical data collection.
2307 */
2308static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
2309 lpfc_static_vport_show, NULL);
2269 2310
2270/** 2311/**
2271 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file 2312 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2341 if (vports == NULL) 2382 if (vports == NULL)
2342 return -ENOMEM; 2383 return -ENOMEM;
2343 2384
2344 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2385 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2345 v_shost = lpfc_shost_from_vport(vports[i]); 2386 v_shost = lpfc_shost_from_vport(vports[i]);
2346 spin_lock_irq(v_shost->host_lock); 2387 spin_lock_irq(v_shost->host_lock);
2347 /* Block and reset data collection */ 2388 /* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2356 phba->bucket_base = base; 2397 phba->bucket_base = base;
2357 phba->bucket_step = step; 2398 phba->bucket_step = step;
2358 2399
2359 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2400 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2360 v_shost = lpfc_shost_from_vport(vports[i]); 2401 v_shost = lpfc_shost_from_vport(vports[i]);
2361 2402
2362 /* Unblock data collection */ 2403 /* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2373 if (vports == NULL) 2414 if (vports == NULL)
2374 return -ENOMEM; 2415 return -ENOMEM;
2375 2416
2376 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2417 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2377 v_shost = lpfc_shost_from_vport(vports[i]); 2418 v_shost = lpfc_shost_from_vport(vports[i]);
2378 spin_lock_irq(shost->host_lock); 2419 spin_lock_irq(shost->host_lock);
2379 vports[i]->stat_data_blocked = 1; 2420 vports[i]->stat_data_blocked = 1;
@@ -2844,15 +2885,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
2844/* 2885/*
2845# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 2886# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
2846# support this feature 2887# support this feature
2847# 0 = MSI disabled 2888# 0 = MSI disabled (default)
2848# 1 = MSI enabled 2889# 1 = MSI enabled
2849# 2 = MSI-X enabled (default) 2890# 2 = MSI-X enabled
2850# Value range is [0,2]. Default value is 2. 2891# Value range is [0,2]. Default value is 0.
2851*/ 2892*/
2852LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " 2893LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
2853 "MSI-X (2), if possible"); 2894 "MSI-X (2), if possible");
2854 2895
2855/* 2896/*
2897# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
2898#
2899# Value range is [636,651042]. Default value is 10000.
2900*/
2901LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
2902 "Set the maximum number of fast-path FCP interrupts per second");
2903
2904/*
2905# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
2906#
2907# Value range is [1,31]. Default value is 4.
2908*/
2909LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
2910 "Set the number of fast-path FCP work queues, if possible");
2911
2912/*
2913# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
2914#
2915# Value range is [1,7]. Default value is 1.
2916*/
2917LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
2918 "Set the number of fast-path FCP event queues, if possible");
2919
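The three attributes above follow the driver's LPFC_ATTR_R convention: a single macro invocation declares the module parameter, a bounds-checked init routine and a read-only sysfs attribute. A simplified sketch of that shape, paraphrased rather than quoted from the driver (kernel build context assumed; helper and field names are illustrative):

/* Paraphrased sketch only; the real macro also emits the show routine
 * and DEVICE_ATTR wiring referenced by lpfc_hba_attrs[]. */
#define EXAMPLE_ATTR_R(name, defval, minval, maxval, desc)		\
static unsigned int lpfc_##name = defval;				\
module_param(lpfc_##name, uint, 0);					\
MODULE_PARM_DESC(lpfc_##name, desc);					\
static int lpfc_##name##_init(struct lpfc_hba *phba, unsigned int val)	\
{									\
	if (val >= minval && val <= maxval) {				\
		phba->cfg_##name = val;					\
		return 0;						\
	}								\
	phba->cfg_##name = defval;					\
	return -EINVAL;							\
}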
2920/*
2856# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 2921# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
2857# 0 = HBA resets disabled 2922# 0 = HBA resets disabled
2858# 1 = HBA resets enabled (default) 2923# 1 = HBA resets enabled (default)
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2876*/ 2941*/
2877LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 2942LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
2878 2943
2944/*
2945# lpfc_enable_fip: When set, FIP is required to start discovery. If not
2946# set, the driver will manually add an FCF record when the port has no
2947# FCF records available, and then start discovery.
2948# Value range is [0,1]. Default value is 0 (disabled).
2949*/
2950LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
2951
2879 2952
2880/* 2953/*
2881# lpfc_prot_mask: i 2954# lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
2942 &dev_attr_lpfc_peer_port_login, 3015 &dev_attr_lpfc_peer_port_login,
2943 &dev_attr_lpfc_nodev_tmo, 3016 &dev_attr_lpfc_nodev_tmo,
2944 &dev_attr_lpfc_devloss_tmo, 3017 &dev_attr_lpfc_devloss_tmo,
3018 &dev_attr_lpfc_enable_fip,
2945 &dev_attr_lpfc_fcp_class, 3019 &dev_attr_lpfc_fcp_class,
2946 &dev_attr_lpfc_use_adisc, 3020 &dev_attr_lpfc_use_adisc,
2947 &dev_attr_lpfc_ack0, 3021 &dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
2969 &dev_attr_lpfc_poll, 3043 &dev_attr_lpfc_poll,
2970 &dev_attr_lpfc_poll_tmo, 3044 &dev_attr_lpfc_poll_tmo,
2971 &dev_attr_lpfc_use_msi, 3045 &dev_attr_lpfc_use_msi,
3046 &dev_attr_lpfc_fcp_imax,
3047 &dev_attr_lpfc_fcp_wq_count,
3048 &dev_attr_lpfc_fcp_eq_count,
2972 &dev_attr_lpfc_enable_bg, 3049 &dev_attr_lpfc_enable_bg,
2973 &dev_attr_lpfc_soft_wwnn, 3050 &dev_attr_lpfc_soft_wwnn,
2974 &dev_attr_lpfc_soft_wwpn, 3051 &dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
2991 &dev_attr_lpfc_lun_queue_depth, 3068 &dev_attr_lpfc_lun_queue_depth,
2992 &dev_attr_lpfc_nodev_tmo, 3069 &dev_attr_lpfc_nodev_tmo,
2993 &dev_attr_lpfc_devloss_tmo, 3070 &dev_attr_lpfc_devloss_tmo,
3071 &dev_attr_lpfc_enable_fip,
2994 &dev_attr_lpfc_hba_queue_depth, 3072 &dev_attr_lpfc_hba_queue_depth,
2995 &dev_attr_lpfc_peer_port_login, 3073 &dev_attr_lpfc_peer_port_login,
2996 &dev_attr_lpfc_restrict_login, 3074 &dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
3003 &dev_attr_lpfc_enable_da_id, 3081 &dev_attr_lpfc_enable_da_id,
3004 &dev_attr_lpfc_max_scsicmpl_time, 3082 &dev_attr_lpfc_max_scsicmpl_time,
3005 &dev_attr_lpfc_stat_data_ctrl, 3083 &dev_attr_lpfc_stat_data_ctrl,
3084 &dev_attr_lpfc_static_vport,
3006 NULL, 3085 NULL,
3007}; 3086};
3008 3087
@@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
3199 } 3278 }
3200 } 3279 }
3201 3280
3202 memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off, 3281 memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
3203 buf, count); 3282 buf, count);
3204 3283
3205 phba->sysfs_mbox.offset = off + count; 3284 phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3241 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3320 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3242 struct lpfc_hba *phba = vport->phba; 3321 struct lpfc_hba *phba = vport->phba;
3243 int rc; 3322 int rc;
3323 MAILBOX_t *pmb;
3244 3324
3245 if (off > MAILBOX_CMD_SIZE) 3325 if (off > MAILBOX_CMD_SIZE)
3246 return -ERANGE; 3326 return -ERANGE;
@@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3265 if (off == 0 && 3345 if (off == 0 &&
3266 phba->sysfs_mbox.state == SMBOX_WRITING && 3346 phba->sysfs_mbox.state == SMBOX_WRITING &&
3267 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { 3347 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
3268 3348 pmb = &phba->sysfs_mbox.mbox->u.mb;
3269 switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { 3349 switch (pmb->mbxCommand) {
3270 /* Offline only */ 3350 /* Offline only */
3271 case MBX_INIT_LINK: 3351 case MBX_INIT_LINK:
3272 case MBX_DOWN_LINK: 3352 case MBX_DOWN_LINK:
@@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3283 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 3363 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3284 printk(KERN_WARNING "mbox_read:Command 0x%x " 3364 printk(KERN_WARNING "mbox_read:Command 0x%x "
3285 "is illegal in on-line state\n", 3365 "is illegal in on-line state\n",
3286 phba->sysfs_mbox.mbox->mb.mbxCommand); 3366 pmb->mbxCommand);
3287 sysfs_mbox_idle(phba); 3367 sysfs_mbox_idle(phba);
3288 spin_unlock_irq(&phba->hbalock); 3368 spin_unlock_irq(&phba->hbalock);
3289 return -EPERM; 3369 return -EPERM;
@@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3319 case MBX_CONFIG_PORT: 3399 case MBX_CONFIG_PORT:
3320 case MBX_RUN_BIU_DIAG: 3400 case MBX_RUN_BIU_DIAG:
3321 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", 3401 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
3322 phba->sysfs_mbox.mbox->mb.mbxCommand); 3402 pmb->mbxCommand);
3323 sysfs_mbox_idle(phba); 3403 sysfs_mbox_idle(phba);
3324 spin_unlock_irq(&phba->hbalock); 3404 spin_unlock_irq(&phba->hbalock);
3325 return -EPERM; 3405 return -EPERM;
3326 default: 3406 default:
3327 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", 3407 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
3328 phba->sysfs_mbox.mbox->mb.mbxCommand); 3408 pmb->mbxCommand);
3329 sysfs_mbox_idle(phba); 3409 sysfs_mbox_idle(phba);
3330 spin_unlock_irq(&phba->hbalock); 3410 spin_unlock_irq(&phba->hbalock);
3331 return -EPERM; 3411 return -EPERM;
@@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3335 * or RESTART mailbox commands until the HBA is restarted. 3415 * or RESTART mailbox commands until the HBA is restarted.
3336 */ 3416 */
3337 if (phba->pport->stopped && 3417 if (phba->pport->stopped &&
3338 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY && 3418 pmb->mbxCommand != MBX_DUMP_MEMORY &&
3339 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART && 3419 pmb->mbxCommand != MBX_RESTART &&
3340 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS && 3420 pmb->mbxCommand != MBX_WRITE_VPARMS &&
3341 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN) 3421 pmb->mbxCommand != MBX_WRITE_WWN)
3342 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 3422 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
3343 "1259 mbox: Issued mailbox cmd " 3423 "1259 mbox: Issued mailbox cmd "
3344 "0x%x while in stopped state.\n", 3424 "0x%x while in stopped state.\n",
3345 phba->sysfs_mbox.mbox->mb.mbxCommand); 3425 pmb->mbxCommand);
3346 3426
3347 phba->sysfs_mbox.mbox->vport = vport; 3427 phba->sysfs_mbox.mbox->vport = vport;
3348 3428
@@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3356 } 3436 }
3357 3437
3358 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3438 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3359 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ 3439 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
3360 3440
3361 spin_unlock_irq(&phba->hbalock); 3441 spin_unlock_irq(&phba->hbalock);
3362 rc = lpfc_sli_issue_mbox (phba, 3442 rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3368 spin_unlock_irq(&phba->hbalock); 3448 spin_unlock_irq(&phba->hbalock);
3369 rc = lpfc_sli_issue_mbox_wait (phba, 3449 rc = lpfc_sli_issue_mbox_wait (phba,
3370 phba->sysfs_mbox.mbox, 3450 phba->sysfs_mbox.mbox,
3371 lpfc_mbox_tmo_val(phba, 3451 lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
3372 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
3373 spin_lock_irq(&phba->hbalock); 3452 spin_lock_irq(&phba->hbalock);
3374 } 3453 }
3375 3454
@@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3391 return -EAGAIN; 3470 return -EAGAIN;
3392 } 3471 }
3393 3472
3394 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); 3473 memcpy(buf, (uint8_t *) pmb + off, count);
3395 3474
3396 phba->sysfs_mbox.offset = off + count; 3475 phba->sysfs_mbox.offset = off + count;
3397 3476
@@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
3585 case LA_8GHZ_LINK: 3664 case LA_8GHZ_LINK:
3586 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 3665 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
3587 break; 3666 break;
3667 case LA_10GHZ_LINK:
3668 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
3669 break;
3588 default: 3670 default:
3589 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 3671 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
3590 break; 3672 break;
@@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3652 */ 3734 */
3653 if (phba->link_state < LPFC_LINK_DOWN || 3735 if (phba->link_state < LPFC_LINK_DOWN ||
3654 !phba->mbox_mem_pool || 3736 !phba->mbox_mem_pool ||
3655 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 3737 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
3656 return NULL; 3738 return NULL;
3657 3739
3658 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 3740 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
3663 return NULL; 3745 return NULL;
3664 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 3746 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
3665 3747
3666 pmb = &pmboxq->mb; 3748 pmb = &pmboxq->u.mb;
3667 pmb->mbxCommand = MBX_READ_STATUS; 3749 pmb->mbxCommand = MBX_READ_STATUS;
3668 pmb->mbxOwner = OWN_HOST; 3750 pmb->mbxOwner = OWN_HOST;
3669 pmboxq->context1 = NULL; 3751 pmboxq->context1 = NULL;
3670 pmboxq->vport = vport; 3752 pmboxq->vport = vport;
3671 3753
3672 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3754 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3673 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3755 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3674 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3756 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3675 else 3757 else
3676 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3758 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3695 pmboxq->vport = vport; 3777 pmboxq->vport = vport;
3696 3778
3697 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3779 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3698 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3780 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3699 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3781 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3700 else 3782 else
3701 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3783 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3769 return; 3851 return;
3770 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 3852 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3771 3853
3772 pmb = &pmboxq->mb; 3854 pmb = &pmboxq->u.mb;
3773 pmb->mbxCommand = MBX_READ_STATUS; 3855 pmb->mbxCommand = MBX_READ_STATUS;
3774 pmb->mbxOwner = OWN_HOST; 3856 pmb->mbxOwner = OWN_HOST;
3775 pmb->un.varWords[0] = 0x1; /* reset request */ 3857 pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3859,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3777 pmboxq->vport = vport; 3859 pmboxq->vport = vport;
3778 3860
3779 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3861 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3780 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3862 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3781 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3863 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3782 else 3864 else
3783 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3865 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3877,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3795 pmboxq->vport = vport; 3877 pmboxq->vport = vport;
3796 3878
3797 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3879 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3798 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3880 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3799 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3881 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3800 else 3882 else
3801 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3883 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
3962 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 4044 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3963} 4045}
3964 4046
4047/**
4048 * lpfc_hba_log_verbose_init - Set hba's log verbose level
4049 * @phba: Pointer to lpfc_hba struct.
4050 * @verbose: verbose level to set.
4051 * This function is called by the lpfc_get_cfgparam() routine to copy the
4052 * module parameter lpfc_log_verbose into the @phba cfg_log_verbose field,
4053 * which controls log message verbosity before any hba port or vport is
4054 * created.
4055 **/
4056static void
4057lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
4058{
4059 phba->cfg_log_verbose = verbose;
4060}
4061
3965struct fc_function_template lpfc_transport_functions = { 4062struct fc_function_template lpfc_transport_functions = {
3966 /* fixed attributes the driver supports */ 4063 /* fixed attributes the driver supports */
3967 .show_host_node_name = 1, 4064 .show_host_node_name = 1,
@@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4105 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 4202 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
4106 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 4203 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
4107 lpfc_use_msi_init(phba, lpfc_use_msi); 4204 lpfc_use_msi_init(phba, lpfc_use_msi);
4205 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
4206 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
4207 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
4108 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4208 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
4109 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4209 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4110 lpfc_enable_bg_init(phba, lpfc_enable_bg); 4210 lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4113 phba->cfg_soft_wwpn = 0L; 4213 phba->cfg_soft_wwpn = 0L;
4114 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 4214 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
4115 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); 4215 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
4116 /*
4117 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4118 * used to create the sg_dma_buf_pool must be dynamically calculated.
4119 * 2 segments are added since the IOCB needs a command and response bde.
4120 */
4121 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4122 sizeof(struct fcp_rsp) +
4123 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4124
4125 if (phba->cfg_enable_bg) {
4126 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4127 phba->cfg_sg_dma_buf_size +=
4128 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4129 }
4130
4131 /* Also reinitialize the host templates with new values. */
4132 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4133 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4134
4135 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4216 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
4217 lpfc_enable_fip_init(phba, lpfc_enable_fip);
4218 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4219
4136 return; 4220 return;
4137} 4221}
4138 4222
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f88ce3f26190..d2a922997c0f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); 25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
27int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
26void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 28void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
27void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 29void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
28 30
@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
35int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); 37int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
36void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 38void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
37void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 39void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
38int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, 40int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
39 LPFC_MBOXQ_t *, uint32_t); 41 LPFC_MBOXQ_t *, uint32_t);
40void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 42void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
41void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 43void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
42void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 44void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
43void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
44void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 46void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
47void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
45 48
46struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 49struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
47void lpfc_cleanup_rpis(struct lpfc_vport *, int); 50void lpfc_cleanup_rpis(struct lpfc_vport *, int);
48int lpfc_linkdown(struct lpfc_hba *); 51int lpfc_linkdown(struct lpfc_hba *);
52void lpfc_linkdown_port(struct lpfc_vport *);
49void lpfc_port_link_failure(struct lpfc_vport *); 53void lpfc_port_link_failure(struct lpfc_vport *);
50void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 54void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
51 55
@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
54void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 58void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
55void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 59void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
56void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 60void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
61void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
57void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); 62void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
58void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 63void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
59struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, 64struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
105int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 110int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
106int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); 111int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
107int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); 112int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
113int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
108int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 114int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
109int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 115int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
110int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, 116int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
149void lpfc_unblock_mgmt_io(struct lpfc_hba *); 155void lpfc_unblock_mgmt_io(struct lpfc_hba *);
150void lpfc_offline_prep(struct lpfc_hba *); 156void lpfc_offline_prep(struct lpfc_hba *);
151void lpfc_offline(struct lpfc_hba *); 157void lpfc_offline(struct lpfc_hba *);
158void lpfc_reset_hba(struct lpfc_hba *);
152 159
153int lpfc_sli_setup(struct lpfc_hba *); 160int lpfc_sli_setup(struct lpfc_hba *);
154int lpfc_sli_queue_setup(struct lpfc_hba *); 161int lpfc_sli_queue_setup(struct lpfc_hba *);
155 162
156void lpfc_handle_eratt(struct lpfc_hba *); 163void lpfc_handle_eratt(struct lpfc_hba *);
157void lpfc_handle_latt(struct lpfc_hba *); 164void lpfc_handle_latt(struct lpfc_hba *);
158irqreturn_t lpfc_intr_handler(int, void *); 165irqreturn_t lpfc_sli_intr_handler(int, void *);
159irqreturn_t lpfc_sp_intr_handler(int, void *); 166irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
160irqreturn_t lpfc_fp_intr_handler(int, void *); 167irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
168irqreturn_t lpfc_sli4_intr_handler(int, void *);
169irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
170irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
161 171
162void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 172void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
163void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); 173void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
165void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 175void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
166void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 176void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
167LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); 177LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
178void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
168void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 179void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
180int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
181int lpfc_mbox_dev_check(struct lpfc_hba *);
169int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 182int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
183void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
184void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
185void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
186void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
187void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
188void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
189void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
170 190
171void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 191void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
172 uint32_t , LPFC_MBOXQ_t *); 192 uint32_t , LPFC_MBOXQ_t *);
173struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); 193struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
174void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); 194void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
195struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
196void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
197void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
198 uint16_t);
199void lpfc_unregister_unused_fcf(struct lpfc_hba *);
175 200
176int lpfc_mem_alloc(struct lpfc_hba *); 201int lpfc_mem_alloc(struct lpfc_hba *, int align);
177void lpfc_mem_free(struct lpfc_hba *); 202void lpfc_mem_free(struct lpfc_hba *);
203void lpfc_mem_free_all(struct lpfc_hba *);
178void lpfc_stop_vport_timers(struct lpfc_vport *); 204void lpfc_stop_vport_timers(struct lpfc_vport *);
179 205
180void lpfc_poll_timeout(unsigned long ptr); 206void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
186uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); 212uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
187void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, 213void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
188 uint32_t); 214 uint32_t);
215void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
189 216
190void lpfc_reset_barrier(struct lpfc_hba * phba); 217void lpfc_reset_barrier(struct lpfc_hba * phba);
191int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 218int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
198int lpfc_sli_hba_down(struct lpfc_hba *); 225int lpfc_sli_hba_down(struct lpfc_hba *);
199int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 226int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
200int lpfc_sli_handle_mb_event(struct lpfc_hba *); 227int lpfc_sli_handle_mb_event(struct lpfc_hba *);
201int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); 228void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
202int lpfc_sli_check_eratt(struct lpfc_hba *); 229int lpfc_sli_check_eratt(struct lpfc_hba *);
203int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 230void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
204 struct lpfc_sli_ring *, uint32_t); 231 struct lpfc_sli_ring *, uint32_t);
232int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
205void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 233void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
206int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, 234int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
207 struct lpfc_iocbq *, uint32_t); 235 struct lpfc_iocbq *, uint32_t);
208void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 236void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
209void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 237void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
237 265
238int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 266int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
239 267
240int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, 268int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
241 struct lpfc_iocbq *, struct lpfc_iocbq *, 269 struct lpfc_iocbq *, struct lpfc_iocbq *,
242 uint32_t); 270 uint32_t);
243void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, 271void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
254const char* lpfc_info(struct Scsi_Host *); 282const char* lpfc_info(struct Scsi_Host *);
255int lpfc_scan_finished(struct Scsi_Host *, unsigned long); 283int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
256 284
285int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
286int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
287int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
288int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
289int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
290
257void lpfc_get_cfgparam(struct lpfc_hba *); 291void lpfc_get_cfgparam(struct lpfc_hba *);
258void lpfc_get_vport_cfgparam(struct lpfc_vport *); 292void lpfc_get_vport_cfgparam(struct lpfc_vport *);
259int lpfc_alloc_sysfs_attr(struct lpfc_vport *); 293int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
314 struct lpfc_iocbq *); 348 struct lpfc_iocbq *);
315struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); 349struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
316void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); 350void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
351void lpfc_create_static_vport(struct lpfc_hba *);
352void lpfc_stop_hba_timers(struct lpfc_hba *);
353void lpfc_stop_port(struct lpfc_hba *);
354void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
355int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
356void lpfc_start_fdiscs(struct lpfc_hba *phba);
317 357
318#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 358#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
319#define HBA_EVENT_RSCN 5 359#define HBA_EVENT_RSCN 5
320#define HBA_EVENT_LINK_UP 2 360#define HBA_EVENT_LINK_UP 2
321#define HBA_EVENT_LINK_DOWN 3 361#define HBA_EVENT_LINK_DOWN 3
362
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e5..1dbccfd3d022 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -32,8 +32,10 @@
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34 34
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
267 uint32_t tmo, uint8_t retry) 269 uint32_t tmo, uint8_t retry)
268{ 270{
269 struct lpfc_hba *phba = vport->phba; 271 struct lpfc_hba *phba = vport->phba;
270 struct lpfc_sli *psli = &phba->sli;
271 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
272 IOCB_t *icmd; 272 IOCB_t *icmd;
273 struct lpfc_iocbq *geniocb; 273 struct lpfc_iocbq *geniocb;
274 int rc; 274 int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; 331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
332 geniocb->vport = vport; 332 geniocb->vport = vport;
333 geniocb->retry = retry; 333 geniocb->retry = retry;
334 rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0); 334 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
335 335
336 if (rc == IOCB_ERROR) { 336 if (rc == IOCB_ERROR) {
337 lpfc_sli_release_iocbq(phba, geniocb); 337 lpfc_sli_release_iocbq(phba, geniocb);
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1578 case LA_8GHZ_LINK: 1578 case LA_8GHZ_LINK:
1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; 1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
1580 break; 1580 break;
1581 case LA_10GHZ_LINK:
1582 ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
1583 break;
1581 default: 1584 default:
1582 ae->un.PortSpeed = 1585 ae->un.PortSpeed =
1583 HBA_PORTSPEED_UNKNOWN; 1586 HBA_PORTSPEED_UNKNOWN;
@@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1730 uint8_t *fwname; 1733 uint8_t *fwname;
1731 1734
1732 if (vp->rev.rBit) { 1735 if (vp->rev.rBit) {
1733 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1736 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1734 rev = vp->rev.sli2FwRev; 1737 rev = vp->rev.sli2FwRev;
1735 else 1738 else
1736 rev = vp->rev.sli1FwRev; 1739 rev = vp->rev.sli1FwRev;
@@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1756 } 1759 }
1757 b4 = (rev & 0x0000000f); 1760 b4 = (rev & 0x0000000f);
1758 1761
1759 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1762 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1760 fwname = vp->rev.sli2FwName; 1763 fwname = vp->rev.sli2FwName;
1761 else 1764 else
1762 fwname = vp->rev.sli1FwName; 1765 fwname = vp->rev.sli1FwName;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07a..2b02b1fb39a0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -33,8 +33,10 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35 35
36#include "lpfc_hw4.h"
36#include "lpfc_hw.h" 37#include "lpfc_hw.h"
37#include "lpfc_sli.h" 38#include "lpfc_sli.h"
39#include "lpfc_sli4.h"
38#include "lpfc_nl.h" 40#include "lpfc_nl.h"
39#include "lpfc_disc.h" 41#include "lpfc_disc.h"
40#include "lpfc_scsi.h" 42#include "lpfc_scsi.h"
@@ -280,6 +282,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
280 struct lpfc_dmabuf *d_buf; 282 struct lpfc_dmabuf *d_buf;
281 struct hbq_dmabuf *hbq_buf; 283 struct hbq_dmabuf *hbq_buf;
282 284
285 if (phba->sli_rev != 3)
286 return 0;
283 cnt = LPFC_HBQINFO_SIZE; 287 cnt = LPFC_HBQINFO_SIZE;
284 spin_lock_irq(&phba->hbalock); 288 spin_lock_irq(&phba->hbalock);
285 289
@@ -489,12 +493,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
489 pring->next_cmdidx, pring->local_getidx, 493 pring->next_cmdidx, pring->local_getidx,
490 pring->flag, pgpp->rspPutInx, pring->numRiocb); 494 pring->flag, pgpp->rspPutInx, pring->numRiocb);
491 } 495 }
492 word0 = readl(phba->HAregaddr); 496
493 word1 = readl(phba->CAregaddr); 497 if (phba->sli_rev <= LPFC_SLI_REV3) {
494 word2 = readl(phba->HSregaddr); 498 word0 = readl(phba->HAregaddr);
495 word3 = readl(phba->HCregaddr); 499 word1 = readl(phba->CAregaddr);
496 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n", 500 word2 = readl(phba->HSregaddr);
497 word0, word1, word2, word3); 501 word3 = readl(phba->HCregaddr);
502 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
503 "HC:%08x\n", word0, word1, word2, word3);
504 }
498 spin_unlock_irq(&phba->hbalock); 505 spin_unlock_irq(&phba->hbalock);
499 return len; 506 return len;
500} 507}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ffd108972072..1142070e9484 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ 135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ 137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
138#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */
138 139
139/* ndlp usage management macros */ 140/* ndlp usage management macros */
140#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 141#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d2..6bdeb14878a2 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
84 uint32_t ha_copy; 86 uint32_t ha_copy;
85 87
86 if (vport->port_state >= LPFC_VPORT_READY || 88 if (vport->port_state >= LPFC_VPORT_READY ||
87 phba->link_state == LPFC_LINK_DOWN) 89 phba->link_state == LPFC_LINK_DOWN ||
90 phba->sli_rev > LPFC_SLI_REV3)
88 return 0; 91 return 0;
89 92
90 /* Read the HBA Host Attention Register */ 93 /* Read the HBA Host Attention Register */
@@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
219 icmd->un.elsreq64.myID = vport->fc_myDID; 222 icmd->un.elsreq64.myID = vport->fc_myDID;
220 223
221 /* For ELS_REQUEST64_CR, use the VPI by default */ 224 /* For ELS_REQUEST64_CR, use the VPI by default */
222 icmd->ulpContext = vport->vpi; 225 icmd->ulpContext = vport->vpi + phba->vpi_base;
223 icmd->ulpCt_h = 0; 226 icmd->ulpCt_h = 0;
224 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 227 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
225 if (elscmd == ELS_CMD_ECHO) 228 if (elscmd == ELS_CMD_ECHO)
@@ -305,7 +308,7 @@ els_iocb_free_pcmb_exit:
305 * 0 - successfully issued fabric registration login for @vport 308 * 0 - successfully issued fabric registration login for @vport
306 * -ENXIO -- failed to issue fabric registration login for @vport 309 * -ENXIO -- failed to issue fabric registration login for @vport
307 **/ 310 **/
308static int 311int
309lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 312lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
310{ 313{
311 struct lpfc_hba *phba = vport->phba; 314 struct lpfc_hba *phba = vport->phba;
@@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
345 err = 4; 348 err = 4;
346 goto fail; 349 goto fail;
347 } 350 }
348 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 351 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
349 0);
350 if (rc) { 352 if (rc) {
351 err = 5; 353 err = 5;
352 goto fail_free_mbox; 354 goto fail_free_mbox;
@@ -386,6 +388,75 @@ fail:
386} 388}
387 389
388/** 390/**
391 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
392 * @vport: pointer to a host virtual N_Port data structure.
393 *
394 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
395 * the @vport. This mailbox command is necessary for FCoE only.
396 *
397 * Return code
398 * 0 - successfully issued REG_VFI for @vport
399 * A failure code otherwise.
400 **/
401static int
402lpfc_issue_reg_vfi(struct lpfc_vport *vport)
403{
404 struct lpfc_hba *phba = vport->phba;
405 LPFC_MBOXQ_t *mboxq;
406 struct lpfc_nodelist *ndlp;
407 struct serv_parm *sp;
408 struct lpfc_dmabuf *dmabuf;
409 int rc = 0;
410
411 sp = &phba->fc_fabparam;
412 ndlp = lpfc_findnode_did(vport, Fabric_DID);
413 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
414 rc = -ENODEV;
415 goto fail;
416 }
417
418 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
419 if (!dmabuf) {
420 rc = -ENOMEM;
421 goto fail;
422 }
423 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
424 if (!dmabuf->virt) {
425 rc = -ENOMEM;
426 goto fail_free_dmabuf;
427 }
428 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
429 if (!mboxq) {
430 rc = -ENOMEM;
431 goto fail_free_coherent;
432 }
433 vport->port_state = LPFC_FABRIC_CFG_LINK;
434 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
435 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
436 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
437 mboxq->vport = vport;
438 mboxq->context1 = dmabuf;
439 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
440 if (rc == MBX_NOT_FINISHED) {
441 rc = -ENXIO;
442 goto fail_free_mbox;
443 }
444 return 0;
445
446fail_free_mbox:
447 mempool_free(mboxq, phba->mbox_mem_pool);
448fail_free_coherent:
449 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
450fail_free_dmabuf:
451 kfree(dmabuf);
452fail:
453 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
454 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
455 "0289 Issue Register VFI failed: Err %d\n", rc);
456 return rc;
457}
458
459/**
389 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 460 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
390 * @vport: pointer to a host virtual N_Port data structure. 461 * @vport: pointer to a host virtual N_Port data structure.
391 * @ndlp: pointer to a node-list data structure. 462 * @ndlp: pointer to a node-list data structure.
@@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
497 } 568 }
498 } 569 }
499 570
500 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 571 if (phba->sli_rev < LPFC_SLI_REV4) {
501 572 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
502 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 573 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
503 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { 574 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
504 lpfc_register_new_vport(phba, vport, ndlp); 575 lpfc_register_new_vport(phba, vport, ndlp);
505 return 0; 576 else
577 lpfc_issue_fabric_reglogin(vport);
578 } else {
579 ndlp->nlp_type |= NLP_FABRIC;
580 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
581 if (vport->vfi_state & LPFC_VFI_REGISTERED) {
582 lpfc_start_fdiscs(phba);
583 lpfc_do_scr_ns_plogi(phba, vport);
584 } else
585 lpfc_issue_reg_vfi(vport);
506 } 586 }
507 lpfc_issue_fabric_reglogin(vport);
508 return 0; 587 return 0;
509} 588}
510
511/** 589/**
512 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 590 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
513 * @vport: pointer to a host virtual N_Port data structure. 591 * @vport: pointer to a host virtual N_Port data structure.
@@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
815 if (sp->cmn.fcphHigh < FC_PH3) 893 if (sp->cmn.fcphHigh < FC_PH3)
816 sp->cmn.fcphHigh = FC_PH3; 894 sp->cmn.fcphHigh = FC_PH3;
817 895
818 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 896 if (phba->sli_rev == LPFC_SLI_REV4) {
897 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
898 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
899 /* FLOGI needs to be 3 for WQE FCFI */
900 /* Set the fcfi to the fcfi we registered with */
901 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
902 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
819 sp->cmn.request_multiple_Nport = 1; 903 sp->cmn.request_multiple_Nport = 1;
820
821 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 904 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
822 icmd->ulpCt_h = 1; 905 icmd->ulpCt_h = 1;
823 icmd->ulpCt_l = 0; 906 icmd->ulpCt_l = 0;
@@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
930 if (!ndlp) 1013 if (!ndlp)
931 return 0; 1014 return 0;
932 lpfc_nlp_init(vport, ndlp, Fabric_DID); 1015 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1016 /* Set the node type */
1017 ndlp->nlp_type |= NLP_FABRIC;
933 /* Put ndlp onto node list */ 1018 /* Put ndlp onto node list */
934 lpfc_enqueue_node(vport, ndlp); 1019 lpfc_enqueue_node(vport, ndlp);
935 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1020 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1350 IOCB_t *icmd; 1435 IOCB_t *icmd;
1351 struct lpfc_nodelist *ndlp; 1436 struct lpfc_nodelist *ndlp;
1352 struct lpfc_iocbq *elsiocb; 1437 struct lpfc_iocbq *elsiocb;
1353 struct lpfc_sli_ring *pring;
1354 struct lpfc_sli *psli; 1438 struct lpfc_sli *psli;
1355 uint8_t *pcmd; 1439 uint8_t *pcmd;
1356 uint16_t cmdsize; 1440 uint16_t cmdsize;
1357 int ret; 1441 int ret;
1358 1442
1359 psli = &phba->sli; 1443 psli = &phba->sli;
1360 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1361 1444
1362 ndlp = lpfc_findnode_did(vport, did); 1445 ndlp = lpfc_findnode_did(vport, did);
1363 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1446 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1474,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1391 1474
1392 phba->fc_stat.elsXmitPLOGI++; 1475 phba->fc_stat.elsXmitPLOGI++;
1393 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1476 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1394 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 1477 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1395 1478
1396 if (ret == IOCB_ERROR) { 1479 if (ret == IOCB_ERROR) {
1397 lpfc_els_free_iocb(phba, elsiocb); 1480 lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1501 PRLI *npr; 1584 PRLI *npr;
1502 IOCB_t *icmd; 1585 IOCB_t *icmd;
1503 struct lpfc_iocbq *elsiocb; 1586 struct lpfc_iocbq *elsiocb;
1504 struct lpfc_sli_ring *pring;
1505 struct lpfc_sli *psli;
1506 uint8_t *pcmd; 1587 uint8_t *pcmd;
1507 uint16_t cmdsize; 1588 uint16_t cmdsize;
1508 1589
1509 psli = &phba->sli;
1510 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1511
1512 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 1590 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1513 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1591 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1514 ndlp->nlp_DID, ELS_CMD_PRLI); 1592 ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1628,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1550 spin_lock_irq(shost->host_lock); 1628 spin_lock_irq(shost->host_lock);
1551 ndlp->nlp_flag |= NLP_PRLI_SND; 1629 ndlp->nlp_flag |= NLP_PRLI_SND;
1552 spin_unlock_irq(shost->host_lock); 1630 spin_unlock_irq(shost->host_lock);
1553 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1631 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1632 IOCB_ERROR) {
1554 spin_lock_irq(shost->host_lock); 1633 spin_lock_irq(shost->host_lock);
1555 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1634 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1556 spin_unlock_irq(shost->host_lock); 1635 spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
1608 * and continue discovery. 1687 * and continue discovery.
1609 */ 1688 */
1610 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1689 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1611 !(vport->fc_flag & FC_RSCN_MODE)) { 1690 !(vport->fc_flag & FC_RSCN_MODE) &&
1691 (phba->sli_rev < LPFC_SLI_REV4)) {
1612 lpfc_issue_reg_vpi(phba, vport); 1692 lpfc_issue_reg_vpi(phba, vport);
1613 return; 1693 return;
1614 } 1694 }
@@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1788 ADISC *ap; 1868 ADISC *ap;
1789 IOCB_t *icmd; 1869 IOCB_t *icmd;
1790 struct lpfc_iocbq *elsiocb; 1870 struct lpfc_iocbq *elsiocb;
1791 struct lpfc_sli *psli = &phba->sli;
1792 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1793 uint8_t *pcmd; 1871 uint8_t *pcmd;
1794 uint16_t cmdsize; 1872 uint16_t cmdsize;
1795 1873
@@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1822 spin_lock_irq(shost->host_lock); 1900 spin_lock_irq(shost->host_lock);
1823 ndlp->nlp_flag |= NLP_ADISC_SND; 1901 ndlp->nlp_flag |= NLP_ADISC_SND;
1824 spin_unlock_irq(shost->host_lock); 1902 spin_unlock_irq(shost->host_lock);
1825 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1903 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1904 IOCB_ERROR) {
1826 spin_lock_irq(shost->host_lock); 1905 spin_lock_irq(shost->host_lock);
1827 ndlp->nlp_flag &= ~NLP_ADISC_SND; 1906 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1828 spin_unlock_irq(shost->host_lock); 1907 spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1937 struct lpfc_hba *phba = vport->phba; 2016 struct lpfc_hba *phba = vport->phba;
1938 IOCB_t *icmd; 2017 IOCB_t *icmd;
1939 struct lpfc_iocbq *elsiocb; 2018 struct lpfc_iocbq *elsiocb;
1940 struct lpfc_sli_ring *pring;
1941 struct lpfc_sli *psli;
1942 uint8_t *pcmd; 2019 uint8_t *pcmd;
1943 uint16_t cmdsize; 2020 uint16_t cmdsize;
1944 int rc; 2021 int rc;
1945 2022
1946 psli = &phba->sli;
1947 pring = &psli->ring[LPFC_ELS_RING];
1948
1949 spin_lock_irq(shost->host_lock); 2023 spin_lock_irq(shost->host_lock);
1950 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2024 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1951 spin_unlock_irq(shost->host_lock); 2025 spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1978 spin_lock_irq(shost->host_lock); 2052 spin_lock_irq(shost->host_lock);
1979 ndlp->nlp_flag |= NLP_LOGO_SND; 2053 ndlp->nlp_flag |= NLP_LOGO_SND;
1980 spin_unlock_irq(shost->host_lock); 2054 spin_unlock_irq(shost->host_lock);
1981 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2055 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1982 2056
1983 if (rc == IOCB_ERROR) { 2057 if (rc == IOCB_ERROR) {
1984 spin_lock_irq(shost->host_lock); 2058 spin_lock_irq(shost->host_lock);
@@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2058 struct lpfc_hba *phba = vport->phba; 2132 struct lpfc_hba *phba = vport->phba;
2059 IOCB_t *icmd; 2133 IOCB_t *icmd;
2060 struct lpfc_iocbq *elsiocb; 2134 struct lpfc_iocbq *elsiocb;
2061 struct lpfc_sli_ring *pring;
2062 struct lpfc_sli *psli; 2135 struct lpfc_sli *psli;
2063 uint8_t *pcmd; 2136 uint8_t *pcmd;
2064 uint16_t cmdsize; 2137 uint16_t cmdsize;
2065 struct lpfc_nodelist *ndlp; 2138 struct lpfc_nodelist *ndlp;
2066 2139
2067 psli = &phba->sli; 2140 psli = &phba->sli;
2068 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2069 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2141 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2070 2142
2071 ndlp = lpfc_findnode_did(vport, nportid); 2143 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2108 2180
2109 phba->fc_stat.elsXmitSCR++; 2181 phba->fc_stat.elsXmitSCR++;
2110 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2182 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2111 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2183 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2184 IOCB_ERROR) {
2112 /* The additional lpfc_nlp_put will cause the following 2185 /* The additional lpfc_nlp_put will cause the following
2113 * lpfc_els_free_iocb routine to trigger the rlease of 2186 * lpfc_els_free_iocb routine to trigger the rlease of
2114 * the node. 2187 * the node.
@@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2152 struct lpfc_hba *phba = vport->phba; 2225 struct lpfc_hba *phba = vport->phba;
2153 IOCB_t *icmd; 2226 IOCB_t *icmd;
2154 struct lpfc_iocbq *elsiocb; 2227 struct lpfc_iocbq *elsiocb;
2155 struct lpfc_sli_ring *pring;
2156 struct lpfc_sli *psli; 2228 struct lpfc_sli *psli;
2157 FARP *fp; 2229 FARP *fp;
2158 uint8_t *pcmd; 2230 uint8_t *pcmd;
@@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2162 struct lpfc_nodelist *ndlp; 2234 struct lpfc_nodelist *ndlp;
2163 2235
2164 psli = &phba->sli; 2236 psli = &phba->sli;
2165 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2166 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2237 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2167 2238
2168 ndlp = lpfc_findnode_did(vport, nportid); 2239 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2219 2290
2220 phba->fc_stat.elsXmitFARPR++; 2291 phba->fc_stat.elsXmitFARPR++;
2221 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2292 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2222 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2293 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2294 IOCB_ERROR) {
2223 /* The additional lpfc_nlp_put will cause the following 2295 /* The additional lpfc_nlp_put will cause the following
2224 * lpfc_els_free_iocb routine to trigger the release of 2296 * lpfc_els_free_iocb routine to trigger the release of
2225 * the node. 2297 * the node.
@@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2949 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3021 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2950 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3022 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2951 3023
3024 /*
3025 * This routine is used to register and unregister in previous SLI
3026 * modes.
3027 */
3028 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3029 (phba->sli_rev == LPFC_SLI_REV4))
3030 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3031
2952 pmb->context1 = NULL; 3032 pmb->context1 = NULL;
2953 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3033 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2954 kfree(mp); 3034 kfree(mp);
@@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2961 */ 3041 */
2962 lpfc_nlp_not_used(ndlp); 3042 lpfc_nlp_not_used(ndlp);
2963 } 3043 }
3044
2964 return; 3045 return;
2965} 3046}
2966 3047
@@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3170 IOCB_t *icmd; 3251 IOCB_t *icmd;
3171 IOCB_t *oldcmd; 3252 IOCB_t *oldcmd;
3172 struct lpfc_iocbq *elsiocb; 3253 struct lpfc_iocbq *elsiocb;
3173 struct lpfc_sli_ring *pring;
3174 struct lpfc_sli *psli; 3254 struct lpfc_sli *psli;
3175 uint8_t *pcmd; 3255 uint8_t *pcmd;
3176 uint16_t cmdsize; 3256 uint16_t cmdsize;
@@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3178 ELS_PKT *els_pkt_ptr; 3258 ELS_PKT *els_pkt_ptr;
3179 3259
3180 psli = &phba->sli; 3260 psli = &phba->sli;
3181 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3182 oldcmd = &oldiocb->iocb; 3261 oldcmd = &oldiocb->iocb;
3183 3262
3184 switch (flag) { 3263 switch (flag) {
@@ -3266,7 +3345,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3266 } 3345 }
3267 3346
3268 phba->fc_stat.elsXmitACC++; 3347 phba->fc_stat.elsXmitACC++;
3269 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3348 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3270 if (rc == IOCB_ERROR) { 3349 if (rc == IOCB_ERROR) {
3271 lpfc_els_free_iocb(phba, elsiocb); 3350 lpfc_els_free_iocb(phba, elsiocb);
3272 return 1; 3351 return 1;
@@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3305 IOCB_t *icmd; 3384 IOCB_t *icmd;
3306 IOCB_t *oldcmd; 3385 IOCB_t *oldcmd;
3307 struct lpfc_iocbq *elsiocb; 3386 struct lpfc_iocbq *elsiocb;
3308 struct lpfc_sli_ring *pring;
3309 struct lpfc_sli *psli; 3387 struct lpfc_sli *psli;
3310 uint8_t *pcmd; 3388 uint8_t *pcmd;
3311 uint16_t cmdsize; 3389 uint16_t cmdsize;
3312 int rc; 3390 int rc;
3313 3391
3314 psli = &phba->sli; 3392 psli = &phba->sli;
3315 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3316
3317 cmdsize = 2 * sizeof(uint32_t); 3393 cmdsize = 2 * sizeof(uint32_t);
3318 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3394 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3319 ndlp->nlp_DID, ELS_CMD_LS_RJT); 3395 ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3422,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3346 3422
3347 phba->fc_stat.elsXmitLSRJT++; 3423 phba->fc_stat.elsXmitLSRJT++;
3348 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3349 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3425 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3350 3426
3351 if (rc == IOCB_ERROR) { 3427 if (rc == IOCB_ERROR) {
3352 lpfc_els_free_iocb(phba, elsiocb); 3428 lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3379 struct lpfc_nodelist *ndlp) 3455 struct lpfc_nodelist *ndlp)
3380{ 3456{
3381 struct lpfc_hba *phba = vport->phba; 3457 struct lpfc_hba *phba = vport->phba;
3382 struct lpfc_sli *psli = &phba->sli;
3383 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3384 ADISC *ap; 3458 ADISC *ap;
3385 IOCB_t *icmd, *oldcmd; 3459 IOCB_t *icmd, *oldcmd;
3386 struct lpfc_iocbq *elsiocb; 3460 struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3496,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3422 3496
3423 phba->fc_stat.elsXmitACC++; 3497 phba->fc_stat.elsXmitACC++;
3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3498 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3425 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3499 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3426 if (rc == IOCB_ERROR) { 3500 if (rc == IOCB_ERROR) {
3427 lpfc_els_free_iocb(phba, elsiocb); 3501 lpfc_els_free_iocb(phba, elsiocb);
3428 return 1; 3502 return 1;
@@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3459 IOCB_t *icmd; 3533 IOCB_t *icmd;
3460 IOCB_t *oldcmd; 3534 IOCB_t *oldcmd;
3461 struct lpfc_iocbq *elsiocb; 3535 struct lpfc_iocbq *elsiocb;
3462 struct lpfc_sli_ring *pring;
3463 struct lpfc_sli *psli; 3536 struct lpfc_sli *psli;
3464 uint8_t *pcmd; 3537 uint8_t *pcmd;
3465 uint16_t cmdsize; 3538 uint16_t cmdsize;
3466 int rc; 3539 int rc;
3467 3540
3468 psli = &phba->sli; 3541 psli = &phba->sli;
3469 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3470 3542
3471 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 3543 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3472 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3544 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3520 phba->fc_stat.elsXmitACC++; 3592 phba->fc_stat.elsXmitACC++;
3521 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3593 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3522 3594
3523 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3595 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3524 if (rc == IOCB_ERROR) { 3596 if (rc == IOCB_ERROR) {
3525 lpfc_els_free_iocb(phba, elsiocb); 3597 lpfc_els_free_iocb(phba, elsiocb);
3526 return 1; 3598 return 1;
@@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3562 RNID *rn; 3634 RNID *rn;
3563 IOCB_t *icmd, *oldcmd; 3635 IOCB_t *icmd, *oldcmd;
3564 struct lpfc_iocbq *elsiocb; 3636 struct lpfc_iocbq *elsiocb;
3565 struct lpfc_sli_ring *pring;
3566 struct lpfc_sli *psli; 3637 struct lpfc_sli *psli;
3567 uint8_t *pcmd; 3638 uint8_t *pcmd;
3568 uint16_t cmdsize; 3639 uint16_t cmdsize;
3569 int rc; 3640 int rc;
3570 3641
3571 psli = &phba->sli; 3642 psli = &phba->sli;
3572 pring = &psli->ring[LPFC_ELS_RING];
3573
3574 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 3643 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3575 + (2 * sizeof(struct lpfc_name)); 3644 + (2 * sizeof(struct lpfc_name));
3576 if (format) 3645 if (format)
@@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3626 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 3695 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3627 * it could be freed */ 3696 * it could be freed */
3628 3697
3629 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3698 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3630 if (rc == IOCB_ERROR) { 3699 if (rc == IOCB_ERROR) {
3631 lpfc_els_free_iocb(phba, elsiocb); 3700 lpfc_els_free_iocb(phba, elsiocb);
3632 return 1; 3701 return 1;
@@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3839 payload_len -= sizeof(uint32_t); 3908 payload_len -= sizeof(uint32_t);
3840 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 3909 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
3841 case RSCN_ADDRESS_FORMAT_PORT: 3910 case RSCN_ADDRESS_FORMAT_PORT:
3842 if (ns_did.un.word == rscn_did.un.word) 3911 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3912 && (ns_did.un.b.area == rscn_did.un.b.area)
3913 && (ns_did.un.b.id == rscn_did.un.b.id))
3843 goto return_did_out; 3914 goto return_did_out;
3844 break; 3915 break;
3845 case RSCN_ADDRESS_FORMAT_AREA: 3916 case RSCN_ADDRESS_FORMAT_AREA:
@@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4300 lpfc_init_link(phba, mbox, 4371 lpfc_init_link(phba, mbox,
4301 phba->cfg_topology, 4372 phba->cfg_topology,
4302 phba->cfg_link_speed); 4373 phba->cfg_link_speed);
4303 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 4374 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4304 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4375 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4305 mbox->vport = vport; 4376 mbox->vport = vport;
4306 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4377 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4440static void 4511static void
4441lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4512lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4442{ 4513{
4443 struct lpfc_sli *psli = &phba->sli;
4444 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4445 MAILBOX_t *mb; 4514 MAILBOX_t *mb;
4446 IOCB_t *icmd; 4515 IOCB_t *icmd;
4447 RPS_RSP *rps_rsp; 4516 RPS_RSP *rps_rsp;
@@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4451 uint16_t xri, status; 4520 uint16_t xri, status;
4452 uint32_t cmdsize; 4521 uint32_t cmdsize;
4453 4522
4454 mb = &pmb->mb; 4523 mb = &pmb->u.mb;
4455 4524
4456 ndlp = (struct lpfc_nodelist *) pmb->context2; 4525 ndlp = (struct lpfc_nodelist *) pmb->context2;
4457 xri = (uint16_t) ((unsigned long)(pmb->context1)); 4526 xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4507 ndlp->nlp_rpi); 4576 ndlp->nlp_rpi);
4508 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4577 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4509 phba->fc_stat.elsXmitACC++; 4578 phba->fc_stat.elsXmitACC++;
4510 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) 4579 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4511 lpfc_els_free_iocb(phba, elsiocb); 4580 lpfc_els_free_iocb(phba, elsiocb);
4512 return; 4581 return;
4513} 4582}
@@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4616 IOCB_t *icmd, *oldcmd; 4685 IOCB_t *icmd, *oldcmd;
4617 RPL_RSP rpl_rsp; 4686 RPL_RSP rpl_rsp;
4618 struct lpfc_iocbq *elsiocb; 4687 struct lpfc_iocbq *elsiocb;
4619 struct lpfc_sli *psli = &phba->sli;
4620 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4621 uint8_t *pcmd; 4688 uint8_t *pcmd;
4622 4689
4623 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4690 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4654 ndlp->nlp_rpi); 4721 ndlp->nlp_rpi);
4655 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4722 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4656 phba->fc_stat.elsXmitACC++; 4723 phba->fc_stat.elsXmitACC++;
4657 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 4724 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4725 IOCB_ERROR) {
4658 lpfc_els_free_iocb(phba, elsiocb); 4726 lpfc_els_free_iocb(phba, elsiocb);
4659 return 1; 4727 return 1;
4660 } 4728 }
@@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4883 } else { 4951 } else {
4884 /* FAN verified - skip FLOGI */ 4952 /* FAN verified - skip FLOGI */
4885 vport->fc_myDID = vport->fc_prevDID; 4953 vport->fc_myDID = vport->fc_prevDID;
4886 lpfc_issue_fabric_reglogin(vport); 4954 if (phba->sli_rev < LPFC_SLI_REV4)
4955 lpfc_issue_fabric_reglogin(vport);
4956 else
4957 lpfc_issue_reg_vfi(vport);
4887 } 4958 }
4888 } 4959 }
4889 return 0; 4960 return 0;
@@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5566 5637
5567dropit: 5638dropit:
5568 if (vport && !(vport->load_flag & FC_UNLOADING)) 5639 if (vport && !(vport->load_flag & FC_UNLOADING))
5569 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 5640 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5570 "(%d):0111 Dropping received ELS cmd " 5641 "0111 Dropping received ELS cmd "
5571 "Data: x%x x%x x%x\n", 5642 "Data: x%x x%x x%x\n",
5572 vport->vpi, icmd->ulpStatus, 5643 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
5573 icmd->un.ulpWord[4], icmd->ulpTimeout);
5574 phba->fc_stat.elsRcvDrop++; 5644 phba->fc_stat.elsRcvDrop++;
5575} 5645}
5576 5646
@@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5646 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 5716 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5647 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 5717 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5648 vport = phba->pport; 5718 vport = phba->pport;
5649 else { 5719 else
5650 uint16_t vpi = icmd->unsli3.rcvsli3.vpi; 5720 vport = lpfc_find_vport_by_vpid(phba,
5651 vport = lpfc_find_vport_by_vpid(phba, vpi); 5721 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
5652 }
5653 } 5722 }
5654 /* If there are no BDEs associated 5723 /* If there are no BDEs associated
5655 * with this IOCB, there is nothing to do. 5724 * with this IOCB, there is nothing to do.
@@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5781 struct lpfc_vport *vport = pmb->vport; 5850 struct lpfc_vport *vport = pmb->vport;
5782 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5851 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5783 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 5852 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5784 MAILBOX_t *mb = &pmb->mb; 5853 MAILBOX_t *mb = &pmb->u.mb;
5785 5854
5786 spin_lock_irq(shost->host_lock); 5855 spin_lock_irq(shost->host_lock);
5787 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 5856 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5818 5887
5819 } else { 5888 } else {
5820 if (vport == phba->pport) 5889 if (vport == phba->pport)
5821 lpfc_issue_fabric_reglogin(vport); 5890 if (phba->sli_rev < LPFC_SLI_REV4)
5891 lpfc_issue_fabric_reglogin(vport);
5892 else
5893 lpfc_issue_reg_vfi(vport);
5822 else 5894 else
5823 lpfc_do_scr_ns_plogi(phba, vport); 5895 lpfc_do_scr_ns_plogi(phba, vport);
5824 } 5896 }
@@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
5850 5922
5851 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5923 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5852 if (mbox) { 5924 if (mbox) {
5853 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); 5925 lpfc_reg_vpi(vport, mbox);
5854 mbox->vport = vport; 5926 mbox->vport = vport;
5855 mbox->context2 = lpfc_nlp_get(ndlp); 5927 mbox->context2 = lpfc_nlp_get(ndlp);
5856 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 5928 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6139{ 6211{
6140 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6212 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6141 struct lpfc_hba *phba = vport->phba; 6213 struct lpfc_hba *phba = vport->phba;
6142 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6143 IOCB_t *icmd; 6214 IOCB_t *icmd;
6144 struct lpfc_iocbq *elsiocb; 6215 struct lpfc_iocbq *elsiocb;
6145 uint8_t *pcmd; 6216 uint8_t *pcmd;
@@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6169 spin_lock_irq(shost->host_lock); 6240 spin_lock_irq(shost->host_lock);
6170 ndlp->nlp_flag |= NLP_LOGO_SND; 6241 ndlp->nlp_flag |= NLP_LOGO_SND;
6171 spin_unlock_irq(shost->host_lock); 6242 spin_unlock_irq(shost->host_lock);
6172 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 6243 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6244 IOCB_ERROR) {
6173 spin_lock_irq(shost->host_lock); 6245 spin_lock_irq(shost->host_lock);
6174 ndlp->nlp_flag &= ~NLP_LOGO_SND; 6246 ndlp->nlp_flag &= ~NLP_LOGO_SND;
6175 spin_unlock_irq(shost->host_lock); 6247 spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6224 struct lpfc_iocbq *iocb; 6296 struct lpfc_iocbq *iocb;
6225 unsigned long iflags; 6297 unsigned long iflags;
6226 int ret; 6298 int ret;
6227 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6228 IOCB_t *cmd; 6299 IOCB_t *cmd;
6229 6300
6230repeat: 6301repeat:
@@ -6248,7 +6319,7 @@ repeat:
6248 "Fabric sched1: ste:x%x", 6319 "Fabric sched1: ste:x%x",
6249 iocb->vport->port_state, 0, 0); 6320 iocb->vport->port_state, 0, 0);
6250 6321
6251 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6322 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6252 6323
6253 if (ret == IOCB_ERROR) { 6324 if (ret == IOCB_ERROR) {
6254 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6325 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6465,6 @@ static int
6394lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 6465lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6395{ 6466{
6396 unsigned long iflags; 6467 unsigned long iflags;
6397 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6398 int ready; 6468 int ready;
6399 int ret; 6469 int ret;
6400 6470
@@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6418 "Fabric sched2: ste:x%x", 6488 "Fabric sched2: ste:x%x",
6419 iocb->vport->port_state, 0, 0); 6489 iocb->vport->port_state, 0, 0);
6420 6490
6421 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6491 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6422 6492
6423 if (ret == IOCB_ERROR) { 6493 if (ret == IOCB_ERROR) {
6424 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6494 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6594 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6525 IOERR_SLI_ABORTED); 6595 IOERR_SLI_ABORTED);
6526} 6596}
6597
6598/**
6599 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6600 * @phba: pointer to lpfc hba data structure.
6601 * @axri: pointer to the els xri abort wcqe structure.
6602 *
6603 * This routine is invoked by the worker thread to process a SLI4 slow-path
6604 * ELS aborted xri.
6605 **/
6606void
6607lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6608 struct sli4_wcqe_xri_aborted *axri)
6609{
6610 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6611 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6612 unsigned long iflag = 0;
6613
6614 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6615 list_for_each_entry_safe(sglq_entry, sglq_next,
6616 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6617 if (sglq_entry->sli4_xritag == xri) {
6618 list_del(&sglq_entry->list);
6619 spin_unlock_irqrestore(
6620 &phba->sli4_hba.abts_sgl_list_lock,
6621 iflag);
6622 spin_lock_irqsave(&phba->hbalock, iflag);
6623
6624 list_add_tail(&sglq_entry->list,
6625 &phba->sli4_hba.lpfc_sgl_list);
6626 spin_unlock_irqrestore(&phba->hbalock, iflag);
6627 return;
6628 }
6629 }
6630 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6631}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf704..35c41ae75be2 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,10 +29,12 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
31 31
32#include "lpfc_hw4.h"
32#include "lpfc_hw.h" 33#include "lpfc_hw.h"
33#include "lpfc_nl.h" 34#include "lpfc_nl.h"
34#include "lpfc_disc.h" 35#include "lpfc_disc.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_scsi.h" 38#include "lpfc_scsi.h"
37#include "lpfc.h" 39#include "lpfc.h"
38#include "lpfc_logmsg.h" 40#include "lpfc_logmsg.h"
@@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
273 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 275 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
274 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 276 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
275 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 277 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
278
279 lpfc_unregister_unused_fcf(phba);
276} 280}
277 281
278/** 282/**
@@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
295 299
296 ret = kzalloc(sizeof(struct lpfc_fast_path_event), 300 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
297 GFP_ATOMIC); 301 GFP_ATOMIC);
298 if (ret) 302 if (ret) {
299 atomic_inc(&phba->fast_event_count); 303 atomic_inc(&phba->fast_event_count);
300 INIT_LIST_HEAD(&ret->work_evt.evt_listp); 304 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
301 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 305 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
306 }
302 return ret; 307 return ret;
303} 308}
304 309
@@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba)
491 phba->work_ha = 0; 496 phba->work_ha = 0;
492 spin_unlock_irq(&phba->hbalock); 497 spin_unlock_irq(&phba->hbalock);
493 498
499 /* First, try to post the next mailbox command to SLI4 device */
500 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
501 lpfc_sli4_post_async_mbox(phba);
502
494 if (ha_copy & HA_ERATT) 503 if (ha_copy & HA_ERATT)
495 /* Handle the error attention event */ 504 /* Handle the error attention event */
496 lpfc_handle_eratt(phba); 505 lpfc_handle_eratt(phba);
@@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba)
501 if (ha_copy & HA_LATT) 510 if (ha_copy & HA_LATT)
502 lpfc_handle_latt(phba); 511 lpfc_handle_latt(phba);
503 512
513 /* Process SLI4 events */
514 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
515 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
516 lpfc_sli4_fcp_xri_abort_event_proc(phba);
517 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
518 lpfc_sli4_els_xri_abort_event_proc(phba);
519 if (phba->hba_flag & ASYNC_EVENT)
520 lpfc_sli4_async_event_proc(phba);
521 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
522 spin_lock_irq(&phba->hbalock);
523 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
524 spin_unlock_irq(&phba->hbalock);
525 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
526 }
527 if (phba->hba_flag & HBA_RECEIVE_BUFFER)
528 lpfc_sli4_handle_received_buffer(phba);
529 }
530
504 vports = lpfc_create_vport_work_array(phba); 531 vports = lpfc_create_vport_work_array(phba);
505 if (vports != NULL) 532 if (vports != NULL)
506 for(i = 0; i <= phba->max_vpi; i++) { 533 for (i = 0; i <= phba->max_vports; i++) {
507 /* 534 /*
508 * We could have no vports in array if unloading, so if 535 * We could have no vports in array if unloading, so if
509 * this happens then just use the pport 536 * this happens then just use the pport
@@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba)
555 /* 582 /*
556 * Turn on Ring interrupts 583 * Turn on Ring interrupts
557 */ 584 */
558 spin_lock_irq(&phba->hbalock); 585 if (phba->sli_rev <= LPFC_SLI_REV3) {
559 control = readl(phba->HCregaddr); 586 spin_lock_irq(&phba->hbalock);
560 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 587 control = readl(phba->HCregaddr);
561 lpfc_debugfs_slow_ring_trc(phba, 588 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
562 "WRK Enable ring: cntl:x%x hacopy:x%x", 589 lpfc_debugfs_slow_ring_trc(phba,
563 control, ha_copy, 0); 590 "WRK Enable ring: cntl:x%x hacopy:x%x",
564 591 control, ha_copy, 0);
565 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 592
566 writel(control, phba->HCregaddr); 593 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
567 readl(phba->HCregaddr); /* flush */ 594 writel(control, phba->HCregaddr);
568 } 595 readl(phba->HCregaddr); /* flush */
569 else { 596 } else {
570 lpfc_debugfs_slow_ring_trc(phba, 597 lpfc_debugfs_slow_ring_trc(phba,
571 "WRK Ring ok: cntl:x%x hacopy:x%x", 598 "WRK Ring ok: cntl:x%x hacopy:x%x",
572 control, ha_copy, 0); 599 control, ha_copy, 0);
600 }
601 spin_unlock_irq(&phba->hbalock);
573 } 602 }
574 spin_unlock_irq(&phba->hbalock);
575 } 603 }
576 lpfc_work_list_done(phba); 604 lpfc_work_list_done(phba);
577} 605}
@@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
689 lpfc_can_disctmo(vport); 717 lpfc_can_disctmo(vport);
690} 718}
691 719
692static void 720void
693lpfc_linkdown_port(struct lpfc_vport *vport) 721lpfc_linkdown_port(struct lpfc_vport *vport)
694{ 722{
695 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
716 if (phba->link_state == LPFC_LINK_DOWN) 744 if (phba->link_state == LPFC_LINK_DOWN)
717 return 0; 745 return 0;
718 spin_lock_irq(&phba->hbalock); 746 spin_lock_irq(&phba->hbalock);
747 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
719 if (phba->link_state > LPFC_LINK_DOWN) { 748 if (phba->link_state > LPFC_LINK_DOWN) {
720 phba->link_state = LPFC_LINK_DOWN; 749 phba->link_state = LPFC_LINK_DOWN;
721 phba->pport->fc_flag &= ~FC_LBIT; 750 phba->pport->fc_flag &= ~FC_LBIT;
@@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
723 spin_unlock_irq(&phba->hbalock); 752 spin_unlock_irq(&phba->hbalock);
724 vports = lpfc_create_vport_work_array(phba); 753 vports = lpfc_create_vport_work_array(phba);
725 if (vports != NULL) 754 if (vports != NULL)
726 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 755 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
727 /* Issue a LINK DOWN event to all nodes */ 756 /* Issue a LINK DOWN event to all nodes */
728 lpfc_linkdown_port(vports[i]); 757 lpfc_linkdown_port(vports[i]);
729 } 758 }
@@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
833 862
834 vports = lpfc_create_vport_work_array(phba); 863 vports = lpfc_create_vport_work_array(phba);
835 if (vports != NULL) 864 if (vports != NULL)
836 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
837 lpfc_linkup_port(vports[i]); 866 lpfc_linkup_port(vports[i]);
838 lpfc_destroy_vport_work_array(phba, vports); 867 lpfc_destroy_vport_work_array(phba, vports);
839 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 868 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
869 (phba->sli_rev < LPFC_SLI_REV4))
840 lpfc_issue_clear_la(phba, phba->pport); 870 lpfc_issue_clear_la(phba, phba->pport);
841 871
842 return 0; 872 return 0;
@@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
854 struct lpfc_vport *vport = pmb->vport; 884 struct lpfc_vport *vport = pmb->vport;
855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 885 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
856 struct lpfc_sli *psli = &phba->sli; 886 struct lpfc_sli *psli = &phba->sli;
857 MAILBOX_t *mb = &pmb->mb; 887 MAILBOX_t *mb = &pmb->u.mb;
858 uint32_t control; 888 uint32_t control;
859 889
860 /* Since we don't do discovery right now, turn these off here */ 890 /* Since we don't do discovery right now, turn these off here */
@@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
917{ 947{
918 struct lpfc_vport *vport = pmb->vport; 948 struct lpfc_vport *vport = pmb->vport;
919 949
920 if (pmb->mb.mbxStatus) 950 if (pmb->u.mb.mbxStatus)
921 goto out; 951 goto out;
922 952
923 mempool_free(pmb, phba->mbox_mem_pool); 953 mempool_free(pmb, phba->mbox_mem_pool);
@@ -945,7 +975,7 @@ out:
945 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 975 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
946 "0306 CONFIG_LINK mbxStatus error x%x " 976 "0306 CONFIG_LINK mbxStatus error x%x "
947 "HBA state x%x\n", 977 "HBA state x%x\n",
948 pmb->mb.mbxStatus, vport->port_state); 978 pmb->u.mb.mbxStatus, vport->port_state);
949 mempool_free(pmb, phba->mbox_mem_pool); 979 mempool_free(pmb, phba->mbox_mem_pool);
950 980
951 lpfc_linkdown(phba); 981 lpfc_linkdown(phba);
@@ -959,9 +989,592 @@ out:
959} 989}
960 990
961static void 991static void
992lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
993{
994 struct lpfc_vport *vport = mboxq->vport;
995 unsigned long flags;
996
997 if (mboxq->u.mb.mbxStatus) {
998 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
999 "2017 REG_FCFI mbxStatus error x%x "
1000 "HBA state x%x\n",
1001 mboxq->u.mb.mbxStatus, vport->port_state);
1002 mempool_free(mboxq, phba->mbox_mem_pool);
1003 return;
1004 }
1005
1006 /* Start FCoE discovery by sending a FLOGI. */
1007 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1008 /* Set the FCFI registered flag */
1009 spin_lock_irqsave(&phba->hbalock, flags);
1010 phba->fcf.fcf_flag |= FCF_REGISTERED;
1011 spin_unlock_irqrestore(&phba->hbalock, flags);
1012 if (vport->port_state != LPFC_FLOGI) {
1013 spin_lock_irqsave(&phba->hbalock, flags);
1014 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1015 spin_unlock_irqrestore(&phba->hbalock, flags);
1016 lpfc_initial_flogi(vport);
1017 }
1018
1019 mempool_free(mboxq, phba->mbox_mem_pool);
1020 return;
1021}
1022
1023/**
1024 * lpfc_fab_name_match - Check if the fcf fabric name matches.
1025 * @fab_name: pointer to fabric name.
1026 * @new_fcf_record: pointer to fcf record.
1027 *
1028 * This routine compares the fcf record's fabric name with the provided
1029 * fabric name. If the fabric names are identical this function
1030 * returns 1, else it returns 0.
1031 **/
1032static uint32_t
1033lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1034{
1035 if ((fab_name[0] ==
1036 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
1037 (fab_name[1] ==
1038 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
1039 (fab_name[2] ==
1040 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
1041 (fab_name[3] ==
1042 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
1043 (fab_name[4] ==
1044 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
1045 (fab_name[5] ==
1046 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
1047 (fab_name[6] ==
1048 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
1049 (fab_name[7] ==
1050 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1051 return 1;
1052 else
1053 return 0;
1054}
1055
1056/**
1057 * lpfc_mac_addr_match - Check if the fcf mac address matches.
1058 * @phba: pointer to lpfc hba data structure.
1059 * @new_fcf_record: pointer to fcf record.
1060 *
1061 * This routine compares the fcf record's mac address with the HBA's
1062 * FCF mac address. If the mac addresses are identical this function
1063 * returns 1, else it returns 0.
1064 **/
1065static uint32_t
1066lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1067{
1068 if ((phba->fcf.mac_addr[0] ==
1069 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
1070 (phba->fcf.mac_addr[1] ==
1071 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
1072 (phba->fcf.mac_addr[2] ==
1073 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1074 (phba->fcf.mac_addr[3] ==
1075 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1076 (phba->fcf.mac_addr[4] ==
1077 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1078 (phba->fcf.mac_addr[5] ==
1079 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1080 return 1;
1081 else
1082 return 0;
1083}
1084
1085/**
1086 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1087 * @phba: pointer to lpfc hba data structure.
1088 * @new_fcf_record: pointer to fcf record.
1089 *
1090 * This routine copies the FCF information from the FCF
1091 * record to lpfc_hba data structure.
1092 **/
1093static void
1094lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1095{
1096 phba->fcf.fabric_name[0] =
1097 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1098 phba->fcf.fabric_name[1] =
1099 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1100 phba->fcf.fabric_name[2] =
1101 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1102 phba->fcf.fabric_name[3] =
1103 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1104 phba->fcf.fabric_name[4] =
1105 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1106 phba->fcf.fabric_name[5] =
1107 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1108 phba->fcf.fabric_name[6] =
1109 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1110 phba->fcf.fabric_name[7] =
1111 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1112 phba->fcf.mac_addr[0] =
1113 bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1114 phba->fcf.mac_addr[1] =
1115 bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1116 phba->fcf.mac_addr[2] =
1117 bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1118 phba->fcf.mac_addr[3] =
1119 bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1120 phba->fcf.mac_addr[4] =
1121 bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1122 phba->fcf.mac_addr[5] =
1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1125 phba->fcf.priority = new_fcf_record->fip_priority;
1126}
1127
1128/**
1129 * lpfc_register_fcf - Register the FCF with hba.
1130 * @phba: pointer to lpfc hba data structure.
1131 *
1132 * This routine issues a register fcfi mailbox command to register
1133 * the fcf with HBA.
1134 **/
1135static void
1136lpfc_register_fcf(struct lpfc_hba *phba)
1137{
1138 LPFC_MBOXQ_t *fcf_mbxq;
1139 int rc;
1140 unsigned long flags;
1141
1142 spin_lock_irqsave(&phba->hbalock, flags);
1143
1144 /* If the FCF is not available, do nothing. */
1145 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1146 spin_unlock_irqrestore(&phba->hbalock, flags);
1147 return;
1148 }
1149
1150 /* The FCF is already registered, start discovery */
1151 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1152 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1153 spin_unlock_irqrestore(&phba->hbalock, flags);
1154 if (phba->pport->port_state != LPFC_FLOGI)
1155 lpfc_initial_flogi(phba->pport);
1156 return;
1157 }
1158 spin_unlock_irqrestore(&phba->hbalock, flags);
1159
1160 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1161 GFP_KERNEL);
1162 if (!fcf_mbxq)
1163 return;
1164
1165 lpfc_reg_fcfi(phba, fcf_mbxq);
1166 fcf_mbxq->vport = phba->pport;
1167 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1168 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1169 if (rc == MBX_NOT_FINISHED)
1170 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1171
1172 return;
1173}
1174
1175/**
1176 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1177 * @phba: pointer to lpfc hba data structure.
1178 * @new_fcf_record: pointer to fcf record.
1179 * @boot_flag: Indicates if this record used by boot bios.
1180 * @addr_mode: The address mode to be used by this FCF
1181 *
1182 * This routine compare the fcf record with connect list obtained from the
1183 * config region to decide if this FCF can be used for SAN discovery. It returns
1184 * 1 if this record can be used for SAN discovery else return zero. If this FCF
1185 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
1186 * is used by boot bios and addr_mode will indicate the addressing mode to be
1187 * used for this FCF when the function returns.
1188 * If the FCF record need to be used with a particular vlan id, the vlan is
1189 * set in the vlan_id on return of the function. If not VLAN tagging need to
1190 * be used with the FCF vlan_id will be set to 0xFFFF;
1191 **/
1192static int
1193lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1194 struct fcf_record *new_fcf_record,
1195 uint32_t *boot_flag, uint32_t *addr_mode,
1196 uint16_t *vlan_id)
1197{
1198 struct lpfc_fcf_conn_entry *conn_entry;
1199
1200 if (!phba->cfg_enable_fip) {
1201 *boot_flag = 0;
1202 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1203 new_fcf_record);
1204 if (phba->valid_vlan)
1205 *vlan_id = phba->vlan_id;
1206 else
1207 *vlan_id = 0xFFFF;
1208 return 1;
1209 }
1210
1211 /*
1212 * If there are no FCF connection table entries, the driver connects to all
1213 * FCFs.
1214 */
1215 if (list_empty(&phba->fcf_conn_rec_list)) {
1216 *boot_flag = 0;
1217 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1218 new_fcf_record);
1219 *vlan_id = 0xFFFF;
1220 return 1;
1221 }
1222
1223 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
1224 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1225 continue;
1226
1227 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1228 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1229 new_fcf_record))
1230 continue;
1231
1232 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1233 /*
1234 * If the vlan bit map does not have the bit set for the
1235 * vlan id to be used, then it is not a match.
1236 */
1237 if (!(new_fcf_record->vlan_bitmap
1238 [conn_entry->conn_rec.vlan_tag / 8] &
1239 (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1240 continue;
1241 }
1242
1243 /*
1244 * Check if the connection record specifies a required
1245 * addressing mode.
1246 */
1247 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1248 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1249
1250 /*
1251 * If SPMA is required but the FCF does not support it, continue.
1252 */
1253 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1254 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1255 new_fcf_record) & LPFC_FCF_SPMA))
1256 continue;
1257
1258 /*
1259 * If FPMA is required but the FCF does not support it, continue.
1260 */
1261 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1262 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1263 new_fcf_record) & LPFC_FCF_FPMA))
1264 continue;
1265 }
1266
1267 /*
1268 * This fcf record matches filtering criteria.
1269 */
1270 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1271 *boot_flag = 1;
1272 else
1273 *boot_flag = 0;
1274
1275 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1276 new_fcf_record);
1277 /*
1278 * If the user specified a required address mode, assign that
1279 * address mode
1280 */
1281 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1282 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1283 *addr_mode = (conn_entry->conn_rec.flags &
1284 FCFCNCT_AM_SPMA) ?
1285 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1286 /*
1287 * If the user specified a preferred address mode, use the
1288 * addr mode only if the FCF supports the addr_mode.
1289 */
1290 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1291 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1292 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1293 (*addr_mode & LPFC_FCF_SPMA))
1294 *addr_mode = LPFC_FCF_SPMA;
1295 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1296 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1297 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1298 (*addr_mode & LPFC_FCF_FPMA))
1299 *addr_mode = LPFC_FCF_FPMA;
1300 /*
1301 * If the user did not specify any addressing mode, use FPMA if
1302 * possible, else use SPMA.
1303 */
1304 else if (*addr_mode & LPFC_FCF_FPMA)
1305 *addr_mode = LPFC_FCF_FPMA;
1306
1307 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1308 *vlan_id = conn_entry->conn_rec.vlan_tag;
1309 else
1310 *vlan_id = 0xFFFF;
1311
1312 return 1;
1313 }
1314
1315 return 0;
1316}
1317
1318/**
1319 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1320 * @phba: pointer to lpfc hba data structure.
1321 * @mboxq: pointer to mailbox object.
1322 *
1323 * This function iterates through all the fcf records available in the
1324 * HBA and chooses the optimal FCF record for discovery. After finding
1325 * the FCF for discovery it registers the FCF record and kick starts
1326 * discovery.
1327 * If the FCF_IN_USE flag is set in the currently used FCF, the routine tries
1328 * to use an FCF record which matches the fabric name and mac address of the
1329 * currently used FCF record.
1330 * If the driver supports only one FCF, it will try to use the FCF record
1331 * used by the BOOT_BIOS.
1332 */
1333void
1334lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1335{
1336 void *virt_addr;
1337 dma_addr_t phys_addr;
1338 uint8_t *bytep;
1339 struct lpfc_mbx_sge sge;
1340 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1341 uint32_t shdr_status, shdr_add_status;
1342 union lpfc_sli4_cfg_shdr *shdr;
1343 struct fcf_record *new_fcf_record;
1344 int rc;
1345 uint32_t boot_flag, addr_mode;
1346 uint32_t next_fcf_index;
1347 unsigned long flags;
1348 uint16_t vlan_id;
1349
1350 /* Get the first SGE entry from the non-embedded DMA memory. This
1351 * routine only uses a single SGE.
1352 */
1353 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1354 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1355 if (unlikely(!mboxq->sge_array)) {
1356 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1357 "2524 Failed to get the non-embedded SGE "
1358 "virtual address\n");
1359 goto out;
1360 }
1361 virt_addr = mboxq->sge_array->addr[0];
1362
1363 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1364 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1365 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1366 &shdr->response);
1367 /*
1368 * The FCF Record was read and there is no reason for the driver
1369 * to maintain the FCF record data or memory. Instead, the driver just
1370 * needs to keep track of which FCFIs can be used.
1371 */
1372 if (shdr_status || shdr_add_status) {
1373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1374 "2521 READ_FCF_RECORD mailbox failed "
1375 "with status x%x add_status x%x, mbx\n",
1376 shdr_status, shdr_add_status);
1377 goto out;
1378 }
1379 /* Interpreting the returned information of FCF records */
1380 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1381 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1382 sizeof(struct lpfc_mbx_read_fcf_tbl));
1383 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1384
1385 new_fcf_record = (struct fcf_record *)(virt_addr +
1386 sizeof(struct lpfc_mbx_read_fcf_tbl));
1387 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1388 sizeof(struct fcf_record));
1389 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1390
1391 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
1392 &boot_flag, &addr_mode,
1393 &vlan_id);
1394 /*
1395 * If the fcf record does not match with connect list entries
1396 * read the next entry.
1397 */
1398 if (!rc)
1399 goto read_next_fcf;
1400 /*
1401 * If this is not the first FCF discovery of the HBA, use last
1402 * FCF record for the discovery.
1403 */
1404 spin_lock_irqsave(&phba->hbalock, flags);
1405 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1406 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1407 new_fcf_record) &&
1408 lpfc_mac_addr_match(phba, new_fcf_record)) {
1409 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1410 spin_unlock_irqrestore(&phba->hbalock, flags);
1411 goto out;
1412 }
1413 spin_unlock_irqrestore(&phba->hbalock, flags);
1414 goto read_next_fcf;
1415 }
1416 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1417 /*
1418 * If the current FCF record does not have boot flag
1419 * set and new fcf record has boot flag set, use the
1420 * new fcf record.
1421 */
1422 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1423 /* Use this FCF record */
1424 lpfc_copy_fcf_record(phba, new_fcf_record);
1425 phba->fcf.addr_mode = addr_mode;
1426 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1427 if (vlan_id != 0xFFFF) {
1428 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1429 phba->fcf.vlan_id = vlan_id;
1430 }
1431 spin_unlock_irqrestore(&phba->hbalock, flags);
1432 goto read_next_fcf;
1433 }
1434 /*
1435 * If the current FCF record has boot flag set and the
1436 * new FCF record does not have boot flag, read the next
1437 * FCF record.
1438 */
1439 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1440 spin_unlock_irqrestore(&phba->hbalock, flags);
1441 goto read_next_fcf;
1442 }
1443 /*
1444 * If there is a record with lower priority value for
1445 * the current FCF, use that record.
1446 */
1447 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
1448 && (new_fcf_record->fip_priority <
1449 phba->fcf.priority)) {
1450 /* Use this FCF record */
1451 lpfc_copy_fcf_record(phba, new_fcf_record);
1452 phba->fcf.addr_mode = addr_mode;
1453 if (vlan_id != 0xFFFF) {
1454 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1455 phba->fcf.vlan_id = vlan_id;
1456 }
1457 spin_unlock_irqrestore(&phba->hbalock, flags);
1458 goto read_next_fcf;
1459 }
1460 spin_unlock_irqrestore(&phba->hbalock, flags);
1461 goto read_next_fcf;
1462 }
1463 /*
1464 * This is the first available FCF record, use this
1465 * record.
1466 */
1467 lpfc_copy_fcf_record(phba, new_fcf_record);
1468 phba->fcf.addr_mode = addr_mode;
1469 if (boot_flag)
1470 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1471 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1472 if (vlan_id != 0xFFFF) {
1473 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1474 phba->fcf.vlan_id = vlan_id;
1475 }
1476 spin_unlock_irqrestore(&phba->hbalock, flags);
1477 goto read_next_fcf;
1478
1479read_next_fcf:
1480 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1481 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
1482 lpfc_register_fcf(phba);
1483 else
1484 lpfc_sli4_read_fcf_record(phba, next_fcf_index);
1485 return;
1486
1487out:
1488 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1489 lpfc_register_fcf(phba);
1490
1491 return;
1492}
1493
1494/**
1495 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
1496 * @phba: pointer to lpfc hba data structure.
1497 *
1498 * This function loops through the list of vports on the @phba and issues an
1499 * FDISC if possible.
1500 */
1501void
1502lpfc_start_fdiscs(struct lpfc_hba *phba)
1503{
1504 struct lpfc_vport **vports;
1505 int i;
1506
1507 vports = lpfc_create_vport_work_array(phba);
1508 if (vports != NULL) {
1509 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1510 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1511 continue;
1512 /* There is no vpi for this vport */
1513 if (vports[i]->vpi > phba->max_vpi) {
1514 lpfc_vport_set_state(vports[i],
1515 FC_VPORT_FAILED);
1516 continue;
1517 }
1518 if (phba->fc_topology == TOPOLOGY_LOOP) {
1519 lpfc_vport_set_state(vports[i],
1520 FC_VPORT_LINKDOWN);
1521 continue;
1522 }
1523 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1524 lpfc_initial_fdisc(vports[i]);
1525 else {
1526 lpfc_vport_set_state(vports[i],
1527 FC_VPORT_NO_FABRIC_SUPP);
1528 lpfc_printf_vlog(vports[i], KERN_ERR,
1529 LOG_ELS,
1530 "0259 No NPIV "
1531 "Fabric support\n");
1532 }
1533 }
1534 }
1535 lpfc_destroy_vport_work_array(phba, vports);
1536}
1537
1538void
1539lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1540{
1541 struct lpfc_dmabuf *dmabuf = mboxq->context1;
1542 struct lpfc_vport *vport = mboxq->vport;
1543
1544 if (mboxq->u.mb.mbxStatus) {
1545 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1546 "2018 REG_VFI mbxStatus error x%x "
1547 "HBA state x%x\n",
1548 mboxq->u.mb.mbxStatus, vport->port_state);
1549 if (phba->fc_topology == TOPOLOGY_LOOP) {
1550 /* FLOGI failed, use loop map to make discovery list */
1551 lpfc_disc_list_loopmap(vport);
1552 /* Start discovery */
1553 lpfc_disc_start(vport);
1554 goto fail_free_mem;
1555 }
1556 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1557 goto fail_free_mem;
1558 }
1559 /* Mark the vport has registered with its VFI */
1560 vport->vfi_state |= LPFC_VFI_REGISTERED;
1561
1562 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1563 lpfc_start_fdiscs(phba);
1564 lpfc_do_scr_ns_plogi(phba, vport);
1565 }
1566
1567fail_free_mem:
1568 mempool_free(mboxq, phba->mbox_mem_pool);
1569 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1570 kfree(dmabuf);
1571 return;
1572}
1573
1574static void
962lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1575lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
963{ 1576{
964 MAILBOX_t *mb = &pmb->mb; 1577 MAILBOX_t *mb = &pmb->u.mb;
965 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1578 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
966 struct lpfc_vport *vport = pmb->vport; 1579 struct lpfc_vport *vport = pmb->vport;
967 1580
@@ -1012,13 +1625,13 @@ static void
1012lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 1625lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1013{ 1626{
1014 struct lpfc_vport *vport = phba->pport; 1627 struct lpfc_vport *vport = phba->pport;
1015 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 1628 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
1016 int i; 1629 int i;
1017 struct lpfc_dmabuf *mp; 1630 struct lpfc_dmabuf *mp;
1018 int rc; 1631 int rc;
1632 struct fcf_record *fcf_record;
1019 1633
1020 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1634 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1021 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1022 1635
1023 spin_lock_irq(&phba->hbalock); 1636 spin_lock_irq(&phba->hbalock);
1024 switch (la->UlnkSpeed) { 1637 switch (la->UlnkSpeed) {
@@ -1034,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1034 case LA_8GHZ_LINK: 1647 case LA_8GHZ_LINK:
1035 phba->fc_linkspeed = LA_8GHZ_LINK; 1648 phba->fc_linkspeed = LA_8GHZ_LINK;
1036 break; 1649 break;
1650 case LA_10GHZ_LINK:
1651 phba->fc_linkspeed = LA_10GHZ_LINK;
1652 break;
1037 default: 1653 default:
1038 phba->fc_linkspeed = LA_UNKNW_LINK; 1654 phba->fc_linkspeed = LA_UNKNW_LINK;
1039 break; 1655 break;
@@ -1115,22 +1731,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1115 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1731 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1116 kfree(mp); 1732 kfree(mp);
1117 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1733 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1118 if (cfglink_mbox)
1119 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1120 goto out; 1734 goto out;
1121 } 1735 }
1122 } 1736 }
1123 1737
1124 if (cfglink_mbox) { 1738 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
1739 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1740 if (!cfglink_mbox)
1741 goto out;
1125 vport->port_state = LPFC_LOCAL_CFG_LINK; 1742 vport->port_state = LPFC_LOCAL_CFG_LINK;
1126 lpfc_config_link(phba, cfglink_mbox); 1743 lpfc_config_link(phba, cfglink_mbox);
1127 cfglink_mbox->vport = vport; 1744 cfglink_mbox->vport = vport;
1128 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1745 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1129 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 1746 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1130 if (rc != MBX_NOT_FINISHED) 1747 if (rc == MBX_NOT_FINISHED) {
1131 return; 1748 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1132 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1749 goto out;
1750 }
1751 } else {
1752 /*
1753 * Add the driver's default FCF record at FCF index 0 now. This
1754 * is phase 1 implementation that support FCF index 0 and driver
1755 * defaults.
1756 */
1757 if (phba->cfg_enable_fip == 0) {
1758 fcf_record = kzalloc(sizeof(struct fcf_record),
1759 GFP_KERNEL);
1760 if (unlikely(!fcf_record)) {
1761 lpfc_printf_log(phba, KERN_ERR,
1762 LOG_MBOX | LOG_SLI,
1763 "2554 Could not allocate memmory for "
1764 "fcf record\n");
1765 rc = -ENODEV;
1766 goto out;
1767 }
1768
1769 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
1770 LPFC_FCOE_FCF_DEF_INDEX);
1771 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
1772 if (unlikely(rc)) {
1773 lpfc_printf_log(phba, KERN_ERR,
1774 LOG_MBOX | LOG_SLI,
1775 "2013 Could not manually add FCF "
1776 "record 0, status %d\n", rc);
1777 rc = -ENODEV;
1778 kfree(fcf_record);
1779 goto out;
1780 }
1781 kfree(fcf_record);
1782 }
1783 /*
1784 * The driver is expected to do FIP/FCF. Call the port
1785 * and get the FCF Table.
1786 */
1787 rc = lpfc_sli4_read_fcf_record(phba,
1788 LPFC_FCOE_FCF_GET_FIRST);
1789 if (rc)
1790 goto out;
1133 } 1791 }
1792
1793 return;
1134out: 1794out:
1135 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1795 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1136 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1796 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1147,10 +1807,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
1147 struct lpfc_sli *psli = &phba->sli; 1807 struct lpfc_sli *psli = &phba->sli;
1148 spin_lock_irq(&phba->hbalock); 1808 spin_lock_irq(&phba->hbalock);
1149 psli->sli_flag |= LPFC_PROCESS_LA; 1809 psli->sli_flag |= LPFC_PROCESS_LA;
1150 control = readl(phba->HCregaddr); 1810 if (phba->sli_rev <= LPFC_SLI_REV3) {
1151 control |= HC_LAINT_ENA; 1811 control = readl(phba->HCregaddr);
1152 writel(control, phba->HCregaddr); 1812 control |= HC_LAINT_ENA;
1153 readl(phba->HCregaddr); /* flush */ 1813 writel(control, phba->HCregaddr);
1814 readl(phba->HCregaddr); /* flush */
1815 }
1154 spin_unlock_irq(&phba->hbalock); 1816 spin_unlock_irq(&phba->hbalock);
1155} 1817}
1156 1818
@@ -1159,6 +1821,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1159{ 1821{
1160 lpfc_linkdown(phba); 1822 lpfc_linkdown(phba);
1161 lpfc_enable_la(phba); 1823 lpfc_enable_la(phba);
1824 lpfc_unregister_unused_fcf(phba);
1162 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1825 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1163} 1826}
1164 1827
@@ -1175,7 +1838,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1175 struct lpfc_vport *vport = pmb->vport; 1838 struct lpfc_vport *vport = pmb->vport;
1176 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1839 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1177 READ_LA_VAR *la; 1840 READ_LA_VAR *la;
1178 MAILBOX_t *mb = &pmb->mb; 1841 MAILBOX_t *mb = &pmb->u.mb;
1179 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1842 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1180 1843
1181 /* Unblock ELS traffic */ 1844 /* Unblock ELS traffic */
@@ -1190,7 +1853,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1190 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1853 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1191 } 1854 }
1192 1855
1193 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; 1856 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
1194 1857
1195 memcpy(&phba->alpa_map[0], mp->virt, 128); 1858 memcpy(&phba->alpa_map[0], mp->virt, 128);
1196 1859
@@ -1328,7 +1991,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1328static void 1991static void
1329lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1992lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1330{ 1993{
1331 MAILBOX_t *mb = &pmb->mb; 1994 MAILBOX_t *mb = &pmb->u.mb;
1332 struct lpfc_vport *vport = pmb->vport; 1995 struct lpfc_vport *vport = pmb->vport;
1333 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1996 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1334 1997
@@ -1381,7 +2044,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1381{ 2044{
1382 struct lpfc_vport *vport = pmb->vport; 2045 struct lpfc_vport *vport = pmb->vport;
1383 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2046 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1384 MAILBOX_t *mb = &pmb->mb; 2047 MAILBOX_t *mb = &pmb->u.mb;
1385 2048
1386 switch (mb->mbxStatus) { 2049 switch (mb->mbxStatus) {
1387 case 0x0011: 2050 case 0x0011:
@@ -1416,6 +2079,128 @@ out:
1416 return; 2079 return;
1417} 2080}
1418 2081
2082/**
2083 * lpfc_create_static_vport - Read HBA config region to create static vports.
2084 * @phba: pointer to lpfc hba data structure.
2085 *
2086 * This routine issues a DUMP mailbox command for config region 22 to get
2087 * the list of static vports to be created. The function creates vports
2088 * based on the information returned from the HBA.
2089 **/
2090void
2091lpfc_create_static_vport(struct lpfc_hba *phba)
2092{
2093 LPFC_MBOXQ_t *pmb = NULL;
2094 MAILBOX_t *mb;
2095 struct static_vport_info *vport_info;
2096 int rc, i;
2097 struct fc_vport_identifiers vport_id;
2098 struct fc_vport *new_fc_vport;
2099 struct Scsi_Host *shost;
2100 struct lpfc_vport *vport;
2101 uint16_t offset = 0;
2102 uint8_t *vport_buff;
2103
2104 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2105 if (!pmb) {
2106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2107 "0542 lpfc_create_static_vport failed to"
2108 " allocate mailbox memory\n");
2109 return;
2110 }
2111
2112 mb = &pmb->u.mb;
2113
2114 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
2115 if (!vport_info) {
2116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2117 "0543 lpfc_create_static_vport failed to"
2118 " allocate vport_info\n");
2119 mempool_free(pmb, phba->mbox_mem_pool);
2120 return;
2121 }
2122
2123 vport_buff = (uint8_t *) vport_info;
2124 do {
2125 lpfc_dump_static_vport(phba, pmb, offset);
2126 pmb->vport = phba->pport;
2127 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
2128
2129 if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
2130 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2131 "0544 lpfc_create_static_vport failed to"
2132 " issue dump mailbox command ret 0x%x "
2133 "status 0x%x\n",
2134 rc, mb->mbxStatus);
2135 goto out;
2136 }
2137
2138 if (mb->un.varDmp.word_cnt >
2139 sizeof(struct static_vport_info) - offset)
2140 mb->un.varDmp.word_cnt =
2141 sizeof(struct static_vport_info) - offset;
2142
2143 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
2144 vport_buff + offset,
2145 mb->un.varDmp.word_cnt);
2146 offset += mb->un.varDmp.word_cnt;
2147
2148 } while (mb->un.varDmp.word_cnt &&
2149 offset < sizeof(struct static_vport_info));
2150
2151
2152 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
2153 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
2154 != VPORT_INFO_REV)) {
2155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2156 "0545 lpfc_create_static_vport bad"
2157 " information header 0x%x 0x%x\n",
2158 le32_to_cpu(vport_info->signature),
2159 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
2160
2161 goto out;
2162 }
2163
2164 shost = lpfc_shost_from_vport(phba->pport);
2165
2166 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
2167 memset(&vport_id, 0, sizeof(vport_id));
2168 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
2169 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
2170 if (!vport_id.port_name || !vport_id.node_name)
2171 continue;
2172
2173 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
2174 vport_id.vport_type = FC_PORTTYPE_NPIV;
2175 vport_id.disable = false;
2176 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
2177
2178 if (!new_fc_vport) {
2179 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2180 "0546 lpfc_create_static_vport failed to"
2181				" create vport\n");
2182 continue;
2183 }
2184
2185 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
2186 vport->vport_flag |= STATIC_VPORT;
2187 }
2188
2189out:
2190 /*
2191	 * If this is a timed-out command, setting context2 to NULL tells the SLI
2192	 * layer not to use this buffer.
2193 */
2194 spin_lock_irq(&phba->hbalock);
2195 pmb->context2 = NULL;
2196 spin_unlock_irq(&phba->hbalock);
2197 kfree(vport_info);
2198 if (rc != MBX_TIMEOUT)
2199 mempool_free(pmb, phba->mbox_mem_pool);
2200
2201 return;
2202}
2203
1419/* 2204/*
1420 * This routine handles processing a Fabric REG_LOGIN mailbox 2205 * This routine handles processing a Fabric REG_LOGIN mailbox
1421 * command upon completion. It is setup in the LPFC_MBOXQ 2206 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1426,16 +2211,17 @@ void
1426lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2211lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1427{ 2212{
1428 struct lpfc_vport *vport = pmb->vport; 2213 struct lpfc_vport *vport = pmb->vport;
1429 MAILBOX_t *mb = &pmb->mb; 2214 MAILBOX_t *mb = &pmb->u.mb;
1430 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2215 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1431 struct lpfc_nodelist *ndlp; 2216 struct lpfc_nodelist *ndlp;
1432 struct lpfc_vport **vports;
1433 int i;
1434 2217
1435 ndlp = (struct lpfc_nodelist *) pmb->context2; 2218 ndlp = (struct lpfc_nodelist *) pmb->context2;
1436 pmb->context1 = NULL; 2219 pmb->context1 = NULL;
1437 pmb->context2 = NULL; 2220 pmb->context2 = NULL;
1438 if (mb->mbxStatus) { 2221 if (mb->mbxStatus) {
2222 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2223 "0258 Register Fabric login error: 0x%x\n",
2224 mb->mbxStatus);
1439 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2225 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1440 kfree(mp); 2226 kfree(mp);
1441 mempool_free(pmb, phba->mbox_mem_pool); 2227 mempool_free(pmb, phba->mbox_mem_pool);
@@ -1454,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1454 } 2240 }
1455 2241
1456 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2242 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1457 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1458 "0258 Register Fabric login error: 0x%x\n",
1459 mb->mbxStatus);
1460 /* Decrement the reference count to ndlp after the reference 2243 /* Decrement the reference count to ndlp after the reference
1461 * to the ndlp are done. 2244 * to the ndlp are done.
1462 */ 2245 */
@@ -1465,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1465 } 2248 }
1466 2249
1467 ndlp->nlp_rpi = mb->un.varWords[0]; 2250 ndlp->nlp_rpi = mb->un.varWords[0];
2251 ndlp->nlp_flag |= NLP_RPI_VALID;
1468 ndlp->nlp_type |= NLP_FABRIC; 2252 ndlp->nlp_type |= NLP_FABRIC;
1469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2253 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1470 2254
1471 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2255 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1472 vports = lpfc_create_vport_work_array(phba); 2256 lpfc_start_fdiscs(phba);
1473 if (vports != NULL)
1474 for(i = 0;
1475 i <= phba->max_vpi && vports[i] != NULL;
1476 i++) {
1477 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1478 continue;
1479 if (phba->fc_topology == TOPOLOGY_LOOP) {
1480 lpfc_vport_set_state(vports[i],
1481 FC_VPORT_LINKDOWN);
1482 continue;
1483 }
1484 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1485 lpfc_initial_fdisc(vports[i]);
1486 else {
1487 lpfc_vport_set_state(vports[i],
1488 FC_VPORT_NO_FABRIC_SUPP);
1489 lpfc_printf_vlog(vport, KERN_ERR,
1490 LOG_ELS,
1491 "0259 No NPIV "
1492 "Fabric support\n");
1493 }
1494 }
1495 lpfc_destroy_vport_work_array(phba, vports);
1496 lpfc_do_scr_ns_plogi(phba, vport); 2257 lpfc_do_scr_ns_plogi(phba, vport);
1497 } 2258 }
1498 2259
@@ -1516,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1516void 2277void
1517lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2278lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1518{ 2279{
1519 MAILBOX_t *mb = &pmb->mb; 2280 MAILBOX_t *mb = &pmb->u.mb;
1520 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2281 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1521 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2282 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1522 struct lpfc_vport *vport = pmb->vport; 2283 struct lpfc_vport *vport = pmb->vport;
1523 2284
1524 if (mb->mbxStatus) { 2285 if (mb->mbxStatus) {
1525out: 2286out:
2287 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2288 "0260 Register NameServer error: 0x%x\n",
2289 mb->mbxStatus);
1526 /* decrement the node reference count held for this 2290 /* decrement the node reference count held for this
1527 * callback function. 2291 * callback function.
1528 */ 2292 */
@@ -1546,15 +2310,13 @@ out:
1546 return; 2310 return;
1547 } 2311 }
1548 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2312 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1549 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1550 "0260 Register NameServer error: 0x%x\n",
1551 mb->mbxStatus);
1552 return; 2313 return;
1553 } 2314 }
1554 2315
1555 pmb->context1 = NULL; 2316 pmb->context1 = NULL;
1556 2317
1557 ndlp->nlp_rpi = mb->un.varWords[0]; 2318 ndlp->nlp_rpi = mb->un.varWords[0];
2319 ndlp->nlp_flag |= NLP_RPI_VALID;
1558 ndlp->nlp_type |= NLP_FABRIC; 2320 ndlp->nlp_type |= NLP_FABRIC;
1559 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2321 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1560 2322
@@ -2055,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
2055 if (pring->ringno == LPFC_ELS_RING) { 2817 if (pring->ringno == LPFC_ELS_RING) {
2056 switch (icmd->ulpCommand) { 2818 switch (icmd->ulpCommand) {
2057 case CMD_GEN_REQUEST64_CR: 2819 case CMD_GEN_REQUEST64_CR:
2058 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) 2820 if (iocb->context_un.ndlp == ndlp)
2059 return 1; 2821 return 1;
2060 case CMD_ELS_REQUEST64_CR: 2822 case CMD_ELS_REQUEST64_CR:
2061 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) 2823 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2102,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2102 */ 2864 */
2103 psli = &phba->sli; 2865 psli = &phba->sli;
2104 rpi = ndlp->nlp_rpi; 2866 rpi = ndlp->nlp_rpi;
2105 if (rpi) { 2867 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2106 /* Now process each ring */ 2868 /* Now process each ring */
2107 for (i = 0; i < psli->num_rings; i++) { 2869 for (i = 0; i < psli->num_rings; i++) {
2108 pring = &psli->ring[i]; 2870 pring = &psli->ring[i];
@@ -2150,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2150 LPFC_MBOXQ_t *mbox; 2912 LPFC_MBOXQ_t *mbox;
2151 int rc; 2913 int rc;
2152 2914
2153 if (ndlp->nlp_rpi) { 2915 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2154 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2916 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2155 if (mbox) { 2917 if (mbox) {
2156 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 2918 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2162,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2162 } 2924 }
2163 lpfc_no_rpi(phba, ndlp); 2925 lpfc_no_rpi(phba, ndlp);
2164 ndlp->nlp_rpi = 0; 2926 ndlp->nlp_rpi = 0;
2927 ndlp->nlp_flag &= ~NLP_RPI_VALID;
2165 return 1; 2928 return 1;
2166 } 2929 }
2167 return 0; 2930 return 0;
@@ -2252,7 +3015,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2252 3015
2253 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 3016 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
2254 if ((mb = phba->sli.mbox_active)) { 3017 if ((mb = phba->sli.mbox_active)) {
2255 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3018 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2256 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3019 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2257 mb->context2 = NULL; 3020 mb->context2 = NULL;
2258 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3021 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2261,7 +3024,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2261 3024
2262 spin_lock_irq(&phba->hbalock); 3025 spin_lock_irq(&phba->hbalock);
2263 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 3026 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
2264 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3027 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2265 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3028 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2266 mp = (struct lpfc_dmabuf *) (mb->context1); 3029 mp = (struct lpfc_dmabuf *) (mb->context1);
2267 if (mp) { 3030 if (mp) {
@@ -2309,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2309 int rc; 3072 int rc;
2310 3073
2311 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3074 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2312 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { 3075 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
3076 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
2313 /* For this case we need to cleanup the default rpi 3077 /* For this case we need to cleanup the default rpi
2314 * allocated by the firmware. 3078 * allocated by the firmware.
2315 */ 3079 */
2316 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 3080 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2317 != NULL) { 3081 != NULL) {
2318 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, 3082 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
2319 (uint8_t *) &vport->fc_sparam, mbox, 0); 3083 (uint8_t *) &vport->fc_sparam, mbox, 0);
2320 if (rc) { 3084 if (rc) {
2321 mempool_free(mbox, phba->mbox_mem_pool); 3085 mempool_free(mbox, phba->mbox_mem_pool);
@@ -2553,7 +3317,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2553 * clear_la then don't send it. 3317 * clear_la then don't send it.
2554 */ 3318 */
2555 if ((phba->link_state >= LPFC_CLEAR_LA) || 3319 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2556 (vport->port_type != LPFC_PHYSICAL_PORT)) 3320 (vport->port_type != LPFC_PHYSICAL_PORT) ||
3321 (phba->sli_rev == LPFC_SLI_REV4))
2557 return; 3322 return;
2558 3323
2559 /* Link up discovery */ 3324 /* Link up discovery */
@@ -2582,7 +3347,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2582 3347
2583 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3348 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2584 if (regvpimbox) { 3349 if (regvpimbox) {
2585 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); 3350 lpfc_reg_vpi(vport, regvpimbox);
2586 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 3351 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2587 regvpimbox->vport = vport; 3352 regvpimbox->vport = vport;
2588 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 3353 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2642,7 +3407,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
2642 */ 3407 */
2643 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3408 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2644 !(vport->fc_flag & FC_PT2PT) && 3409 !(vport->fc_flag & FC_PT2PT) &&
2645 !(vport->fc_flag & FC_RSCN_MODE)) { 3410 !(vport->fc_flag & FC_RSCN_MODE) &&
3411 (phba->sli_rev < LPFC_SLI_REV4)) {
2646 lpfc_issue_reg_vpi(phba, vport); 3412 lpfc_issue_reg_vpi(phba, vport);
2647 return; 3413 return;
2648 } 3414 }
@@ -2919,11 +3685,13 @@ restart_disc:
2919 * set port_state to PORT_READY if SLI2. 3685 * set port_state to PORT_READY if SLI2.
2920 * cmpl_reg_vpi will set port_state to READY for SLI3. 3686 * cmpl_reg_vpi will set port_state to READY for SLI3.
2921 */ 3687 */
2922 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3688 if (phba->sli_rev < LPFC_SLI_REV4) {
2923 lpfc_issue_reg_vpi(phba, vport); 3689 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2924 else { /* NPIV Not enabled */ 3690 lpfc_issue_reg_vpi(phba, vport);
2925 lpfc_issue_clear_la(phba, vport); 3691 else { /* NPIV Not enabled */
2926 vport->port_state = LPFC_VPORT_READY; 3692 lpfc_issue_clear_la(phba, vport);
3693 vport->port_state = LPFC_VPORT_READY;
3694 }
2927 } 3695 }
2928 3696
2929 /* Setup and issue mailbox INITIALIZE LINK command */ 3697 /* Setup and issue mailbox INITIALIZE LINK command */
@@ -2939,7 +3707,7 @@ restart_disc:
2939 lpfc_linkdown(phba); 3707 lpfc_linkdown(phba);
2940 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 3708 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2941 phba->cfg_link_speed); 3709 phba->cfg_link_speed);
2942 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3710 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
2943 initlinkmbox->vport = vport; 3711 initlinkmbox->vport = vport;
2944 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3712 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2945 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 3713 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -2959,11 +3727,13 @@ restart_disc:
2959 * set port_state to PORT_READY if SLI2. 3727 * set port_state to PORT_READY if SLI2.
2960 * cmpl_reg_vpi will set port_state to READY for SLI3. 3728 * cmpl_reg_vpi will set port_state to READY for SLI3.
2961 */ 3729 */
2962 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3730 if (phba->sli_rev < LPFC_SLI_REV4) {
2963 lpfc_issue_reg_vpi(phba, vport); 3731 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2964 else { /* NPIV Not enabled */ 3732 lpfc_issue_reg_vpi(phba, vport);
2965 lpfc_issue_clear_la(phba, vport); 3733 else { /* NPIV Not enabled */
2966 vport->port_state = LPFC_VPORT_READY; 3734 lpfc_issue_clear_la(phba, vport);
3735 vport->port_state = LPFC_VPORT_READY;
3736 }
2967 } 3737 }
2968 break; 3738 break;
2969 3739
@@ -3036,7 +3806,7 @@ restart_disc:
3036void 3806void
3037lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3807lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3038{ 3808{
3039 MAILBOX_t *mb = &pmb->mb; 3809 MAILBOX_t *mb = &pmb->u.mb;
3040 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3810 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3041 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3811 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3042 struct lpfc_vport *vport = pmb->vport; 3812 struct lpfc_vport *vport = pmb->vport;
@@ -3044,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3044 pmb->context1 = NULL; 3814 pmb->context1 = NULL;
3045 3815
3046 ndlp->nlp_rpi = mb->un.varWords[0]; 3816 ndlp->nlp_rpi = mb->un.varWords[0];
3817 ndlp->nlp_flag |= NLP_RPI_VALID;
3047 ndlp->nlp_type |= NLP_FABRIC; 3818 ndlp->nlp_type |= NLP_FABRIC;
3048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3819 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3049 3820
@@ -3297,3 +4068,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
3297 return 1; 4068 return 1;
3298 return 0; 4069 return 0;
3299} 4070}
4071
4072/**
4073 * lpfc_fcf_inuse - Check if FCF can be unregistered.
4074 * @phba: Pointer to hba context object.
4075 *
4076 * This function iterates through all FC nodes associated
4077 * with all vports to check if there is any node with
4078 * fc_rports associated with it. If there is an fc_rport
4079 * associated with the node, then the node is either in
4080 * discovered state or its devloss_timer is pending.
4081 */
4082static int
4083lpfc_fcf_inuse(struct lpfc_hba *phba)
4084{
4085 struct lpfc_vport **vports;
4086 int i, ret = 0;
4087 struct lpfc_nodelist *ndlp;
4088 struct Scsi_Host *shost;
4089
4090 vports = lpfc_create_vport_work_array(phba);
4091
4092 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4093 shost = lpfc_shost_from_vport(vports[i]);
4094 spin_lock_irq(shost->host_lock);
4095 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4096 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
4097 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
4098 ret = 1;
4099 spin_unlock_irq(shost->host_lock);
4100 goto out;
4101 }
4102 }
4103 spin_unlock_irq(shost->host_lock);
4104 }
4105out:
4106 lpfc_destroy_vport_work_array(phba, vports);
4107 return ret;
4108}
4109
4110/**
4111 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
4112 * @phba: Pointer to hba context object.
4113 * @mboxq: Pointer to mailbox object.
4114 *
4115 * This function frees memory associated with the mailbox command.
4116 */
4117static void
4118lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4119{
4120 struct lpfc_vport *vport = mboxq->vport;
4121
4122 if (mboxq->u.mb.mbxStatus) {
4123 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4124 "2555 UNREG_VFI mbxStatus error x%x "
4125 "HBA state x%x\n",
4126 mboxq->u.mb.mbxStatus, vport->port_state);
4127 }
4128 mempool_free(mboxq, phba->mbox_mem_pool);
4129 return;
4130}
4131
4132/**
4133 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
4134 * @phba: Pointer to hba context object.
4135 * @mboxq: Pointer to mailbox object.
4136 *
4137 * This function frees memory associated with the mailbox command.
4138 */
4139static void
4140lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4141{
4142 struct lpfc_vport *vport = mboxq->vport;
4143
4144 if (mboxq->u.mb.mbxStatus) {
4145 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4146 "2550 UNREG_FCFI mbxStatus error x%x "
4147 "HBA state x%x\n",
4148 mboxq->u.mb.mbxStatus, vport->port_state);
4149 }
4150 mempool_free(mboxq, phba->mbox_mem_pool);
4151 return;
4152}
4153
4154/**
4155 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
4156 * @phba: Pointer to hba context object.
4157 *
4158 * This function checks if there are any connected remote ports for the FCF and,
4159 * if all the devices are disconnected, unregisters the FCFI.
4160 * This function also tries to use another FCF for discovery.
4161 */
4162void
4163lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4164{
4165 LPFC_MBOXQ_t *mbox;
4166 int rc;
4167 struct lpfc_vport **vports;
4168 int i;
4169
4170 spin_lock_irq(&phba->hbalock);
4171 /*
4172	 * If the HBA is not running in FIP mode, or
4173	 * if the HBA does not support FCoE, or
4174	 * if the FCF is not registered,
4175	 * do nothing.
4176 */
4177 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4178 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4179 (phba->cfg_enable_fip == 0)) {
4180 spin_unlock_irq(&phba->hbalock);
4181 return;
4182 }
4183 spin_unlock_irq(&phba->hbalock);
4184
4185 if (lpfc_fcf_inuse(phba))
4186 return;
4187
4188
4189 /* Unregister VPIs */
4190 vports = lpfc_create_vport_work_array(phba);
4191 if (vports &&
4192 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4193 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4194 lpfc_mbx_unreg_vpi(vports[i]);
4195 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4196 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
4197 }
4198 lpfc_destroy_vport_work_array(phba, vports);
4199
4200 /* Unregister VFI */
4201 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4202 if (!mbox) {
4203 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4204				"2556 UNREG_VFI mbox allocation failed "
4205 "HBA state x%x\n",
4206 phba->pport->port_state);
4207 return;
4208 }
4209
4210 lpfc_unreg_vfi(mbox, phba->pport->vfi);
4211 mbox->vport = phba->pport;
4212 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4213
4214 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4215 if (rc == MBX_NOT_FINISHED) {
4216 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4217 "2557 UNREG_VFI issue mbox failed rc x%x "
4218 "HBA state x%x\n",
4219 rc, phba->pport->port_state);
4220 mempool_free(mbox, phba->mbox_mem_pool);
4221 return;
4222 }
4223
4224 /* Unregister FCF */
4225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4226 if (!mbox) {
4227 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4228				"2551 UNREG_FCFI mbox allocation failed "
4229 "HBA state x%x\n",
4230 phba->pport->port_state);
4231 return;
4232 }
4233
4234 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4235 mbox->vport = phba->pport;
4236 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4237 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4238
4239 if (rc == MBX_NOT_FINISHED) {
4240 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4241 "2552 UNREG_FCFI issue mbox failed rc x%x "
4242 "HBA state x%x\n",
4243 rc, phba->pport->port_state);
4244 mempool_free(mbox, phba->mbox_mem_pool);
4245 return;
4246 }
4247
4248 spin_lock_irq(&phba->hbalock);
4249 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
4250 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
4251 FCF_VALID_VLAN);
4252 spin_unlock_irq(&phba->hbalock);
4253
4254 /*
4255 * If driver is not unloading, check if there is any other
4256 * FCF record that can be used for discovery.
4257 */
4258 if ((phba->pport->load_flag & FC_UNLOADING) ||
4259 (phba->link_state < LPFC_LINK_UP))
4260 return;
4261
4262 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
4263
4264 if (rc)
4265 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4266 "2553 lpfc_unregister_unused_fcf failed to read FCF"
4267 " record HBA state x%x\n",
4268 phba->pport->port_state);
4269}
4270
4271/**
4272 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
4273 * @phba: Pointer to hba context object.
4274 * @buff: Buffer containing the FCF connection table as in the config
4275 * region.
4276 * This function creates the driver data structure for the FCF connection
4277 * record table read from config region 23.
4278 */
4279static void
4280lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4281 uint8_t *buff)
4282{
4283 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4284 struct lpfc_fcf_conn_hdr *conn_hdr;
4285 struct lpfc_fcf_conn_rec *conn_rec;
4286 uint32_t record_count;
4287 int i;
4288
4289 /* Free the current connect table */
4290 list_for_each_entry_safe(conn_entry, next_conn_entry,
4291 &phba->fcf_conn_rec_list, list)
4292 kfree(conn_entry);
4293
4294 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4295 record_count = conn_hdr->length * sizeof(uint32_t)/
4296 sizeof(struct lpfc_fcf_conn_rec);
4297
4298 conn_rec = (struct lpfc_fcf_conn_rec *)
4299 (buff + sizeof(struct lpfc_fcf_conn_hdr));
4300
4301 for (i = 0; i < record_count; i++) {
4302 if (!(conn_rec[i].flags & FCFCNCT_VALID))
4303 continue;
4304 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
4305 GFP_KERNEL);
4306 if (!conn_entry) {
4307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4308 "2566 Failed to allocate connection"
4309 " table entry\n");
4310 return;
4311 }
4312
4313 memcpy(&conn_entry->conn_rec, &conn_rec[i],
4314 sizeof(struct lpfc_fcf_conn_rec));
4315 conn_entry->conn_rec.vlan_tag =
4316 le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
4317 conn_entry->conn_rec.flags =
4318 le16_to_cpu(conn_entry->conn_rec.flags);
4319 list_add_tail(&conn_entry->list,
4320 &phba->fcf_conn_rec_list);
4321 }
4322}
4323
4324/**
4325 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
4326 * @phba: Pointer to hba context object.
4327 * @buff: Buffer containing the FCoE parameter data structure.
4328 *
4329 * This function updates the driver data structure with config
4330 * parameters read from config region 23.
4331 */
4332static void
4333lpfc_read_fcoe_param(struct lpfc_hba *phba,
4334 uint8_t *buff)
4335{
4336 struct lpfc_fip_param_hdr *fcoe_param_hdr;
4337 struct lpfc_fcoe_params *fcoe_param;
4338
4339 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4340 buff;
4341	fcoe_param = (struct lpfc_fcoe_params *)
4342		(buff + sizeof(struct lpfc_fip_param_hdr));
4343
4344 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4345 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4346 return;
4347
4348 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4349 FIPP_MODE_ON)
4350 phba->cfg_enable_fip = 1;
4351
4352 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4353 FIPP_MODE_OFF)
4354 phba->cfg_enable_fip = 0;
4355
4356 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4357 phba->valid_vlan = 1;
4358 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
4359 0xFFF;
4360 }
4361
4362 phba->fc_map[0] = fcoe_param->fc_map[0];
4363 phba->fc_map[1] = fcoe_param->fc_map[1];
4364 phba->fc_map[2] = fcoe_param->fc_map[2];
4365 return;
4366}
4367
4368/**
4369 * lpfc_get_rec_conf23 - Get a record type in config region data.
4370 * @buff: Buffer containing config region 23 data.
4371 * @size: Size of the data buffer.
4372 * @rec_type: Record type to be searched.
4373 *
4374 * This function searches the config region data to find the beginning
4375 * of the record specified by rec_type. If the record is found, this
4376 * function returns a pointer to the record; otherwise it returns NULL.
4377 */
4378static uint8_t *
4379lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
4380{
4381 uint32_t offset = 0, rec_length;
4382
4383 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
4384 (size < sizeof(uint32_t)))
4385 return NULL;
4386
4387 rec_length = buff[offset + 1];
4388
4389 /*
4390	 * One TLV record has a one-word header plus the number of data words
4391	 * specified in the rec_length field of the record header.
4392 */
4393 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
4394 <= size) {
4395 if (buff[offset] == rec_type)
4396 return &buff[offset];
4397
4398 if (buff[offset] == LPFC_REGION23_LAST_REC)
4399 return NULL;
4400
4401 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
4402 rec_length = buff[offset + 1];
4403 }
4404 return NULL;
4405}
4406
4407/**
4408 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
4409 * @phba: Pointer to lpfc_hba data structure.
4410 * @buff: Buffer containing config region 23 data.
4411 * @size: Size of the data buffer.
4412 *
4413 * This function parses the FCoE config parameters in config region 23 and
4414 * populates the driver data structure with the parameters.
4415 */
4416void
4417lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
4418 uint8_t *buff,
4419 uint32_t size)
4420{
4421 uint32_t offset = 0, rec_length;
4422 uint8_t *rec_ptr;
4423
4424 /*
4425 * If data size is less than 2 words signature and version cannot be
4426 * verified.
4427 */
4428 if (size < 2*sizeof(uint32_t))
4429 return;
4430
4431 /* Check the region signature first */
4432 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
4433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4434 "2567 Config region 23 has bad signature\n");
4435 return;
4436 }
4437
4438 offset += 4;
4439
4440 /* Check the data structure version */
4441 if (buff[offset] != LPFC_REGION23_VERSION) {
4442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4443 "2568 Config region 23 has bad version\n");
4444 return;
4445 }
4446 offset += 4;
4447
4448 rec_length = buff[offset + 1];
4449
4450 /* Read FCoE param record */
4451 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4452 size - offset, FCOE_PARAM_TYPE);
4453 if (rec_ptr)
4454 lpfc_read_fcoe_param(phba, rec_ptr);
4455
4456 /* Read FCF connection table */
4457 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4458 size - offset, FCOE_CONN_TBL_TYPE);
4459 if (rec_ptr)
4460 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
4461
4462}
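/*
 * Illustrative only -- not part of the diff above. A rough sketch of the
 * config region 23 layout assumed by lpfc_parse_fcoe_conf() and
 * lpfc_get_rec_conf23(): a 4-byte signature, a version word, then TLV
 * records until an LPFC_REGION23_LAST_REC type is seen. The TLV header
 * field widths are inferred from the byte offsets used by the walker,
 * not from a hardware spec.
 */
struct example_region23_tlv {
	uint8_t  rec_type;	/* buff[offset]: FCOE_PARAM_TYPE, FCOE_CONN_TBL_TYPE, ... */
	uint8_t  rec_length;	/* buff[offset + 1]: data length in 32-bit words */
	uint8_t  rsvd[2];	/* remainder of the one-word record header */
	uint32_t data[];	/* rec_length data words follow */
};
/*
 * The walker therefore advances by
 *	rec_length * sizeof(uint32_t) + sizeof(uint32_t)
 * from one record header to the next.
 */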
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b8..02aa016b93e9 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */
471}; 471};
472 472
473/* 473/*
474 * Virtual Fabric Tagging Header
475 */
476struct fc_vft_header {
477 uint32_t word0;
478#define fc_vft_hdr_r_ctl_SHIFT 24
479#define fc_vft_hdr_r_ctl_MASK 0xFF
480#define fc_vft_hdr_r_ctl_WORD word0
481#define fc_vft_hdr_ver_SHIFT 22
482#define fc_vft_hdr_ver_MASK 0x3
483#define fc_vft_hdr_ver_WORD word0
484#define fc_vft_hdr_type_SHIFT 18
485#define fc_vft_hdr_type_MASK 0xF
486#define fc_vft_hdr_type_WORD word0
487#define fc_vft_hdr_e_SHIFT 16
488#define fc_vft_hdr_e_MASK 0x1
489#define fc_vft_hdr_e_WORD word0
490#define fc_vft_hdr_priority_SHIFT 13
491#define fc_vft_hdr_priority_MASK 0x7
492#define fc_vft_hdr_priority_WORD word0
493#define fc_vft_hdr_vf_id_SHIFT 1
494#define fc_vft_hdr_vf_id_MASK 0xFFF
495#define fc_vft_hdr_vf_id_WORD word0
496 uint32_t word1;
497#define fc_vft_hdr_hopct_SHIFT 24
498#define fc_vft_hdr_hopct_MASK 0xFF
499#define fc_vft_hdr_hopct_WORD word1
500};
501
502/*
474 * Extended Link Service LS_COMMAND codes (Payload Word 0) 503 * Extended Link Service LS_COMMAND codes (Payload Word 0)
475 */ 504 */
476#ifdef __BIG_ENDIAN_BITFIELD 505#ifdef __BIG_ENDIAN_BITFIELD
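/*
 * Illustrative only -- not part of the diff. Assuming the bf_get() helper
 * added by this patch in lpfc_hw4.h (see below), the fc_vft_header bit
 * fields defined above are read through the usual _SHIFT/_MASK/_WORD
 * triplets, e.g.:
 */
static inline uint32_t example_vft_vf_id(struct fc_vft_header *vft)
{
	/* vf_id occupies bits 12:1 of word0 */
	return bf_get(fc_vft_hdr_vf_id, vft);
}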
@@ -1152,6 +1181,9 @@ typedef struct {
1152#define PCI_DEVICE_ID_HORNET 0xfe05 1181#define PCI_DEVICE_ID_HORNET 0xfe05
1153#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1182#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1154#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1186#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
1155 1187
1156#define JEDEC_ID_ADDRESS 0x0080001c 1188#define JEDEC_ID_ADDRESS 0x0080001c
1157#define FIREFLY_JEDEC_ID 0x1ACC 1189#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */
1342#define MBX_READ_LA64 0x95 1374#define MBX_READ_LA64 0x95
1343#define MBX_REG_VPI 0x96 1375#define MBX_REG_VPI 0x96
1344#define MBX_UNREG_VPI 0x97 1376#define MBX_UNREG_VPI 0x97
1345#define MBX_REG_VNPID 0x96
1346#define MBX_UNREG_VNPID 0x97
1347 1377
1348#define MBX_WRITE_WWN 0x98 1378#define MBX_WRITE_WWN 0x98
1349#define MBX_SET_DEBUG 0x99 1379#define MBX_SET_DEBUG 0x99
1350#define MBX_LOAD_EXP_ROM 0x9C 1380#define MBX_LOAD_EXP_ROM 0x9C
1351 1381#define MBX_SLI4_CONFIG 0x9B
1352#define MBX_MAX_CMDS 0x9D 1382#define MBX_SLI4_REQ_FTRS 0x9D
1383#define MBX_MAX_CMDS 0x9E
1384#define MBX_RESUME_RPI 0x9E
1353#define MBX_SLI2_CMD_MASK 0x80 1385#define MBX_SLI2_CMD_MASK 0x80
1386#define MBX_REG_VFI 0x9F
1387#define MBX_REG_FCFI 0xA0
1388#define MBX_UNREG_VFI 0xA1
1389#define MBX_UNREG_FCFI 0xA2
1390#define MBX_INIT_VFI 0xA3
1391#define MBX_INIT_VPI 0xA4
1354 1392
1355/* IOCB Commands */ 1393/* IOCB Commands */
1356 1394
@@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */
1440#define CMD_IOCB_LOGENTRY_CN 0x94 1478#define CMD_IOCB_LOGENTRY_CN 0x94
1441#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1479#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1442 1480
1481/* Unhandled Data Security SLI Commands */
1482#define DSSCMD_IWRITE64_CR 0xD8
1483#define DSSCMD_IWRITE64_CX 0xD9
1484#define DSSCMD_IREAD64_CR 0xDA
1485#define DSSCMD_IREAD64_CX 0xDB
1486#define DSSCMD_INVALIDATE_DEK 0xDC
1487#define DSSCMD_SET_KEK 0xDD
1488#define DSSCMD_GET_KEK_ID 0xDE
1489#define DSSCMD_GEN_XFER 0xDF
1490
1443#define CMD_MAX_IOCB_CMD 0xE6 1491#define CMD_MAX_IOCB_CMD 0xE6
1444#define CMD_IOCB_MASK 0xff 1492#define CMD_IOCB_MASK 0xff
1445 1493
@@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */
1466#define MBXERR_BAD_RCV_LENGTH 14 1514#define MBXERR_BAD_RCV_LENGTH 14
1467#define MBXERR_DMA_ERROR 15 1515#define MBXERR_DMA_ERROR 15
1468#define MBXERR_ERROR 16 1516#define MBXERR_ERROR 16
1517#define MBXERR_LINK_DOWN 0x33
1469#define MBX_NOT_FINISHED 255 1518#define MBX_NOT_FINISHED 255
1470 1519
1471#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ 1520#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1553,6 @@ struct ulp_bde {
1504#endif 1553#endif
1505}; 1554};
1506 1555
1507struct ulp_bde64 { /* SLI-2 */
1508 union ULP_BDE_TUS {
1509 uint32_t w;
1510 struct {
1511#ifdef __BIG_ENDIAN_BITFIELD
1512 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1513 VALUE !! */
1514 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1515#else /* __LITTLE_ENDIAN_BITFIELD */
1516 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1517 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1518 VALUE !! */
1519#endif
1520#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
1521#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
1522#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
1523#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
1524#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
1525#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
1526#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
1527 } f;
1528 } tus;
1529 uint32_t addrLow;
1530 uint32_t addrHigh;
1531};
1532
1533typedef struct ULP_BDL { /* SLI-2 */ 1556typedef struct ULP_BDL { /* SLI-2 */
1534#ifdef __BIG_ENDIAN_BITFIELD 1557#ifdef __BIG_ENDIAN_BITFIELD
1535 uint32_t bdeFlags:8; /* BDL Flags */ 1558 uint32_t bdeFlags:8; /* BDL Flags */
@@ -2287,7 +2310,7 @@ typedef struct {
2287 uint32_t rsvd3; 2310 uint32_t rsvd3;
2288 uint32_t rsvd4; 2311 uint32_t rsvd4;
2289 uint32_t rsvd5; 2312 uint32_t rsvd5;
2290 uint16_t rsvd6; 2313 uint16_t vfi;
2291 uint16_t vpi; 2314 uint16_t vpi;
2292#else /* __LITTLE_ENDIAN */ 2315#else /* __LITTLE_ENDIAN */
2293 uint32_t rsvd1; 2316 uint32_t rsvd1;
@@ -2297,7 +2320,7 @@ typedef struct {
2297 uint32_t rsvd4; 2320 uint32_t rsvd4;
2298 uint32_t rsvd5; 2321 uint32_t rsvd5;
2299 uint16_t vpi; 2322 uint16_t vpi;
2300 uint16_t rsvd6; 2323 uint16_t vfi;
2301#endif 2324#endif
2302} REG_VPI_VAR; 2325} REG_VPI_VAR;
2303 2326
@@ -2457,7 +2480,7 @@ typedef struct {
2457 uint32_t entry_index:16; 2480 uint32_t entry_index:16;
2458#endif 2481#endif
2459 2482
2460 uint32_t rsvd1; 2483 uint32_t sli4_length;
2461 uint32_t word_cnt; 2484 uint32_t word_cnt;
2462 uint32_t resp_offset; 2485 uint32_t resp_offset;
2463} DUMP_VAR; 2486} DUMP_VAR;
@@ -2470,9 +2493,32 @@ typedef struct {
2470#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2493#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2471#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2494#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2472 2495
2496#define DMP_REGION_VPORT 0x16 /* VPort info region */
2497#define DMP_VPORT_REGION_SIZE 0x200
2498#define DMP_MBOX_OFFSET_WORD 0x5
2499
2500#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
2501#define DMP_FCOEPARAM_RGN_SIZE 0x400
2502
2473#define WAKE_UP_PARMS_REGION_ID 4 2503#define WAKE_UP_PARMS_REGION_ID 4
2474#define WAKE_UP_PARMS_WORD_SIZE 15 2504#define WAKE_UP_PARMS_WORD_SIZE 15
2475 2505
2506struct vport_rec {
2507 uint8_t wwpn[8];
2508 uint8_t wwnn[8];
2509};
2510
2511#define VPORT_INFO_SIG 0x32324752
2512#define VPORT_INFO_REV_MASK 0xff
2513#define VPORT_INFO_REV 0x1
2514#define MAX_STATIC_VPORT_COUNT 16
2515struct static_vport_info {
2516 uint32_t signature;
2517 uint32_t rev;
2518 struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
2519 uint32_t resvd[66];
2520};
2521
2476/* Option rom version structure */ 2522/* Option rom version structure */
2477struct prog_id { 2523struct prog_id {
2478#ifdef __BIG_ENDIAN_BITFIELD 2524#ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2743,9 @@ typedef struct {
2697#endif 2743#endif
2698 2744
2699#ifdef __BIG_ENDIAN_BITFIELD 2745#ifdef __BIG_ENDIAN_BITFIELD
2700 uint32_t rsvd1 : 23; /* Reserved */ 2746 uint32_t rsvd1 : 19; /* Reserved */
2747 uint32_t cdss : 1; /* Configure Data Security SLI */
2748 uint32_t rsvd2 : 3; /* Reserved */
2701 uint32_t cbg : 1; /* Configure BlockGuard */ 2749 uint32_t cbg : 1; /* Configure BlockGuard */
2702 uint32_t cmv : 1; /* Configure Max VPIs */ 2750 uint32_t cmv : 1; /* Configure Max VPIs */
2703 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2751 uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2717,10 +2765,14 @@ typedef struct {
2717 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2765 uint32_t ccrp : 1; /* Config Command Ring Polling */
2718 uint32_t cmv : 1; /* Configure Max VPIs */ 2766 uint32_t cmv : 1; /* Configure Max VPIs */
2719 uint32_t cbg : 1; /* Configure BlockGuard */ 2767 uint32_t cbg : 1; /* Configure BlockGuard */
2720 uint32_t rsvd1 : 23; /* Reserved */ 2768 uint32_t rsvd2 : 3; /* Reserved */
2769 uint32_t cdss : 1; /* Configure Data Security SLI */
2770 uint32_t rsvd1 : 19; /* Reserved */
2721#endif 2771#endif
2722#ifdef __BIG_ENDIAN_BITFIELD 2772#ifdef __BIG_ENDIAN_BITFIELD
2723 uint32_t rsvd2 : 23; /* Reserved */ 2773 uint32_t rsvd3 : 19; /* Reserved */
2774	uint32_t gdss      :  1;  /* Grant Data Security SLI */
2775 uint32_t rsvd4 : 3; /* Reserved */
2724 uint32_t gbg : 1; /* Grant BlockGuard */ 2776 uint32_t gbg : 1; /* Grant BlockGuard */
2725 uint32_t gmv : 1; /* Grant Max VPIs */ 2777 uint32_t gmv : 1; /* Grant Max VPIs */
2726 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2778 uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2740,7 +2792,9 @@ typedef struct {
2740 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2792 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2741 uint32_t gmv : 1; /* Grant Max VPIs */ 2793 uint32_t gmv : 1; /* Grant Max VPIs */
2742 uint32_t gbg : 1; /* Grant BlockGuard */ 2794 uint32_t gbg : 1; /* Grant BlockGuard */
2743 uint32_t rsvd2 : 23; /* Reserved */ 2795 uint32_t rsvd4 : 3; /* Reserved */
2796	uint32_t gdss      :  1;  /* Grant Data Security SLI */
2797 uint32_t rsvd3 : 19; /* Reserved */
2744#endif 2798#endif
2745 2799
2746#ifdef __BIG_ENDIAN_BITFIELD 2800#ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2807,20 @@ typedef struct {
2753 2807
2754#ifdef __BIG_ENDIAN_BITFIELD 2808#ifdef __BIG_ENDIAN_BITFIELD
2755 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2809 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2756 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2810 uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
2757#else /* __LITTLE_ENDIAN */ 2811#else /* __LITTLE_ENDIAN */
2758 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2812 uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
2759 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2813 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2760#endif 2814#endif
2761 2815
2762 uint32_t rsvd4; /* Reserved */ 2816 uint32_t rsvd6; /* Reserved */
2763 2817
2764#ifdef __BIG_ENDIAN_BITFIELD 2818#ifdef __BIG_ENDIAN_BITFIELD
2765 uint32_t rsvd5 : 16; /* Reserved */ 2819 uint32_t rsvd7 : 16; /* Reserved */
2766 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2820 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2767#else /* __LITTLE_ENDIAN */ 2821#else /* __LITTLE_ENDIAN */
2768 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2822 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2769 uint32_t rsvd5 : 16; /* Reserved */ 2823 uint32_t rsvd7 : 16; /* Reserved */
2770#endif 2824#endif
2771 2825
2772} CONFIG_PORT_VAR; 2826} CONFIG_PORT_VAR;
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
3666#define MENLO_TIMEOUT 30 3720#define MENLO_TIMEOUT 30
3667#define SETVAR_MLOMNT 0x103107 3721#define SETVAR_MLOMNT 0x103107
3668#define SETVAR_MLORST 0x103007 3722#define SETVAR_MLORST 0x103007
3723
3724#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 000000000000..39c34b3ad29d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21/* Macros to deal with bit fields. Each bit field must have 3 #defines
22 * associated with it (_SHIFT, _MASK, and _WORD).
23 * EG. For a bit field that is in the 7th bit of the "field4" field of a
24 * structure and is 2 bits in size the following #defines must exist:
25 * struct temp {
26 * uint32_t field1;
27 * uint32_t field2;
28 * uint32_t field3;
29 * uint32_t field4;
30 * #define example_bit_field_SHIFT 7
31 * #define example_bit_field_MASK 0x03
32 * #define example_bit_field_WORD field4
33 * uint32_t field5;
34 * };
35 * Then the macros below may be used to get or set the value of that field.
36 * EG. To get the value of the bit field from the above example:
37 * struct temp t1;
38 * value = bf_get(example_bit_field, &t1);
39 * And then to set that bit field:
40 * bf_set(example_bit_field, &t1, 2);
41 * Or clear that bit field:
42 * bf_set(example_bit_field, &t1, 0);
43 */
44#define bf_get(name, ptr) \
45 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
46#define bf_set(name, ptr, value) \
47 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
48 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
49
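/*
 * Illustrative only -- not part of the diff. A minimal, compilable sketch
 * of the _SHIFT/_MASK/_WORD convention described in the comment above,
 * using the hypothetical "temp"/"example_bit_field" names from that comment.
 */
struct temp {
	uint32_t field1;
	uint32_t field2;
	uint32_t field3;
	uint32_t field4;
#define example_bit_field_SHIFT	7
#define example_bit_field_MASK	0x03
#define example_bit_field_WORD	field4
	uint32_t field5;
};

static inline void example_bit_field_usage(void)
{
	struct temp t1 = { 0 };
	uint32_t value;

	bf_set(example_bit_field, &t1, 2);	/* set bits 8:7 of field4 to 2 */
	value = bf_get(example_bit_field, &t1);	/* reads back 2 */
	bf_set(example_bit_field, &t1, 0);	/* clear the field */
	(void)value;
}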
50struct dma_address {
51 uint32_t addr_lo;
52 uint32_t addr_hi;
53};
54
55#define LPFC_SLI4_BAR0 1
56#define LPFC_SLI4_BAR1 2
57#define LPFC_SLI4_BAR2 4
58
59#define LPFC_SLI4_MBX_EMBED true
60#define LPFC_SLI4_MBX_NEMBED false
61
62#define LPFC_SLI4_MB_WORD_COUNT 64
63#define LPFC_MAX_MQ_PAGE 8
64#define LPFC_MAX_WQ_PAGE 8
65#define LPFC_MAX_CQ_PAGE 4
66#define LPFC_MAX_EQ_PAGE 8
67
68#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */
69#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */
70#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */
71
72/* Define SLI4 Alignment requirements. */
73#define LPFC_ALIGN_16_BYTE 16
74#define LPFC_ALIGN_64_BYTE 64
75
76/* Define SLI4 specific definitions. */
77#define LPFC_MQ_CQE_BYTE_OFFSET 256
78#define LPFC_MBX_CMD_HDR_LENGTH 16
79#define LPFC_MBX_ERROR_RANGE 0x4000
80#define LPFC_BMBX_BIT1_ADDR_HI 0x2
81#define LPFC_BMBX_BIT1_ADDR_LO 0
82#define LPFC_RPI_HDR_COUNT 64
83#define LPFC_HDR_TEMPLATE_SIZE 4096
84#define LPFC_RPI_ALLOC_ERROR 0xFFFF
85#define LPFC_FCF_RECORD_WD_CNT 132
86#define LPFC_ENTIRE_FCF_DATABASE 0
87#define LPFC_DFLT_FCF_INDEX 0
88
89/* Virtual function numbers */
90#define LPFC_VF0 0
91#define LPFC_VF1 1
92#define LPFC_VF2 2
93#define LPFC_VF3 3
94#define LPFC_VF4 4
95#define LPFC_VF5 5
96#define LPFC_VF6 6
97#define LPFC_VF7 7
98#define LPFC_VF8 8
99#define LPFC_VF9 9
100#define LPFC_VF10 10
101#define LPFC_VF11 11
102#define LPFC_VF12 12
103#define LPFC_VF13 13
104#define LPFC_VF14 14
105#define LPFC_VF15 15
106#define LPFC_VF16 16
107#define LPFC_VF17 17
108#define LPFC_VF18 18
109#define LPFC_VF19 19
110#define LPFC_VF20 20
111#define LPFC_VF21 21
112#define LPFC_VF22 22
113#define LPFC_VF23 23
114#define LPFC_VF24 24
115#define LPFC_VF25 25
116#define LPFC_VF26 26
117#define LPFC_VF27 27
118#define LPFC_VF28 28
119#define LPFC_VF29 29
120#define LPFC_VF30 30
121#define LPFC_VF31 31
122
123/* PCI function numbers */
124#define LPFC_PCI_FUNC0 0
125#define LPFC_PCI_FUNC1 1
126#define LPFC_PCI_FUNC2 2
127#define LPFC_PCI_FUNC3 3
128#define LPFC_PCI_FUNC4 4
129
130/* Active interrupt test count */
131#define LPFC_ACT_INTR_CNT 4
132
133/* Delay Multiplier constant */
134#define LPFC_DMULT_CONST 651042
135#define LPFC_MIM_IMAX 636
136#define LPFC_FP_DEF_IMAX 10000
137#define LPFC_SP_DEF_IMAX 10000
138
139struct ulp_bde64 {
140 union ULP_BDE_TUS {
141 uint32_t w;
142 struct {
143#ifdef __BIG_ENDIAN_BITFIELD
144 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
145 VALUE !! */
146 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
147#else /* __LITTLE_ENDIAN_BITFIELD */
148 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
149 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
150 VALUE !! */
151#endif
152#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
153#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
154#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
155#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
156#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
157#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
158#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
159 } f;
160 } tus;
161 uint32_t addrLow;
162 uint32_t addrHigh;
163};
164
165struct lpfc_sli4_flags {
166 uint32_t word0;
167#define lpfc_fip_flag_SHIFT 0
168#define lpfc_fip_flag_MASK 0x00000001
169#define lpfc_fip_flag_WORD word0
170};
171
172/* event queue entry structure */
173struct lpfc_eqe {
174 uint32_t word0;
175#define lpfc_eqe_resource_id_SHIFT 16
176#define lpfc_eqe_resource_id_MASK 0x000000FF
177#define lpfc_eqe_resource_id_WORD word0
178#define lpfc_eqe_minor_code_SHIFT 4
179#define lpfc_eqe_minor_code_MASK 0x00000FFF
180#define lpfc_eqe_minor_code_WORD word0
181#define lpfc_eqe_major_code_SHIFT 1
182#define lpfc_eqe_major_code_MASK 0x00000007
183#define lpfc_eqe_major_code_WORD word0
184#define lpfc_eqe_valid_SHIFT 0
185#define lpfc_eqe_valid_MASK 0x00000001
186#define lpfc_eqe_valid_WORD word0
187};
188
189/* completion queue entry structure (common fields for all cqe types) */
190struct lpfc_cqe {
191 uint32_t reserved0;
192 uint32_t reserved1;
193 uint32_t reserved2;
194 uint32_t word3;
195#define lpfc_cqe_valid_SHIFT 31
196#define lpfc_cqe_valid_MASK 0x00000001
197#define lpfc_cqe_valid_WORD word3
198#define lpfc_cqe_code_SHIFT 16
199#define lpfc_cqe_code_MASK 0x000000FF
200#define lpfc_cqe_code_WORD word3
201};
202
203/* Completion Queue Entry Status Codes */
204#define CQE_STATUS_SUCCESS 0x0
205#define CQE_STATUS_FCP_RSP_FAILURE 0x1
206#define CQE_STATUS_REMOTE_STOP 0x2
207#define CQE_STATUS_LOCAL_REJECT 0x3
208#define CQE_STATUS_NPORT_RJT 0x4
209#define CQE_STATUS_FABRIC_RJT 0x5
210#define CQE_STATUS_NPORT_BSY 0x6
211#define CQE_STATUS_FABRIC_BSY 0x7
212#define CQE_STATUS_INTERMED_RSP 0x8
213#define CQE_STATUS_LS_RJT 0x9
214#define CQE_STATUS_CMD_REJECT 0xb
215#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
216#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
217
218/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
219#define CQE_HW_STATUS_NO_ERR 0x0
220#define CQE_HW_STATUS_UNDERRUN 0x1
221#define CQE_HW_STATUS_OVERRUN 0x2
222
223/* Completion Queue Entry Codes */
224#define CQE_CODE_COMPL_WQE 0x1
225#define CQE_CODE_RELEASE_WQE 0x2
226#define CQE_CODE_RECEIVE 0x4
227#define CQE_CODE_XRI_ABORTED 0x5
228
229/* completion queue entry for wqe completions */
230struct lpfc_wcqe_complete {
231 uint32_t word0;
232#define lpfc_wcqe_c_request_tag_SHIFT 16
233#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF
234#define lpfc_wcqe_c_request_tag_WORD word0
235#define lpfc_wcqe_c_status_SHIFT 8
236#define lpfc_wcqe_c_status_MASK 0x000000FF
237#define lpfc_wcqe_c_status_WORD word0
238#define lpfc_wcqe_c_hw_status_SHIFT 0
239#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
240#define lpfc_wcqe_c_hw_status_WORD word0
241 uint32_t total_data_placed;
242 uint32_t parameter;
243 uint32_t word3;
244#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
245#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
246#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD
247#define lpfc_wcqe_c_xb_SHIFT 28
248#define lpfc_wcqe_c_xb_MASK 0x00000001
249#define lpfc_wcqe_c_xb_WORD word3
250#define lpfc_wcqe_c_pv_SHIFT 27
251#define lpfc_wcqe_c_pv_MASK 0x00000001
252#define lpfc_wcqe_c_pv_WORD word3
253#define lpfc_wcqe_c_priority_SHIFT 24
254#define lpfc_wcqe_c_priority_MASK 0x00000007
255#define lpfc_wcqe_c_priority_WORD word3
256#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
257#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
258#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
259};
260
261/* completion queue entry for wqe release */
262struct lpfc_wcqe_release {
263 uint32_t reserved0;
264 uint32_t reserved1;
265 uint32_t word2;
266#define lpfc_wcqe_r_wq_id_SHIFT 16
267#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF
268#define lpfc_wcqe_r_wq_id_WORD word2
269#define lpfc_wcqe_r_wqe_index_SHIFT 0
270#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF
271#define lpfc_wcqe_r_wqe_index_WORD word2
272 uint32_t word3;
273#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT
274#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK
275#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD
276#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT
277#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK
278#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD
279};
280
281struct sli4_wcqe_xri_aborted {
282 uint32_t word0;
283#define lpfc_wcqe_xa_status_SHIFT 8
284#define lpfc_wcqe_xa_status_MASK 0x000000FF
285#define lpfc_wcqe_xa_status_WORD word0
286 uint32_t parameter;
287 uint32_t word2;
288#define lpfc_wcqe_xa_remote_xid_SHIFT 16
289#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF
290#define lpfc_wcqe_xa_remote_xid_WORD word2
291#define lpfc_wcqe_xa_xri_SHIFT 0
292#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF
293#define lpfc_wcqe_xa_xri_WORD word2
294 uint32_t word3;
295#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT
296#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK
297#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD
298#define lpfc_wcqe_xa_ia_SHIFT 30
299#define lpfc_wcqe_xa_ia_MASK 0x00000001
300#define lpfc_wcqe_xa_ia_WORD word3
301#define CQE_XRI_ABORTED_IA_REMOTE 0
302#define CQE_XRI_ABORTED_IA_LOCAL 1
303#define lpfc_wcqe_xa_br_SHIFT 29
304#define lpfc_wcqe_xa_br_MASK 0x00000001
305#define lpfc_wcqe_xa_br_WORD word3
306#define CQE_XRI_ABORTED_BR_BA_ACC 0
307#define CQE_XRI_ABORTED_BR_BA_RJT 1
308#define lpfc_wcqe_xa_eo_SHIFT 28
309#define lpfc_wcqe_xa_eo_MASK 0x00000001
310#define lpfc_wcqe_xa_eo_WORD word3
311#define CQE_XRI_ABORTED_EO_REMOTE 0
312#define CQE_XRI_ABORTED_EO_LOCAL 1
313#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT
314#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK
315#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD
316};
317
318/* completion queue entry structure for rqe completion */
319struct lpfc_rcqe {
320 uint32_t word0;
321#define lpfc_rcqe_bindex_SHIFT 16
322#define lpfc_rcqe_bindex_MASK		0x00000FFF
323#define lpfc_rcqe_bindex_WORD word0
324#define lpfc_rcqe_status_SHIFT 8
325#define lpfc_rcqe_status_MASK 0x000000FF
326#define lpfc_rcqe_status_WORD word0
327#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */
328#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
329#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
330#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
331 uint32_t reserved1;
332 uint32_t word2;
333#define lpfc_rcqe_length_SHIFT 16
334#define lpfc_rcqe_length_MASK 0x0000FFFF
335#define lpfc_rcqe_length_WORD word2
336#define lpfc_rcqe_rq_id_SHIFT 6
337#define lpfc_rcqe_rq_id_MASK 0x000003FF
338#define lpfc_rcqe_rq_id_WORD word2
339#define lpfc_rcqe_fcf_id_SHIFT 0
340#define lpfc_rcqe_fcf_id_MASK 0x0000003F
341#define lpfc_rcqe_fcf_id_WORD word2
342 uint32_t word3;
343#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
344#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
345#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD
346#define lpfc_rcqe_port_SHIFT 30
347#define lpfc_rcqe_port_MASK 0x00000001
348#define lpfc_rcqe_port_WORD word3
349#define lpfc_rcqe_hdr_length_SHIFT 24
350#define lpfc_rcqe_hdr_length_MASK 0x0000001F
351#define lpfc_rcqe_hdr_length_WORD word3
352#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT
353#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK
354#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD
355#define lpfc_rcqe_eof_SHIFT 8
356#define lpfc_rcqe_eof_MASK 0x000000FF
357#define lpfc_rcqe_eof_WORD word3
358#define FCOE_EOFn 0x41
359#define FCOE_EOFt 0x42
360#define FCOE_EOFni 0x49
361#define FCOE_EOFa 0x50
362#define lpfc_rcqe_sof_SHIFT 0
363#define lpfc_rcqe_sof_MASK 0x000000FF
364#define lpfc_rcqe_sof_WORD word3
365#define FCOE_SOFi2 0x2d
366#define FCOE_SOFi3 0x2e
367#define FCOE_SOFn2 0x35
368#define FCOE_SOFn3 0x36
369};
370
371struct lpfc_wqe_generic {
372 struct ulp_bde64 bde;
373 uint32_t word3;
374 uint32_t word4;
375 uint32_t word5;
376 uint32_t word6;
377#define lpfc_wqe_gen_context_SHIFT 16
378#define lpfc_wqe_gen_context_MASK 0x0000FFFF
379#define lpfc_wqe_gen_context_WORD word6
380#define lpfc_wqe_gen_xri_SHIFT 0
381#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
382#define lpfc_wqe_gen_xri_WORD word6
383 uint32_t word7;
384#define lpfc_wqe_gen_lnk_SHIFT 23
385#define lpfc_wqe_gen_lnk_MASK 0x00000001
386#define lpfc_wqe_gen_lnk_WORD word7
387#define lpfc_wqe_gen_erp_SHIFT 22
388#define lpfc_wqe_gen_erp_MASK 0x00000001
389#define lpfc_wqe_gen_erp_WORD word7
390#define lpfc_wqe_gen_pu_SHIFT 20
391#define lpfc_wqe_gen_pu_MASK 0x00000003
392#define lpfc_wqe_gen_pu_WORD word7
393#define lpfc_wqe_gen_class_SHIFT 16
394#define lpfc_wqe_gen_class_MASK 0x00000007
395#define lpfc_wqe_gen_class_WORD word7
396#define lpfc_wqe_gen_command_SHIFT 8
397#define lpfc_wqe_gen_command_MASK 0x000000FF
398#define lpfc_wqe_gen_command_WORD word7
399#define lpfc_wqe_gen_status_SHIFT 4
400#define lpfc_wqe_gen_status_MASK 0x0000000F
401#define lpfc_wqe_gen_status_WORD word7
402#define lpfc_wqe_gen_ct_SHIFT 2
403#define lpfc_wqe_gen_ct_MASK 0x00000007
404#define lpfc_wqe_gen_ct_WORD word7
405 uint32_t abort_tag;
406 uint32_t word9;
407#define lpfc_wqe_gen_request_tag_SHIFT 0
408#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
409#define lpfc_wqe_gen_request_tag_WORD word9
410 uint32_t word10;
411#define lpfc_wqe_gen_ccp_SHIFT 24
412#define lpfc_wqe_gen_ccp_MASK 0x000000FF
413#define lpfc_wqe_gen_ccp_WORD word10
414#define lpfc_wqe_gen_ccpe_SHIFT 23
415#define lpfc_wqe_gen_ccpe_MASK 0x00000001
416#define lpfc_wqe_gen_ccpe_WORD word10
417#define lpfc_wqe_gen_pv_SHIFT 19
418#define lpfc_wqe_gen_pv_MASK 0x00000001
419#define lpfc_wqe_gen_pv_WORD word10
420#define lpfc_wqe_gen_pri_SHIFT 16
421#define lpfc_wqe_gen_pri_MASK 0x00000007
422#define lpfc_wqe_gen_pri_WORD word10
423 uint32_t word11;
424#define lpfc_wqe_gen_cq_id_SHIFT 16
425#define lpfc_wqe_gen_cq_id_MASK 0x000003FF
426#define lpfc_wqe_gen_cq_id_WORD word11
427#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
428#define lpfc_wqe_gen_wqec_SHIFT 7
429#define lpfc_wqe_gen_wqec_MASK 0x00000001
430#define lpfc_wqe_gen_wqec_WORD word11
431#define lpfc_wqe_gen_cmd_type_SHIFT 0
432#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
433#define lpfc_wqe_gen_cmd_type_WORD word11
434 uint32_t payload[4];
435};
436
437struct lpfc_rqe {
438 uint32_t address_hi;
439 uint32_t address_lo;
440};
441
442/* buffer descriptors */
443struct lpfc_bde4 {
444 uint32_t addr_hi;
445 uint32_t addr_lo;
446 uint32_t word2;
447#define lpfc_bde4_last_SHIFT 31
448#define lpfc_bde4_last_MASK 0x00000001
449#define lpfc_bde4_last_WORD word2
450#define lpfc_bde4_sge_offset_SHIFT 0
451#define lpfc_bde4_sge_offset_MASK 0x000003FF
452#define lpfc_bde4_sge_offset_WORD word2
453 uint32_t word3;
454#define lpfc_bde4_length_SHIFT 0
455#define lpfc_bde4_length_MASK 0x000000FF
456#define lpfc_bde4_length_WORD word3
457};
458
459struct lpfc_register {
460 uint32_t word0;
461};
462
463#define LPFC_UERR_STATUS_HI 0x00A4
464#define LPFC_UERR_STATUS_LO 0x00A0
465#define LPFC_ONLINE0 0x00B0
466#define LPFC_ONLINE1 0x00B4
467#define LPFC_SCRATCHPAD 0x0058
468
469/* BAR0 Registers */
470#define LPFC_HST_STATE 0x00AC
471#define lpfc_hst_state_perr_SHIFT 31
472#define lpfc_hst_state_perr_MASK 0x1
473#define lpfc_hst_state_perr_WORD word0
474#define lpfc_hst_state_sfi_SHIFT 30
475#define lpfc_hst_state_sfi_MASK 0x1
476#define lpfc_hst_state_sfi_WORD word0
477#define lpfc_hst_state_nip_SHIFT 29
478#define lpfc_hst_state_nip_MASK 0x1
479#define lpfc_hst_state_nip_WORD word0
480#define lpfc_hst_state_ipc_SHIFT 28
481#define lpfc_hst_state_ipc_MASK 0x1
482#define lpfc_hst_state_ipc_WORD word0
483#define lpfc_hst_state_xrom_SHIFT 27
484#define lpfc_hst_state_xrom_MASK 0x1
485#define lpfc_hst_state_xrom_WORD word0
486#define lpfc_hst_state_dl_SHIFT 26
487#define lpfc_hst_state_dl_MASK 0x1
488#define lpfc_hst_state_dl_WORD word0
489#define lpfc_hst_state_port_status_SHIFT 0
490#define lpfc_hst_state_port_status_MASK 0xFFFF
491#define lpfc_hst_state_port_status_WORD word0
492
493#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
494#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
495#define LPFC_POST_STAGE_HOST_RDY 0x0002
496#define LPFC_POST_STAGE_BE_RESET 0x0003
497#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100
498#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101
499#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200
500#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201
501#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300
502#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301
503#define LPFC_POST_STAGE_DDR_TEST_START 0x0400
504#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401
505#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600
506#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601
507#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700
508#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701
509#define LPFC_POST_STAGE_ARMFW_START 0x0800
510#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900
511#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901
512#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00
513#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01
514#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00
515#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01
516#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02
517#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03
518#define LPFC_POST_STAGE_PARSE_XML 0x0B04
519#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05
520#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06
521#define LPFC_POST_STAGE_RC_DONE 0x0B07
522#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
523#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
524#define LPFC_POST_STAGE_ARMFW_READY 0xC000
525#define LPFC_POST_STAGE_ARMFW_UE 0xF000
526
527#define lpfc_scratchpad_slirev_SHIFT 4
528#define lpfc_scratchpad_slirev_MASK 0xF
529#define lpfc_scratchpad_slirev_WORD word0
530#define lpfc_scratchpad_chiptype_SHIFT 8
531#define lpfc_scratchpad_chiptype_MASK 0xFF
532#define lpfc_scratchpad_chiptype_WORD word0
533#define lpfc_scratchpad_featurelevel1_SHIFT 16
534#define lpfc_scratchpad_featurelevel1_MASK 0xFF
535#define lpfc_scratchpad_featurelevel1_WORD word0
536#define lpfc_scratchpad_featurelevel2_SHIFT 24
537#define lpfc_scratchpad_featurelevel2_MASK 0xFF
538#define lpfc_scratchpad_featurelevel2_WORD word0
539
540/* BAR1 Registers */
541#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
542#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
543
544#define LPFC_HST_ISR0 0x0C18
545#define LPFC_HST_ISR1 0x0C1C
546#define LPFC_HST_ISR2 0x0C20
547#define LPFC_HST_ISR3 0x0C24
548#define LPFC_HST_ISR4 0x0C28
549
550#define LPFC_HST_IMR0 0x0C48
551#define LPFC_HST_IMR1 0x0C4C
552#define LPFC_HST_IMR2 0x0C50
553#define LPFC_HST_IMR3 0x0C54
554#define LPFC_HST_IMR4 0x0C58
555
556#define LPFC_HST_ISCR0 0x0C78
557#define LPFC_HST_ISCR1 0x0C7C
558#define LPFC_HST_ISCR2 0x0C80
559#define LPFC_HST_ISCR3 0x0C84
560#define LPFC_HST_ISCR4 0x0C88
561
562#define LPFC_SLI4_INTR0 BIT0
563#define LPFC_SLI4_INTR1 BIT1
564#define LPFC_SLI4_INTR2 BIT2
565#define LPFC_SLI4_INTR3 BIT3
566#define LPFC_SLI4_INTR4 BIT4
567#define LPFC_SLI4_INTR5 BIT5
568#define LPFC_SLI4_INTR6 BIT6
569#define LPFC_SLI4_INTR7 BIT7
570#define LPFC_SLI4_INTR8 BIT8
571#define LPFC_SLI4_INTR9 BIT9
572#define LPFC_SLI4_INTR10 BIT10
573#define LPFC_SLI4_INTR11 BIT11
574#define LPFC_SLI4_INTR12 BIT12
575#define LPFC_SLI4_INTR13 BIT13
576#define LPFC_SLI4_INTR14 BIT14
577#define LPFC_SLI4_INTR15 BIT15
578#define LPFC_SLI4_INTR16 BIT16
579#define LPFC_SLI4_INTR17 BIT17
580#define LPFC_SLI4_INTR18 BIT18
581#define LPFC_SLI4_INTR19 BIT19
582#define LPFC_SLI4_INTR20 BIT20
583#define LPFC_SLI4_INTR21 BIT21
584#define LPFC_SLI4_INTR22 BIT22
585#define LPFC_SLI4_INTR23 BIT23
586#define LPFC_SLI4_INTR24 BIT24
587#define LPFC_SLI4_INTR25 BIT25
588#define LPFC_SLI4_INTR26 BIT26
589#define LPFC_SLI4_INTR27 BIT27
590#define LPFC_SLI4_INTR28 BIT28
591#define LPFC_SLI4_INTR29 BIT29
592#define LPFC_SLI4_INTR30 BIT30
593#define LPFC_SLI4_INTR31 BIT31
594
595/* BAR2 Registers */
596#define LPFC_RQ_DOORBELL 0x00A0
597#define lpfc_rq_doorbell_num_posted_SHIFT 16
598#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
599#define lpfc_rq_doorbell_num_posted_WORD word0
600#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
601#define lpfc_rq_doorbell_id_SHIFT 0
602#define lpfc_rq_doorbell_id_MASK 0x03FF
603#define lpfc_rq_doorbell_id_WORD word0
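/*
 * Example (sketch only): ringing the RQ doorbell after replenishing a batch
 * of receive queue entries, using the bf_set()-style accessors sketched
 * earlier.  'db_regaddr' and 'rq_id' are hypothetical names:
 *
 *	struct lpfc_register doorbell;
 *
 *	doorbell.word0 = 0;
 *	bf_set(lpfc_rq_doorbell_num_posted, &doorbell, LPFC_RQ_POST_BATCH);
 *	bf_set(lpfc_rq_doorbell_id, &doorbell, rq_id);
 *	writel(doorbell.word0, db_regaddr + LPFC_RQ_DOORBELL);
 */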
604
605#define LPFC_WQ_DOORBELL 0x0040
606#define lpfc_wq_doorbell_num_posted_SHIFT 24
607#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
608#define lpfc_wq_doorbell_num_posted_WORD word0
609#define lpfc_wq_doorbell_index_SHIFT 16
610#define lpfc_wq_doorbell_index_MASK 0x00FF
611#define lpfc_wq_doorbell_index_WORD word0
612#define lpfc_wq_doorbell_id_SHIFT 0
613#define lpfc_wq_doorbell_id_MASK 0xFFFF
614#define lpfc_wq_doorbell_id_WORD word0
615
616#define LPFC_EQCQ_DOORBELL 0x0120
617#define lpfc_eqcq_doorbell_arm_SHIFT 29
618#define lpfc_eqcq_doorbell_arm_MASK 0x0001
619#define lpfc_eqcq_doorbell_arm_WORD word0
620#define lpfc_eqcq_doorbell_num_released_SHIFT 16
621#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF
622#define lpfc_eqcq_doorbell_num_released_WORD word0
623#define lpfc_eqcq_doorbell_qt_SHIFT 10
624#define lpfc_eqcq_doorbell_qt_MASK 0x0001
625#define lpfc_eqcq_doorbell_qt_WORD word0
626#define LPFC_QUEUE_TYPE_COMPLETION 0
627#define LPFC_QUEUE_TYPE_EVENT 1
628#define lpfc_eqcq_doorbell_eqci_SHIFT 9
629#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
630#define lpfc_eqcq_doorbell_eqci_WORD word0
631#define lpfc_eqcq_doorbell_cqid_SHIFT 0
632#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
633#define lpfc_eqcq_doorbell_cqid_WORD word0
634#define lpfc_eqcq_doorbell_eqid_SHIFT 0
635#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
636#define lpfc_eqcq_doorbell_eqid_WORD word0
637
638#define LPFC_BMBX 0x0160
639#define lpfc_bmbx_addr_SHIFT 2
640#define lpfc_bmbx_addr_MASK 0x3FFFFFFF
641#define lpfc_bmbx_addr_WORD word0
642#define lpfc_bmbx_hi_SHIFT 1
643#define lpfc_bmbx_hi_MASK 0x0001
644#define lpfc_bmbx_hi_WORD word0
645#define lpfc_bmbx_rdy_SHIFT 0
646#define lpfc_bmbx_rdy_MASK 0x0001
647#define lpfc_bmbx_rdy_WORD word0
648
649#define LPFC_MQ_DOORBELL 0x0140
650#define lpfc_mq_doorbell_num_posted_SHIFT 16
651#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
652#define lpfc_mq_doorbell_num_posted_WORD word0
653#define lpfc_mq_doorbell_id_SHIFT 0
654#define lpfc_mq_doorbell_id_MASK 0x03FF
655#define lpfc_mq_doorbell_id_WORD word0
656
657struct lpfc_sli4_cfg_mhdr {
658 uint32_t word1;
659#define lpfc_mbox_hdr_emb_SHIFT 0
660#define lpfc_mbox_hdr_emb_MASK 0x00000001
661#define lpfc_mbox_hdr_emb_WORD word1
662#define lpfc_mbox_hdr_sge_cnt_SHIFT 3
663#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F
664#define lpfc_mbox_hdr_sge_cnt_WORD word1
665 uint32_t payload_length;
666 uint32_t tag_lo;
667 uint32_t tag_hi;
668 uint32_t reserved5;
669};
670
671union lpfc_sli4_cfg_shdr {
672 struct {
673 uint32_t word6;
674#define lpfc_mbox_hdr_opcode_SHIFT 0
675#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
676#define lpfc_mbox_hdr_opcode_WORD word6
677#define lpfc_mbox_hdr_subsystem_SHIFT 8
678#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
679#define lpfc_mbox_hdr_subsystem_WORD word6
680#define lpfc_mbox_hdr_port_number_SHIFT 16
681#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
682#define lpfc_mbox_hdr_port_number_WORD word6
683#define lpfc_mbox_hdr_domain_SHIFT 24
684#define lpfc_mbox_hdr_domain_MASK 0x000000FF
685#define lpfc_mbox_hdr_domain_WORD word6
686 uint32_t timeout;
687 uint32_t request_length;
688 uint32_t reserved9;
689 } request;
690 struct {
691 uint32_t word6;
692#define lpfc_mbox_hdr_opcode_SHIFT 0
693#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
694#define lpfc_mbox_hdr_opcode_WORD word6
695#define lpfc_mbox_hdr_subsystem_SHIFT 8
696#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
697#define lpfc_mbox_hdr_subsystem_WORD word6
698#define lpfc_mbox_hdr_domain_SHIFT 24
699#define lpfc_mbox_hdr_domain_MASK 0x000000FF
700#define lpfc_mbox_hdr_domain_WORD word6
701 uint32_t word7;
702#define lpfc_mbox_hdr_status_SHIFT 0
703#define lpfc_mbox_hdr_status_MASK 0x000000FF
704#define lpfc_mbox_hdr_status_WORD word7
705#define lpfc_mbox_hdr_add_status_SHIFT 8
706#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
707#define lpfc_mbox_hdr_add_status_WORD word7
708 uint32_t response_length;
709 uint32_t actual_response_length;
710 } response;
711};
712
713/* Mailbox structures */
714struct mbox_header {
715 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
716 union lpfc_sli4_cfg_shdr cfg_shdr;
717};
718
719/* Subsystem Definitions */
720#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
721#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
722
723/* Device Specific Definitions */
724
725/* The HOST ENDIAN defines are in Big Endian format. */
726#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF
727#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
728
729/* Common Opcodes */
730#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
731#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
732#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
733#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
734#define LPFC_MBOX_OPCODE_NOP 0x21
735#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
736#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
737#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
738#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
739
740/* FCoE Opcodes */
741#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
742#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02
743#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03
744#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04
745#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05
746#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06
747#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08
748#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
749#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
750#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
751
752/* Mailbox command structures */
753struct eq_context {
754 uint32_t word0;
755#define lpfc_eq_context_size_SHIFT 31
756#define lpfc_eq_context_size_MASK 0x00000001
757#define lpfc_eq_context_size_WORD word0
758#define LPFC_EQE_SIZE_4 0x0
759#define LPFC_EQE_SIZE_16 0x1
760#define lpfc_eq_context_valid_SHIFT 29
761#define lpfc_eq_context_valid_MASK 0x00000001
762#define lpfc_eq_context_valid_WORD word0
763 uint32_t word1;
764#define lpfc_eq_context_count_SHIFT 26
765#define lpfc_eq_context_count_MASK 0x00000003
766#define lpfc_eq_context_count_WORD word1
767#define LPFC_EQ_CNT_256 0x0
768#define LPFC_EQ_CNT_512 0x1
769#define LPFC_EQ_CNT_1024 0x2
770#define LPFC_EQ_CNT_2048 0x3
771#define LPFC_EQ_CNT_4096 0x4
772 uint32_t word2;
773#define lpfc_eq_context_delay_multi_SHIFT 13
774#define lpfc_eq_context_delay_multi_MASK 0x000003FF
775#define lpfc_eq_context_delay_multi_WORD word2
776 uint32_t reserved3;
777};
778
779struct sgl_page_pairs {
780 uint32_t sgl_pg0_addr_lo;
781 uint32_t sgl_pg0_addr_hi;
782 uint32_t sgl_pg1_addr_lo;
783 uint32_t sgl_pg1_addr_hi;
784};
785
786struct lpfc_mbx_post_sgl_pages {
787 struct mbox_header header;
788 uint32_t word0;
789#define lpfc_post_sgl_pages_xri_SHIFT 0
790#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF
791#define lpfc_post_sgl_pages_xri_WORD word0
792#define lpfc_post_sgl_pages_xricnt_SHIFT 16
793#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF
794#define lpfc_post_sgl_pages_xricnt_WORD word0
795 struct sgl_page_pairs sgl_pg_pairs[1];
796};
797
798/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
799struct lpfc_mbx_post_uembed_sgl_page1 {
800 union lpfc_sli4_cfg_shdr cfg_shdr;
801 uint32_t word0;
802 struct sgl_page_pairs sgl_pg_pairs;
803};
804
805struct lpfc_mbx_sge {
806 uint32_t pa_lo;
807 uint32_t pa_hi;
808 uint32_t length;
809};
810
811struct lpfc_mbx_nembed_cmd {
812 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
813#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
814 struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
815};
816
817struct lpfc_mbx_nembed_sge_virt {
818 void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
819};
820
821struct lpfc_mbx_eq_create {
822 struct mbox_header header;
823 union {
824 struct {
825 uint32_t word0;
826#define lpfc_mbx_eq_create_num_pages_SHIFT 0
827#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF
828#define lpfc_mbx_eq_create_num_pages_WORD word0
829 struct eq_context context;
830 struct dma_address page[LPFC_MAX_EQ_PAGE];
831 } request;
832 struct {
833 uint32_t word0;
834#define lpfc_mbx_eq_create_q_id_SHIFT 0
835#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF
836#define lpfc_mbx_eq_create_q_id_WORD word0
837 } response;
838 } u;
839};
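/*
 * Example (sketch only): filling the EQ_CREATE request with the bf_set()-style
 * accessors sketched earlier.  'eq_create' and 'pg_cnt' are hypothetical
 * locals; eq_create would point into the mailbox payload (e.g.
 * &mqe->un.eq_create for a struct lpfc_mqe *mqe):
 *
 *	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, pg_cnt);
 *	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
 *	bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
 *	       LPFC_EQ_CNT_1024);
 *
 * On completion the new queue id comes back in the response:
 *
 *	qid = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
 */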
840
841struct lpfc_mbx_eq_destroy {
842 struct mbox_header header;
843 union {
844 struct {
845 uint32_t word0;
846#define lpfc_mbx_eq_destroy_q_id_SHIFT 0
847#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF
848#define lpfc_mbx_eq_destroy_q_id_WORD word0
849 } request;
850 struct {
851 uint32_t word0;
852 } response;
853 } u;
854};
855
856struct lpfc_mbx_nop {
857 struct mbox_header header;
858 uint32_t context[2];
859};
860
861struct cq_context {
862 uint32_t word0;
863#define lpfc_cq_context_event_SHIFT 31
864#define lpfc_cq_context_event_MASK 0x00000001
865#define lpfc_cq_context_event_WORD word0
866#define lpfc_cq_context_valid_SHIFT 29
867#define lpfc_cq_context_valid_MASK 0x00000001
868#define lpfc_cq_context_valid_WORD word0
869#define lpfc_cq_context_count_SHIFT 27
870#define lpfc_cq_context_count_MASK 0x00000003
871#define lpfc_cq_context_count_WORD word0
872#define LPFC_CQ_CNT_256 0x0
873#define LPFC_CQ_CNT_512 0x1
874#define LPFC_CQ_CNT_1024 0x2
875 uint32_t word1;
876#define lpfc_cq_eq_id_SHIFT 22
877#define lpfc_cq_eq_id_MASK 0x000000FF
878#define lpfc_cq_eq_id_WORD word1
879 uint32_t reserved0;
880 uint32_t reserved1;
881};
882
883struct lpfc_mbx_cq_create {
884 struct mbox_header header;
885 union {
886 struct {
887 uint32_t word0;
888#define lpfc_mbx_cq_create_num_pages_SHIFT 0
889#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
890#define lpfc_mbx_cq_create_num_pages_WORD word0
891 struct cq_context context;
892 struct dma_address page[LPFC_MAX_CQ_PAGE];
893 } request;
894 struct {
895 uint32_t word0;
896#define lpfc_mbx_cq_create_q_id_SHIFT 0
897#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF
898#define lpfc_mbx_cq_create_q_id_WORD word0
899 } response;
900 } u;
901};
902
903struct lpfc_mbx_cq_destroy {
904 struct mbox_header header;
905 union {
906 struct {
907 uint32_t word0;
908#define lpfc_mbx_cq_destroy_q_id_SHIFT 0
909#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF
910#define lpfc_mbx_cq_destroy_q_id_WORD word0
911 } request;
912 struct {
913 uint32_t word0;
914 } response;
915 } u;
916};
917
918struct wq_context {
919 uint32_t reserved0;
920 uint32_t reserved1;
921 uint32_t reserved2;
922 uint32_t reserved3;
923};
924
925struct lpfc_mbx_wq_create {
926 struct mbox_header header;
927 union {
928 struct {
929 uint32_t word0;
930#define lpfc_mbx_wq_create_num_pages_SHIFT 0
931#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
932#define lpfc_mbx_wq_create_num_pages_WORD word0
933#define lpfc_mbx_wq_create_cq_id_SHIFT 16
934#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
935#define lpfc_mbx_wq_create_cq_id_WORD word0
936 struct dma_address page[LPFC_MAX_WQ_PAGE];
937 } request;
938 struct {
939 uint32_t word0;
940#define lpfc_mbx_wq_create_q_id_SHIFT 0
941#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
942#define lpfc_mbx_wq_create_q_id_WORD word0
943 } response;
944 } u;
945};
946
947struct lpfc_mbx_wq_destroy {
948 struct mbox_header header;
949 union {
950 struct {
951 uint32_t word0;
952#define lpfc_mbx_wq_destroy_q_id_SHIFT 0
953#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF
954#define lpfc_mbx_wq_destroy_q_id_WORD word0
955 } request;
956 struct {
957 uint32_t word0;
958 } response;
959 } u;
960};
961
962#define LPFC_HDR_BUF_SIZE 128
963#define LPFC_DATA_BUF_SIZE 4096
964struct rq_context {
965 uint32_t word0;
966#define lpfc_rq_context_rq_size_SHIFT 16
967#define lpfc_rq_context_rq_size_MASK 0x0000000F
968#define lpfc_rq_context_rq_size_WORD word0
969#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
970#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
971#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
972#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
973 uint32_t reserved1;
974 uint32_t word2;
975#define lpfc_rq_context_cq_id_SHIFT 16
976#define lpfc_rq_context_cq_id_MASK 0x000003FF
977#define lpfc_rq_context_cq_id_WORD word2
978#define lpfc_rq_context_buf_size_SHIFT 0
979#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
980#define lpfc_rq_context_buf_size_WORD word2
981 uint32_t reserved3;
982};
983
984struct lpfc_mbx_rq_create {
985 struct mbox_header header;
986 union {
987 struct {
988 uint32_t word0;
989#define lpfc_mbx_rq_create_num_pages_SHIFT 0
990#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
991#define lpfc_mbx_rq_create_num_pages_WORD word0
992 struct rq_context context;
993 struct dma_address page[LPFC_MAX_WQ_PAGE];
994 } request;
995 struct {
996 uint32_t word0;
997#define lpfc_mbx_rq_create_q_id_SHIFT 0
998#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
999#define lpfc_mbx_rq_create_q_id_WORD word0
1000 } response;
1001 } u;
1002};
1003
1004struct lpfc_mbx_rq_destroy {
1005 struct mbox_header header;
1006 union {
1007 struct {
1008 uint32_t word0;
1009#define lpfc_mbx_rq_destroy_q_id_SHIFT 0
1010#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF
1011#define lpfc_mbx_rq_destroy_q_id_WORD word0
1012 } request;
1013 struct {
1014 uint32_t word0;
1015 } response;
1016 } u;
1017};
1018
1019struct mq_context {
1020 uint32_t word0;
1021#define lpfc_mq_context_cq_id_SHIFT 22
1022#define lpfc_mq_context_cq_id_MASK 0x000003FF
1023#define lpfc_mq_context_cq_id_WORD word0
1024#define lpfc_mq_context_count_SHIFT 16
1025#define lpfc_mq_context_count_MASK 0x0000000F
1026#define lpfc_mq_context_count_WORD word0
1027#define LPFC_MQ_CNT_16 0x5
1028#define LPFC_MQ_CNT_32 0x6
1029#define LPFC_MQ_CNT_64 0x7
1030#define LPFC_MQ_CNT_128 0x8
1031 uint32_t word1;
1032#define lpfc_mq_context_valid_SHIFT 31
1033#define lpfc_mq_context_valid_MASK 0x00000001
1034#define lpfc_mq_context_valid_WORD word1
1035 uint32_t reserved2;
1036 uint32_t reserved3;
1037};
1038
1039struct lpfc_mbx_mq_create {
1040 struct mbox_header header;
1041 union {
1042 struct {
1043 uint32_t word0;
1044#define lpfc_mbx_mq_create_num_pages_SHIFT 0
1045#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF
1046#define lpfc_mbx_mq_create_num_pages_WORD word0
1047 struct mq_context context;
1048 struct dma_address page[LPFC_MAX_MQ_PAGE];
1049 } request;
1050 struct {
1051 uint32_t word0;
1052#define lpfc_mbx_mq_create_q_id_SHIFT 0
1053#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
1054#define lpfc_mbx_mq_create_q_id_WORD word0
1055 } response;
1056 } u;
1057};
1058
1059struct lpfc_mbx_mq_destroy {
1060 struct mbox_header header;
1061 union {
1062 struct {
1063 uint32_t word0;
1064#define lpfc_mbx_mq_destroy_q_id_SHIFT 0
1065#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF
1066#define lpfc_mbx_mq_destroy_q_id_WORD word0
1067 } request;
1068 struct {
1069 uint32_t word0;
1070 } response;
1071 } u;
1072};
1073
1074struct lpfc_mbx_post_hdr_tmpl {
1075 struct mbox_header header;
1076 uint32_t word10;
1077#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0
1078#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF
1079#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10
1080#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16
1081#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF
1082#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10
1083 uint32_t rpi_paddr_lo;
1084 uint32_t rpi_paddr_hi;
1085};
1086
1087struct sli4_sge { /* SLI-4 */
1088 uint32_t addr_hi;
1089 uint32_t addr_lo;
1090
1091 uint32_t word2;
1092#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
1093#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
1094#define lpfc_sli4_sge_offset_WORD word2
1095#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
1096 this flag !! */
1097#define lpfc_sli4_sge_last_MASK 0x00000001
1098#define lpfc_sli4_sge_last_WORD word2
1099 uint32_t word3;
1100#define lpfc_sli4_sge_len_SHIFT 0
1101#define lpfc_sli4_sge_len_MASK 0x0001FFFF
1102#define lpfc_sli4_sge_len_WORD word3
1103};
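/*
 * Example (sketch only): filling the final data SGE of an SGL.  'sgl',
 * 'physaddr' and 'len' are hypothetical locals; any byte-swapping the port
 * requires is omitted here:
 *
 *	sgl->addr_hi = upper_32_bits(physaddr);
 *	sgl->addr_lo = lower_32_bits(physaddr);
 *	bf_set(lpfc_sli4_sge_last, sgl, 1);	- last entry in this SGL
 *	bf_set(lpfc_sli4_sge_len, sgl, len);
 */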
1104
1105struct fcf_record {
1106 uint32_t max_rcv_size;
1107 uint32_t fka_adv_period;
1108 uint32_t fip_priority;
1109 uint32_t word3;
1110#define lpfc_fcf_record_mac_0_SHIFT 0
1111#define lpfc_fcf_record_mac_0_MASK 0x000000FF
1112#define lpfc_fcf_record_mac_0_WORD word3
1113#define lpfc_fcf_record_mac_1_SHIFT 8
1114#define lpfc_fcf_record_mac_1_MASK 0x000000FF
1115#define lpfc_fcf_record_mac_1_WORD word3
1116#define lpfc_fcf_record_mac_2_SHIFT 16
1117#define lpfc_fcf_record_mac_2_MASK 0x000000FF
1118#define lpfc_fcf_record_mac_2_WORD word3
1119#define lpfc_fcf_record_mac_3_SHIFT 24
1120#define lpfc_fcf_record_mac_3_MASK 0x000000FF
1121#define lpfc_fcf_record_mac_3_WORD word3
1122 uint32_t word4;
1123#define lpfc_fcf_record_mac_4_SHIFT 0
1124#define lpfc_fcf_record_mac_4_MASK 0x000000FF
1125#define lpfc_fcf_record_mac_4_WORD word4
1126#define lpfc_fcf_record_mac_5_SHIFT 8
1127#define lpfc_fcf_record_mac_5_MASK 0x000000FF
1128#define lpfc_fcf_record_mac_5_WORD word4
1129#define lpfc_fcf_record_fcf_avail_SHIFT 16
1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
1131#define lpfc_fcf_record_fcf_avail_WORD		word4
1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
1134#define lpfc_fcf_record_mac_addr_prov_WORD word4
1135#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */
1136#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */
1137 uint32_t word5;
1138#define lpfc_fcf_record_fab_name_0_SHIFT 0
1139#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF
1140#define lpfc_fcf_record_fab_name_0_WORD word5
1141#define lpfc_fcf_record_fab_name_1_SHIFT 8
1142#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF
1143#define lpfc_fcf_record_fab_name_1_WORD word5
1144#define lpfc_fcf_record_fab_name_2_SHIFT 16
1145#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF
1146#define lpfc_fcf_record_fab_name_2_WORD word5
1147#define lpfc_fcf_record_fab_name_3_SHIFT 24
1148#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF
1149#define lpfc_fcf_record_fab_name_3_WORD word5
1150 uint32_t word6;
1151#define lpfc_fcf_record_fab_name_4_SHIFT 0
1152#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF
1153#define lpfc_fcf_record_fab_name_4_WORD word6
1154#define lpfc_fcf_record_fab_name_5_SHIFT 8
1155#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF
1156#define lpfc_fcf_record_fab_name_5_WORD word6
1157#define lpfc_fcf_record_fab_name_6_SHIFT 16
1158#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF
1159#define lpfc_fcf_record_fab_name_6_WORD word6
1160#define lpfc_fcf_record_fab_name_7_SHIFT 24
1161#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF
1162#define lpfc_fcf_record_fab_name_7_WORD word6
1163 uint32_t word7;
1164#define lpfc_fcf_record_fc_map_0_SHIFT 0
1165#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF
1166#define lpfc_fcf_record_fc_map_0_WORD word7
1167#define lpfc_fcf_record_fc_map_1_SHIFT 8
1168#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF
1169#define lpfc_fcf_record_fc_map_1_WORD word7
1170#define lpfc_fcf_record_fc_map_2_SHIFT 16
1171#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
1172#define lpfc_fcf_record_fc_map_2_WORD word7
1173#define lpfc_fcf_record_fcf_valid_SHIFT 24
1174#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF
1175#define lpfc_fcf_record_fcf_valid_WORD word7
1176 uint32_t word8;
1177#define lpfc_fcf_record_fcf_index_SHIFT 0
1178#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
1179#define lpfc_fcf_record_fcf_index_WORD word8
1180#define lpfc_fcf_record_fcf_state_SHIFT 16
1181#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
1182#define lpfc_fcf_record_fcf_state_WORD word8
1183 uint8_t vlan_bitmap[512];
1184};
1185
1186struct lpfc_mbx_read_fcf_tbl {
1187 union lpfc_sli4_cfg_shdr cfg_shdr;
1188 union {
1189 struct {
1190 uint32_t word10;
1191#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0
1192#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF
1193#define lpfc_mbx_read_fcf_tbl_indx_WORD word10
1194 } request;
1195 struct {
1196 uint32_t eventag;
1197 } response;
1198 } u;
1199 uint32_t word11;
1200#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0
1201#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF
1202#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11
1203};
1204
1205struct lpfc_mbx_add_fcf_tbl_entry {
1206 union lpfc_sli4_cfg_shdr cfg_shdr;
1207 uint32_t word10;
1208#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0
1209#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF
1210#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10
1211 struct lpfc_mbx_sge fcf_sge;
1212};
1213
1214struct lpfc_mbx_del_fcf_tbl_entry {
1215 struct mbox_header header;
1216 uint32_t word10;
1217#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0
1218#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF
1219#define lpfc_mbx_del_fcf_tbl_count_WORD word10
1220#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16
1221#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF
1222#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1223};
1224
1225/* Status field for embedded SLI_CONFIG mailbox command */
1226#define STATUS_SUCCESS 0x0
1227#define STATUS_FAILED 0x1
1228#define STATUS_ILLEGAL_REQUEST 0x2
1229#define STATUS_ILLEGAL_FIELD 0x3
1230#define STATUS_INSUFFICIENT_BUFFER 0x4
1231#define STATUS_UNAUTHORIZED_REQUEST 0x5
1232#define STATUS_FLASHROM_SAVE_FAILED 0x17
1233#define STATUS_FLASHROM_RESTORE_FAILED 0x18
1234#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a
1235#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b
1236#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c
1237#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d
1238#define STATUS_ASSERT_FAILED 0x1e
1239#define STATUS_INVALID_SESSION 0x1f
1240#define STATUS_INVALID_CONNECTION 0x20
1241#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21
1242#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24
1243#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25
1244#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26
1245#define STATUS_FLASHROM_READ_FAILED 0x27
1246#define STATUS_POLL_IOCTL_TIMEOUT 0x28
1247#define STATUS_ERROR_ACITMAIN 0x2a
1248#define STATUS_REBOOT_REQUIRED 0x2c
1249#define STATUS_FCF_IN_USE 0x3a
1250
1251struct lpfc_mbx_sli4_config {
1252 struct mbox_header header;
1253};
1254
1255struct lpfc_mbx_init_vfi {
1256 uint32_t word1;
1257#define lpfc_init_vfi_vr_SHIFT 31
1258#define lpfc_init_vfi_vr_MASK 0x00000001
1259#define lpfc_init_vfi_vr_WORD word1
1260#define lpfc_init_vfi_vt_SHIFT 30
1261#define lpfc_init_vfi_vt_MASK 0x00000001
1262#define lpfc_init_vfi_vt_WORD word1
1263#define lpfc_init_vfi_vf_SHIFT 29
1264#define lpfc_init_vfi_vf_MASK 0x00000001
1265#define lpfc_init_vfi_vf_WORD word1
1266#define lpfc_init_vfi_vfi_SHIFT 0
1267#define lpfc_init_vfi_vfi_MASK 0x0000FFFF
1268#define lpfc_init_vfi_vfi_WORD word1
1269 uint32_t word2;
1270#define lpfc_init_vfi_fcfi_SHIFT 0
1271#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF
1272#define lpfc_init_vfi_fcfi_WORD word2
1273 uint32_t word3;
1274#define lpfc_init_vfi_pri_SHIFT 13
1275#define lpfc_init_vfi_pri_MASK 0x00000007
1276#define lpfc_init_vfi_pri_WORD word3
1277#define lpfc_init_vfi_vf_id_SHIFT 1
1278#define lpfc_init_vfi_vf_id_MASK 0x00000FFF
1279#define lpfc_init_vfi_vf_id_WORD word3
1280 uint32_t word4;
1281#define lpfc_init_vfi_hop_count_SHIFT 24
1282#define lpfc_init_vfi_hop_count_MASK 0x000000FF
1283#define lpfc_init_vfi_hop_count_WORD word4
1284};
1285
1286struct lpfc_mbx_reg_vfi {
1287 uint32_t word1;
1288#define lpfc_reg_vfi_vp_SHIFT 28
1289#define lpfc_reg_vfi_vp_MASK 0x00000001
1290#define lpfc_reg_vfi_vp_WORD word1
1291#define lpfc_reg_vfi_vfi_SHIFT 0
1292#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF
1293#define lpfc_reg_vfi_vfi_WORD word1
1294 uint32_t word2;
1295#define lpfc_reg_vfi_vpi_SHIFT 16
1296#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF
1297#define lpfc_reg_vfi_vpi_WORD word2
1298#define lpfc_reg_vfi_fcfi_SHIFT 0
1299#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
1300#define lpfc_reg_vfi_fcfi_WORD word2
1301 uint32_t word3_rsvd;
1302 uint32_t word4_rsvd;
1303 struct ulp_bde64 bde;
1304 uint32_t word8_rsvd;
1305 uint32_t word9_rsvd;
1306 uint32_t word10;
1307#define lpfc_reg_vfi_nport_id_SHIFT 0
1308#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
1309#define lpfc_reg_vfi_nport_id_WORD word10
1310};
1311
1312struct lpfc_mbx_init_vpi {
1313 uint32_t word1;
1314#define lpfc_init_vpi_vfi_SHIFT 16
1315#define lpfc_init_vpi_vfi_MASK 0x0000FFFF
1316#define lpfc_init_vpi_vfi_WORD word1
1317#define lpfc_init_vpi_vpi_SHIFT 0
1318#define lpfc_init_vpi_vpi_MASK 0x0000FFFF
1319#define lpfc_init_vpi_vpi_WORD word1
1320};
1321
1322struct lpfc_mbx_read_vpi {
1323 uint32_t word1_rsvd;
1324 uint32_t word2;
1325#define lpfc_mbx_read_vpi_vnportid_SHIFT 0
1326#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF
1327#define lpfc_mbx_read_vpi_vnportid_WORD word2
1328 uint32_t word3_rsvd;
1329 uint32_t word4;
1330#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0
1331#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF
1332#define lpfc_mbx_read_vpi_acq_alpa_WORD word4
1333#define lpfc_mbx_read_vpi_pb_SHIFT 15
1334#define lpfc_mbx_read_vpi_pb_MASK 0x00000001
1335#define lpfc_mbx_read_vpi_pb_WORD word4
1336#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16
1337#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF
1338#define lpfc_mbx_read_vpi_spec_alpa_WORD word4
1339#define lpfc_mbx_read_vpi_ns_SHIFT 30
1340#define lpfc_mbx_read_vpi_ns_MASK 0x00000001
1341#define lpfc_mbx_read_vpi_ns_WORD word4
1342#define lpfc_mbx_read_vpi_hl_SHIFT 31
1343#define lpfc_mbx_read_vpi_hl_MASK 0x00000001
1344#define lpfc_mbx_read_vpi_hl_WORD word4
1345 uint32_t word5_rsvd;
1346 uint32_t word6;
1347#define lpfc_mbx_read_vpi_vpi_SHIFT 0
1348#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF
1349#define lpfc_mbx_read_vpi_vpi_WORD word6
1350 uint32_t word7;
1351#define lpfc_mbx_read_vpi_mac_0_SHIFT 0
1352#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF
1353#define lpfc_mbx_read_vpi_mac_0_WORD word7
1354#define lpfc_mbx_read_vpi_mac_1_SHIFT 8
1355#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF
1356#define lpfc_mbx_read_vpi_mac_1_WORD word7
1357#define lpfc_mbx_read_vpi_mac_2_SHIFT 16
1358#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF
1359#define lpfc_mbx_read_vpi_mac_2_WORD word7
1360#define lpfc_mbx_read_vpi_mac_3_SHIFT 24
1361#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF
1362#define lpfc_mbx_read_vpi_mac_3_WORD word7
1363 uint32_t word8;
1364#define lpfc_mbx_read_vpi_mac_4_SHIFT 0
1365#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF
1366#define lpfc_mbx_read_vpi_mac_4_WORD word8
1367#define lpfc_mbx_read_vpi_mac_5_SHIFT 8
1368#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF
1369#define lpfc_mbx_read_vpi_mac_5_WORD word8
1370#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16
1371#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF
1372#define lpfc_mbx_read_vpi_vlan_tag_WORD word8
1373#define lpfc_mbx_read_vpi_vv_SHIFT 28
1374#define lpfc_mbx_read_vpi_vv_MASK		0x00000001
1375#define lpfc_mbx_read_vpi_vv_WORD word8
1376};
1377
1378struct lpfc_mbx_unreg_vfi {
1379 uint32_t word1_rsvd;
1380 uint32_t word2;
1381#define lpfc_unreg_vfi_vfi_SHIFT 0
1382#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF
1383#define lpfc_unreg_vfi_vfi_WORD word2
1384};
1385
1386struct lpfc_mbx_resume_rpi {
1387 uint32_t word1;
1388#define lpfc_resume_rpi_rpi_SHIFT 0
1389#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF
1390#define lpfc_resume_rpi_rpi_WORD word1
1391 uint32_t event_tag;
1392 uint32_t word3_rsvd;
1393 uint32_t word4_rsvd;
1394 uint32_t word5_rsvd;
1395 uint32_t word6;
1396#define lpfc_resume_rpi_vpi_SHIFT 0
1397#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
1398#define lpfc_resume_rpi_vpi_WORD word6
1399#define lpfc_resume_rpi_vfi_SHIFT 16
1400#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
1401#define lpfc_resume_rpi_vfi_WORD word6
1402};
1403
1404#define REG_FCF_INVALID_QID 0xFFFF
1405struct lpfc_mbx_reg_fcfi {
1406 uint32_t word1;
1407#define lpfc_reg_fcfi_info_index_SHIFT 0
1408#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF
1409#define lpfc_reg_fcfi_info_index_WORD word1
1410#define lpfc_reg_fcfi_fcfi_SHIFT 16
1411#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF
1412#define lpfc_reg_fcfi_fcfi_WORD word1
1413 uint32_t word2;
1414#define lpfc_reg_fcfi_rq_id1_SHIFT 0
1415#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF
1416#define lpfc_reg_fcfi_rq_id1_WORD word2
1417#define lpfc_reg_fcfi_rq_id0_SHIFT 16
1418#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF
1419#define lpfc_reg_fcfi_rq_id0_WORD word2
1420 uint32_t word3;
1421#define lpfc_reg_fcfi_rq_id3_SHIFT 0
1422#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF
1423#define lpfc_reg_fcfi_rq_id3_WORD word3
1424#define lpfc_reg_fcfi_rq_id2_SHIFT 16
1425#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF
1426#define lpfc_reg_fcfi_rq_id2_WORD word3
1427 uint32_t word4;
1428#define lpfc_reg_fcfi_type_match0_SHIFT 24
1429#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF
1430#define lpfc_reg_fcfi_type_match0_WORD word4
1431#define lpfc_reg_fcfi_type_mask0_SHIFT 16
1432#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF
1433#define lpfc_reg_fcfi_type_mask0_WORD word4
1434#define lpfc_reg_fcfi_rctl_match0_SHIFT 8
1435#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF
1436#define lpfc_reg_fcfi_rctl_match0_WORD word4
1437#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0
1438#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF
1439#define lpfc_reg_fcfi_rctl_mask0_WORD word4
1440 uint32_t word5;
1441#define lpfc_reg_fcfi_type_match1_SHIFT 24
1442#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF
1443#define lpfc_reg_fcfi_type_match1_WORD word5
1444#define lpfc_reg_fcfi_type_mask1_SHIFT 16
1445#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF
1446#define lpfc_reg_fcfi_type_mask1_WORD word5
1447#define lpfc_reg_fcfi_rctl_match1_SHIFT 8
1448#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF
1449#define lpfc_reg_fcfi_rctl_match1_WORD word5
1450#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0
1451#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF
1452#define lpfc_reg_fcfi_rctl_mask1_WORD word5
1453 uint32_t word6;
1454#define lpfc_reg_fcfi_type_match2_SHIFT 24
1455#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF
1456#define lpfc_reg_fcfi_type_match2_WORD word6
1457#define lpfc_reg_fcfi_type_mask2_SHIFT 16
1458#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF
1459#define lpfc_reg_fcfi_type_mask2_WORD word6
1460#define lpfc_reg_fcfi_rctl_match2_SHIFT 8
1461#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF
1462#define lpfc_reg_fcfi_rctl_match2_WORD word6
1463#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0
1464#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF
1465#define lpfc_reg_fcfi_rctl_mask2_WORD word6
1466 uint32_t word7;
1467#define lpfc_reg_fcfi_type_match3_SHIFT 24
1468#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF
1469#define lpfc_reg_fcfi_type_match3_WORD word7
1470#define lpfc_reg_fcfi_type_mask3_SHIFT 16
1471#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF
1472#define lpfc_reg_fcfi_type_mask3_WORD word7
1473#define lpfc_reg_fcfi_rctl_match3_SHIFT 8
1474#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF
1475#define lpfc_reg_fcfi_rctl_match3_WORD word7
1476#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0
1477#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF
1478#define lpfc_reg_fcfi_rctl_mask3_WORD word7
1479 uint32_t word8;
1480#define lpfc_reg_fcfi_mam_SHIFT 13
1481#define lpfc_reg_fcfi_mam_MASK 0x00000003
1482#define lpfc_reg_fcfi_mam_WORD word8
1483#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */
1484#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */
1485#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */
1486#define lpfc_reg_fcfi_vv_SHIFT 12
1487#define lpfc_reg_fcfi_vv_MASK 0x00000001
1488#define lpfc_reg_fcfi_vv_WORD word8
1489#define lpfc_reg_fcfi_vlan_tag_SHIFT 0
1490#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF
1491#define lpfc_reg_fcfi_vlan_tag_WORD word8
1492};
1493
1494struct lpfc_mbx_unreg_fcfi {
1495 uint32_t word1_rsv;
1496 uint32_t word2;
1497#define lpfc_unreg_fcfi_SHIFT 0
1498#define lpfc_unreg_fcfi_MASK 0x0000FFFF
1499#define lpfc_unreg_fcfi_WORD word2
1500};
1501
1502struct lpfc_mbx_read_rev {
1503 uint32_t word1;
1504#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16
1505#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F
1506#define lpfc_mbx_rd_rev_sli_lvl_WORD word1
1507#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
1508#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
1509#define lpfc_mbx_rd_rev_fcoe_WORD word1
1510#define lpfc_mbx_rd_rev_vpd_SHIFT 29
1511#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
1512#define lpfc_mbx_rd_rev_vpd_WORD word1
1513 uint32_t first_hw_rev;
1514 uint32_t second_hw_rev;
1515 uint32_t word4_rsvd;
1516 uint32_t third_hw_rev;
1517 uint32_t word6;
1518#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0
1519#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF
1520#define lpfc_mbx_rd_rev_fcph_low_WORD word6
1521#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8
1522#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF
1523#define lpfc_mbx_rd_rev_fcph_high_WORD word6
1524#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16
1525#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF
1526#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6
1527#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24
1528#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF
1529#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6
1530 uint32_t word7_rsvd;
1531 uint32_t fw_id_rev;
1532 uint8_t fw_name[16];
1533 uint32_t ulp_fw_id_rev;
1534 uint8_t ulp_fw_name[16];
1535 uint32_t word18_47_rsvd[30];
1536 uint32_t word48;
1537#define lpfc_mbx_rd_rev_avail_len_SHIFT 0
1538#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF
1539#define lpfc_mbx_rd_rev_avail_len_WORD word48
1540 uint32_t vpd_paddr_low;
1541 uint32_t vpd_paddr_high;
1542 uint32_t avail_vpd_len;
1543 uint32_t rsvd_52_63[12];
1544};
1545
1546struct lpfc_mbx_read_config {
1547 uint32_t word1;
1548#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
1549#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
1550#define lpfc_mbx_rd_conf_max_bbc_WORD word1
1551#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
1552#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
1553#define lpfc_mbx_rd_conf_init_bbc_WORD word1
1554 uint32_t word2;
1555#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
1556#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
1557#define lpfc_mbx_rd_conf_nport_did_WORD word2
1558#define lpfc_mbx_rd_conf_topology_SHIFT 24
1559#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
1560#define lpfc_mbx_rd_conf_topology_WORD word2
1561 uint32_t word3;
1562#define lpfc_mbx_rd_conf_ao_SHIFT 0
1563#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
1564#define lpfc_mbx_rd_conf_ao_WORD word3
1565#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
1566#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
1567#define lpfc_mbx_rd_conf_bb_scn_WORD word3
1568#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
1569#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
1570#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
1571#define lpfc_mbx_rd_conf_mc_SHIFT 29
1572#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
1573#define lpfc_mbx_rd_conf_mc_WORD word3
1574 uint32_t word4;
1575#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
1576#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
1577#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
1578 uint32_t word5;
1579#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
1580#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
1581#define lpfc_mbx_rd_conf_lp_tov_WORD word5
1582 uint32_t word6;
1583#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
1584#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
1585#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
1586 uint32_t word7;
1587#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
1588#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
1589#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
1590 uint32_t word8;
1591#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
1592#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
1593#define lpfc_mbx_rd_conf_al_tov_WORD word8
1594 uint32_t word9;
1595#define lpfc_mbx_rd_conf_lmt_SHIFT 0
1596#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
1597#define lpfc_mbx_rd_conf_lmt_WORD word9
1598 uint32_t word10;
1599#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
1600#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
1601#define lpfc_mbx_rd_conf_max_alpa_WORD word10
1602 uint32_t word11_rsvd;
1603 uint32_t word12;
1604#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
1605#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
1606#define lpfc_mbx_rd_conf_xri_base_WORD word12
1607#define lpfc_mbx_rd_conf_xri_count_SHIFT 16
1608#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF
1609#define lpfc_mbx_rd_conf_xri_count_WORD word12
1610 uint32_t word13;
1611#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0
1612#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF
1613#define lpfc_mbx_rd_conf_rpi_base_WORD word13
1614#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16
1615#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF
1616#define lpfc_mbx_rd_conf_rpi_count_WORD word13
1617 uint32_t word14;
1618#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0
1619#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF
1620#define lpfc_mbx_rd_conf_vpi_base_WORD word14
1621#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16
1622#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF
1623#define lpfc_mbx_rd_conf_vpi_count_WORD word14
1624 uint32_t word15;
1625#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0
1626#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF
1627#define lpfc_mbx_rd_conf_vfi_base_WORD word15
1628#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16
1629#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
1630#define lpfc_mbx_rd_conf_vfi_count_WORD word15
1631 uint32_t word16;
1632#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
1633#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
1634#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
1635#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
1636#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
1637#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
1638 uint32_t word17;
1639#define lpfc_mbx_rd_conf_rq_count_SHIFT 0
1640#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF
1641#define lpfc_mbx_rd_conf_rq_count_WORD word17
1642#define lpfc_mbx_rd_conf_eq_count_SHIFT 16
1643#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF
1644#define lpfc_mbx_rd_conf_eq_count_WORD word17
1645 uint32_t word18;
1646#define lpfc_mbx_rd_conf_wq_count_SHIFT 0
1647#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF
1648#define lpfc_mbx_rd_conf_wq_count_WORD word18
1649#define lpfc_mbx_rd_conf_cq_count_SHIFT 16
1650#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF
1651#define lpfc_mbx_rd_conf_cq_count_WORD word18
1652};
1653
1654struct lpfc_mbx_request_features {
1655 uint32_t word1;
1656#define lpfc_mbx_rq_ftr_qry_SHIFT 0
1657#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001
1658#define lpfc_mbx_rq_ftr_qry_WORD word1
1659 uint32_t word2;
1660#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0
1661#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001
1662#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2
1663#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1
1664#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001
1665#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2
1666#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2
1667#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001
1668#define lpfc_mbx_rq_ftr_rq_dif_WORD word2
1669#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3
1670#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001
1671#define lpfc_mbx_rq_ftr_rq_vf_WORD word2
1672#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4
1673#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001
1674#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2
1675#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5
1676#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001
1677#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2
1678#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6
1679#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001
1680#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2
1681#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
1682#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
1683#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
1684 uint32_t word3;
1685#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
1686#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
1687#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3
1688#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1
1689#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001
1690#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3
1691#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2
1692#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001
1693#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3
1694#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3
1695#define lpfc_mbx_rq_ftr_rsp_vf_MASK		0x00000001
1696#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3
1697#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4
1698#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001
1699#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3
1700#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5
1701#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001
1702#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3
1703#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6
1704#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001
1705#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3
1706#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
1707#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
1708#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
1709};
1710
1711/* Mailbox Completion Queue Error Messages */
1712#define MB_CQE_STATUS_SUCCESS 0x0
1713#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
1714#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
1715#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
1716#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
1717#define MB_CQE_STATUS_DMA_FAILED 0x5
1718
1719/* mailbox queue entry structure */
1720struct lpfc_mqe {
1721 uint32_t word0;
1722#define lpfc_mqe_status_SHIFT 16
1723#define lpfc_mqe_status_MASK 0x0000FFFF
1724#define lpfc_mqe_status_WORD word0
1725#define lpfc_mqe_command_SHIFT 8
1726#define lpfc_mqe_command_MASK 0x000000FF
1727#define lpfc_mqe_command_WORD word0
1728 union {
1729 uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
1730 /* sli4 mailbox commands */
1731 struct lpfc_mbx_sli4_config sli4_config;
1732 struct lpfc_mbx_init_vfi init_vfi;
1733 struct lpfc_mbx_reg_vfi reg_vfi;
1734 struct lpfc_mbx_reg_vfi unreg_vfi;
1735 struct lpfc_mbx_init_vpi init_vpi;
1736 struct lpfc_mbx_resume_rpi resume_rpi;
1737 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
1738 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
1739 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
1740 struct lpfc_mbx_reg_fcfi reg_fcfi;
1741 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
1742 struct lpfc_mbx_mq_create mq_create;
1743 struct lpfc_mbx_eq_create eq_create;
1744 struct lpfc_mbx_cq_create cq_create;
1745 struct lpfc_mbx_wq_create wq_create;
1746 struct lpfc_mbx_rq_create rq_create;
1747 struct lpfc_mbx_mq_destroy mq_destroy;
1748 struct lpfc_mbx_eq_destroy eq_destroy;
1749 struct lpfc_mbx_cq_destroy cq_destroy;
1750 struct lpfc_mbx_wq_destroy wq_destroy;
1751 struct lpfc_mbx_rq_destroy rq_destroy;
1752 struct lpfc_mbx_post_sgl_pages post_sgl_pages;
1753 struct lpfc_mbx_nembed_cmd nembed_cmd;
1754 struct lpfc_mbx_read_rev read_rev;
1755 struct lpfc_mbx_read_vpi read_vpi;
1756 struct lpfc_mbx_read_config rd_config;
1757 struct lpfc_mbx_request_features req_ftrs;
1758 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
1759 struct lpfc_mbx_nop nop;
1760 } un;
1761};
1762
1763struct lpfc_mcqe {
1764 uint32_t word0;
1765#define lpfc_mcqe_status_SHIFT 0
1766#define lpfc_mcqe_status_MASK 0x0000FFFF
1767#define lpfc_mcqe_status_WORD word0
1768#define lpfc_mcqe_ext_status_SHIFT 16
1769#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
1770#define lpfc_mcqe_ext_status_WORD word0
1771 uint32_t mcqe_tag0;
1772 uint32_t mcqe_tag1;
1773 uint32_t trailer;
1774#define lpfc_trailer_valid_SHIFT 31
1775#define lpfc_trailer_valid_MASK 0x00000001
1776#define lpfc_trailer_valid_WORD trailer
1777#define lpfc_trailer_async_SHIFT 30
1778#define lpfc_trailer_async_MASK 0x00000001
1779#define lpfc_trailer_async_WORD trailer
1780#define lpfc_trailer_hpi_SHIFT 29
1781#define lpfc_trailer_hpi_MASK 0x00000001
1782#define lpfc_trailer_hpi_WORD trailer
1783#define lpfc_trailer_completed_SHIFT 28
1784#define lpfc_trailer_completed_MASK 0x00000001
1785#define lpfc_trailer_completed_WORD trailer
1786#define lpfc_trailer_consumed_SHIFT 27
1787#define lpfc_trailer_consumed_MASK 0x00000001
1788#define lpfc_trailer_consumed_WORD trailer
1789#define lpfc_trailer_type_SHIFT 16
1790#define lpfc_trailer_type_MASK 0x000000FF
1791#define lpfc_trailer_type_WORD trailer
1792#define lpfc_trailer_code_SHIFT 8
1793#define lpfc_trailer_code_MASK 0x000000FF
1794#define lpfc_trailer_code_WORD trailer
1795#define LPFC_TRAILER_CODE_LINK 0x1
1796#define LPFC_TRAILER_CODE_FCOE 0x2
1797#define LPFC_TRAILER_CODE_DCBX 0x3
1798};
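/*
 * Example (sketch only): when consuming the mailbox completion queue, the
 * trailer bits above distinguish asynchronous events from ordinary mailbox
 * completions.  'phba' and the handler names are hypothetical:
 *
 *	if (bf_get(lpfc_trailer_async, mcqe))
 *		lpfc_handle_async_event(phba, mcqe);	- link/FCoE/DCBX ACQEs
 *	else if (bf_get(lpfc_trailer_completed, mcqe))
 *		lpfc_handle_mbox_completion(phba, mcqe);
 */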
1799
1800struct lpfc_acqe_link {
1801 uint32_t word0;
1802#define lpfc_acqe_link_speed_SHIFT 24
1803#define lpfc_acqe_link_speed_MASK 0x000000FF
1804#define lpfc_acqe_link_speed_WORD word0
1805#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0
1806#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1
1807#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
1808#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
1809#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
1810#define lpfc_acqe_link_duplex_SHIFT 16
1811#define lpfc_acqe_link_duplex_MASK 0x000000FF
1812#define lpfc_acqe_link_duplex_WORD word0
1813#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0
1814#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1
1815#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2
1816#define lpfc_acqe_link_status_SHIFT 8
1817#define lpfc_acqe_link_status_MASK 0x000000FF
1818#define lpfc_acqe_link_status_WORD word0
1819#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0
1820#define LPFC_ASYNC_LINK_STATUS_UP 0x1
1821#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
1822#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
1823#define lpfc_acqe_link_physical_SHIFT 0
1824#define lpfc_acqe_link_physical_MASK 0x000000FF
1825#define lpfc_acqe_link_physical_WORD word0
1826#define LPFC_ASYNC_LINK_PORT_A 0x0
1827#define LPFC_ASYNC_LINK_PORT_B 0x1
1828 uint32_t word1;
1829#define lpfc_acqe_link_fault_SHIFT 0
1830#define lpfc_acqe_link_fault_MASK 0x000000FF
1831#define lpfc_acqe_link_fault_WORD word1
1832#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
1833#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
1834#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
1835 uint32_t event_tag;
1836 uint32_t trailer;
1837};
1838
1839struct lpfc_acqe_fcoe {
1840 uint32_t fcf_index;
1841 uint32_t word1;
1842#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
1843#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
1844#define lpfc_acqe_fcoe_fcf_count_WORD word1
1845#define lpfc_acqe_fcoe_event_type_SHIFT 16
1846#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF
1847#define lpfc_acqe_fcoe_event_type_WORD word1
1848#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
1849#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1850#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
1851 uint32_t event_tag;
1852 uint32_t trailer;
1853};
1854
1855struct lpfc_acqe_dcbx {
1856 uint32_t tlv_ttl;
1857 uint32_t reserved;
1858 uint32_t event_tag;
1859 uint32_t trailer;
1860};
1861
1862/*
1863 * Define the bootstrap mailbox (bmbx) region used to communicate
 1864 * mailbox commands between the host and port. The mailbox consists
 1865 * of a 256-byte payload area followed by a 16-byte completion
 1866 * queue entry.
1867 */
1868struct lpfc_bmbx_create {
1869 struct lpfc_mqe mqe;
1870 struct lpfc_mcqe mcqe;
1871};
1872
1873#define SGL_ALIGN_SZ 64
1874#define SGL_PAGE_SIZE 4096
1875/* align SGL addr on a size boundary - adjust address up */
1876#define NO_XRI ((uint16_t)-1)
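
/*
 * Illustrative sketch (editor's addition, not part of this commit): the
 * "adjust address up" note above refers to rounding a candidate SGL
 * address up to the next SGL_ALIGN_SZ boundary before it is posted to the
 * port. lpfc_example_sgl_align_up is a hypothetical helper that shows only
 * that arithmetic; the driver's real alignment handling lives in the .c files.
 */
static inline uint64_t
lpfc_example_sgl_align_up(uint64_t addr)
{
	return (addr + SGL_ALIGN_SZ - 1) & ~((uint64_t)SGL_ALIGN_SZ - 1);
}
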
1877struct wqe_common {
1878 uint32_t word6;
1879#define wqe_xri_SHIFT 0
1880#define wqe_xri_MASK 0x0000FFFF
1881#define wqe_xri_WORD word6
1882#define wqe_ctxt_tag_SHIFT 16
1883#define wqe_ctxt_tag_MASK 0x0000FFFF
1884#define wqe_ctxt_tag_WORD word6
1885 uint32_t word7;
1886#define wqe_ct_SHIFT 2
1887#define wqe_ct_MASK 0x00000003
1888#define wqe_ct_WORD word7
1889#define wqe_status_SHIFT 4
1890#define wqe_status_MASK 0x0000000f
1891#define wqe_status_WORD word7
1892#define wqe_cmnd_SHIFT 8
1893#define wqe_cmnd_MASK 0x000000ff
1894#define wqe_cmnd_WORD word7
1895#define wqe_class_SHIFT 16
1896#define wqe_class_MASK 0x00000007
1897#define wqe_class_WORD word7
1898#define wqe_pu_SHIFT 20
1899#define wqe_pu_MASK 0x00000003
1900#define wqe_pu_WORD word7
1901#define wqe_erp_SHIFT 22
1902#define wqe_erp_MASK 0x00000001
1903#define wqe_erp_WORD word7
1904#define wqe_lnk_SHIFT 23
1905#define wqe_lnk_MASK 0x00000001
1906#define wqe_lnk_WORD word7
1907#define wqe_tmo_SHIFT 24
1908#define wqe_tmo_MASK 0x000000ff
1909#define wqe_tmo_WORD word7
1910 uint32_t abort_tag; /* word 8 in WQE */
1911 uint32_t word9;
1912#define wqe_reqtag_SHIFT 0
1913#define wqe_reqtag_MASK 0x0000FFFF
1914#define wqe_reqtag_WORD word9
1915#define wqe_rcvoxid_SHIFT 16
1916#define wqe_rcvoxid_MASK 0x0000FFFF
1917#define wqe_rcvoxid_WORD word9
1918 uint32_t word10;
1919#define wqe_pri_SHIFT 16
1920#define wqe_pri_MASK 0x00000007
1921#define wqe_pri_WORD word10
1922#define wqe_pv_SHIFT 19
1923#define wqe_pv_MASK 0x00000001
1924#define wqe_pv_WORD word10
1925#define wqe_xc_SHIFT 21
1926#define wqe_xc_MASK 0x00000001
1927#define wqe_xc_WORD word10
1928#define wqe_ccpe_SHIFT 23
1929#define wqe_ccpe_MASK 0x00000001
1930#define wqe_ccpe_WORD word10
1931#define wqe_ccp_SHIFT 24
1932#define wqe_ccp_MASK 0x000000ff
1933#define wqe_ccp_WORD word10
1934 uint32_t word11;
1935#define wqe_cmd_type_SHIFT 0
1936#define wqe_cmd_type_MASK 0x0000000f
1937#define wqe_cmd_type_WORD word11
1938#define wqe_wqec_SHIFT 7
1939#define wqe_wqec_MASK 0x00000001
1940#define wqe_wqec_WORD word11
1941#define wqe_cqid_SHIFT 16
1942#define wqe_cqid_MASK 0x000003ff
1943#define wqe_cqid_WORD word11
1944};
1945
1946struct wqe_did {
1947 uint32_t word5;
1948#define wqe_els_did_SHIFT 0
1949#define wqe_els_did_MASK 0x00FFFFFF
1950#define wqe_els_did_WORD word5
1951#define wqe_xmit_bls_ar_SHIFT 30
1952#define wqe_xmit_bls_ar_MASK 0x00000001
1953#define wqe_xmit_bls_ar_WORD word5
1954#define wqe_xmit_bls_xo_SHIFT 31
1955#define wqe_xmit_bls_xo_MASK 0x00000001
1956#define wqe_xmit_bls_xo_WORD word5
1957};
1958
1959struct els_request64_wqe {
1960 struct ulp_bde64 bde;
1961 uint32_t payload_len;
1962 uint32_t word4;
1963#define els_req64_sid_SHIFT 0
1964#define els_req64_sid_MASK 0x00FFFFFF
1965#define els_req64_sid_WORD word4
1966#define els_req64_sp_SHIFT 24
1967#define els_req64_sp_MASK 0x00000001
1968#define els_req64_sp_WORD word4
1969#define els_req64_vf_SHIFT 25
1970#define els_req64_vf_MASK 0x00000001
1971#define els_req64_vf_WORD word4
1972 struct wqe_did wqe_dest;
1973 struct wqe_common wqe_com; /* words 6-11 */
1974 uint32_t word12;
1975#define els_req64_vfid_SHIFT 1
1976#define els_req64_vfid_MASK 0x00000FFF
1977#define els_req64_vfid_WORD word12
1978#define els_req64_pri_SHIFT 13
1979#define els_req64_pri_MASK 0x00000007
1980#define els_req64_pri_WORD word12
1981 uint32_t word13;
1982#define els_req64_hopcnt_SHIFT 24
1983#define els_req64_hopcnt_MASK 0x000000ff
1984#define els_req64_hopcnt_WORD word13
1985 uint32_t reserved[2];
1986};
1987
1988struct xmit_els_rsp64_wqe {
1989 struct ulp_bde64 bde;
1990 uint32_t rsvd3;
1991 uint32_t rsvd4;
1992 struct wqe_did wqe_dest;
1993 struct wqe_common wqe_com; /* words 6-11 */
1994 uint32_t rsvd_12_15[4];
1995};
1996
1997struct xmit_bls_rsp64_wqe {
1998 uint32_t payload0;
1999 uint32_t word1;
2000#define xmit_bls_rsp64_rxid_SHIFT 0
2001#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
2002#define xmit_bls_rsp64_rxid_WORD word1
2003#define xmit_bls_rsp64_oxid_SHIFT 16
2004#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
2005#define xmit_bls_rsp64_oxid_WORD word1
2006 uint32_t word2;
2007#define xmit_bls_rsp64_seqcntlo_SHIFT 0
2008#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2009#define xmit_bls_rsp64_seqcntlo_WORD word2
2010#define xmit_bls_rsp64_seqcnthi_SHIFT 16
2011#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
2012#define xmit_bls_rsp64_seqcnthi_WORD word2
2013 uint32_t rsrvd3;
2014 uint32_t rsrvd4;
2015 struct wqe_did wqe_dest;
2016 struct wqe_common wqe_com; /* words 6-11 */
2017 uint32_t rsvd_12_15[4];
2018};
2019struct wqe_rctl_dfctl {
2020 uint32_t word5;
2021#define wqe_si_SHIFT 2
2022#define wqe_si_MASK 0x000000001
2023#define wqe_si_WORD word5
2024#define wqe_la_SHIFT 3
2025#define wqe_la_MASK 0x000000001
2026#define wqe_la_WORD word5
2027#define wqe_ls_SHIFT 7
2028#define wqe_ls_MASK 0x000000001
2029#define wqe_ls_WORD word5
2030#define wqe_dfctl_SHIFT 8
2031#define wqe_dfctl_MASK 0x0000000ff
2032#define wqe_dfctl_WORD word5
2033#define wqe_type_SHIFT 16
2034#define wqe_type_MASK 0x0000000ff
2035#define wqe_type_WORD word5
2036#define wqe_rctl_SHIFT 24
2037#define wqe_rctl_MASK 0x0000000ff
2038#define wqe_rctl_WORD word5
2039};
2040
2041struct xmit_seq64_wqe {
2042 struct ulp_bde64 bde;
2043 uint32_t paylaod_offset;
2044 uint32_t relative_offset;
2045 struct wqe_rctl_dfctl wge_ctl;
2046 struct wqe_common wqe_com; /* words 6-11 */
2047 /* Note: word10 different REVISIT */
2048 uint32_t xmit_len;
2049 uint32_t rsvd_12_15[3];
2050};
2051struct xmit_bcast64_wqe {
2052 struct ulp_bde64 bde;
2053 uint32_t paylaod_len;
2054 uint32_t rsvd4;
2055 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2056 struct wqe_common wqe_com; /* words 6-11 */
2057 uint32_t rsvd_12_15[4];
2058};
2059
2060struct gen_req64_wqe {
2061 struct ulp_bde64 bde;
2062 uint32_t command_len;
2063 uint32_t payload_len;
2064 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2065 struct wqe_common wqe_com; /* words 6-11 */
2066 uint32_t rsvd_12_15[4];
2067};
2068
2069struct create_xri_wqe {
2070 uint32_t rsrvd[5]; /* words 0-4 */
2071 struct wqe_did wqe_dest; /* word 5 */
2072 struct wqe_common wqe_com; /* words 6-11 */
2073 uint32_t rsvd_12_15[4]; /* word 12-15 */
2074};
2075
2076#define T_REQUEST_TAG 3
2077#define T_XRI_TAG 1
2078
2079struct abort_cmd_wqe {
2080 uint32_t rsrvd[3];
2081 uint32_t word3;
2082#define abort_cmd_ia_SHIFT 0
2083#define abort_cmd_ia_MASK 0x000000001
2084#define abort_cmd_ia_WORD word3
2085#define abort_cmd_criteria_SHIFT 8
2086#define abort_cmd_criteria_MASK 0x0000000ff
2087#define abort_cmd_criteria_WORD word3
2088 uint32_t rsrvd4;
2089 uint32_t rsrvd5;
2090 struct wqe_common wqe_com; /* words 6-11 */
2091 uint32_t rsvd_12_15[4]; /* word 12-15 */
2092};
2093
2094struct fcp_iwrite64_wqe {
2095 struct ulp_bde64 bde;
2096 uint32_t payload_len;
2097 uint32_t total_xfer_len;
2098 uint32_t initial_xfer_len;
2099 struct wqe_common wqe_com; /* words 6-11 */
2100 uint32_t rsvd_12_15[4]; /* word 12-15 */
2101};
2102
2103struct fcp_iread64_wqe {
2104 struct ulp_bde64 bde;
2105 uint32_t payload_len; /* word 3 */
2106 uint32_t total_xfer_len; /* word 4 */
2107 uint32_t rsrvd5; /* word 5 */
2108 struct wqe_common wqe_com; /* words 6-11 */
2109 uint32_t rsvd_12_15[4]; /* word 12-15 */
2110};
2111
2112struct fcp_icmnd64_wqe {
2113 struct ulp_bde64 bde; /* words 0-2 */
2114 uint32_t rsrvd[3]; /* words 3-5 */
2115 struct wqe_common wqe_com; /* words 6-11 */
2116 uint32_t rsvd_12_15[4]; /* word 12-15 */
2117};
2118
2119
2120union lpfc_wqe {
2121 uint32_t words[16];
2122 struct lpfc_wqe_generic generic;
2123 struct fcp_icmnd64_wqe fcp_icmd;
2124 struct fcp_iread64_wqe fcp_iread;
2125 struct fcp_iwrite64_wqe fcp_iwrite;
2126 struct abort_cmd_wqe abort_cmd;
2127 struct create_xri_wqe create_xri;
2128 struct xmit_bcast64_wqe xmit_bcast64;
2129 struct xmit_seq64_wqe xmit_sequence;
2130 struct xmit_bls_rsp64_wqe xmit_bls_rsp;
2131 struct xmit_els_rsp64_wqe xmit_els_rsp;
2132 struct els_request64_wqe els_req;
2133 struct gen_req64_wqe gen_req;
2134};
2135
2136#define FCP_COMMAND 0x0
2137#define FCP_COMMAND_DATA_OUT 0x1
2138#define ELS_COMMAND_NON_FIP 0xC
2139#define ELS_COMMAND_FIP 0xD
2140#define OTHER_COMMAND 0x8
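
/*
 * Illustrative sketch (editor's addition, not part of this commit): a work
 * queue entry is built by picking the per-command view out of union
 * lpfc_wqe and programming the shared word 6-11 fields through wqe_common.
 * Assuming the bf_set() accessor defined earlier in this header, a bare ELS
 * request skeleton looks roughly like this; lpfc_example_prep_els_wqe is a
 * hypothetical helper name, and the driver's actual WQE construction is
 * done elsewhere in this patch series.
 */
static inline void
lpfc_example_prep_els_wqe(union lpfc_wqe *wqe, uint16_t xri,
			  uint16_t reqtag, uint32_t did)
{
	memset(wqe, 0, sizeof(*wqe));
	bf_set(wqe_xri, &wqe->els_req.wqe_com, xri);
	bf_set(wqe_reqtag, &wqe->els_req.wqe_com, reqtag);
	bf_set(wqe_cmd_type, &wqe->els_req.wqe_com, ELS_COMMAND_NON_FIP);
	bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
}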
2141
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86d1bdcbf2d8..2f5907f92eea 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -34,8 +34,10 @@
34#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h> 35#include <scsi/scsi_transport_fc.h>
36 36
37#include "lpfc_hw4.h"
37#include "lpfc_hw.h" 38#include "lpfc_hw.h"
38#include "lpfc_sli.h" 39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
39#include "lpfc_nl.h" 41#include "lpfc_nl.h"
40#include "lpfc_disc.h" 42#include "lpfc_disc.h"
41#include "lpfc_scsi.h" 43#include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
51unsigned long _dump_buf_dif_order; 53unsigned long _dump_buf_dif_order;
52spinlock_t _dump_buf_lock; 54spinlock_t _dump_buf_lock;
53 55
54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
56static int lpfc_post_rcv_buf(struct lpfc_hba *); 57static int lpfc_post_rcv_buf(struct lpfc_hba *);
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
57 73
58static struct scsi_transport_template *lpfc_transport_template = NULL; 74static struct scsi_transport_template *lpfc_transport_template = NULL;
59static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
92 return -ENOMEM; 108 return -ENOMEM;
93 } 109 }
94 110
95 mb = &pmb->mb; 111 mb = &pmb->u.mb;
96 phba->link_state = LPFC_INIT_MBX_CMDS; 112 phba->link_state = LPFC_INIT_MBX_CMDS;
97 113
98 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 114 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
205 mb->mbxCommand, mb->mbxStatus); 221 mb->mbxCommand, mb->mbxStatus);
206 mb->un.varDmp.word_cnt = 0; 222 mb->un.varDmp.word_cnt = 0;
207 } 223 }
 224 /* dump mem may return a zero when finished, or we got a
 225 * mailbox error; either way we are done.
226 */
227 if (mb->un.varDmp.word_cnt == 0)
228 break;
208 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 229 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
209 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 230 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
210 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 231 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -233,7 +254,7 @@ out_free_mbox:
233static void 254static void
234lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
235{ 256{
236 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 257 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
237 phba->temp_sensor_support = 1; 258 phba->temp_sensor_support = 1;
238 else 259 else
239 phba->temp_sensor_support = 0; 260 phba->temp_sensor_support = 0;
@@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
260 /* character array used for decoding dist type. */ 281 /* character array used for decoding dist type. */
261 char dist_char[] = "nabx"; 282 char dist_char[] = "nabx";
262 283
263 if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { 284 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
264 mempool_free(pmboxq, phba->mbox_mem_pool); 285 mempool_free(pmboxq, phba->mbox_mem_pool);
265 return; 286 return;
266 } 287 }
@@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
268 prg = (struct prog_id *) &prog_id_word; 289 prg = (struct prog_id *) &prog_id_word;
269 290
270 /* word 7 contain option rom version */ 291 /* word 7 contain option rom version */
271 prog_id_word = pmboxq->mb.un.varWords[7]; 292 prog_id_word = pmboxq->u.mb.un.varWords[7];
272 293
273 /* Decode the Option rom version word to a readable string */ 294 /* Decode the Option rom version word to a readable string */
274 if (prg->dist < 4) 295 if (prg->dist < 4)
@@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
325 phba->link_state = LPFC_HBA_ERROR; 346 phba->link_state = LPFC_HBA_ERROR;
326 return -ENOMEM; 347 return -ENOMEM;
327 } 348 }
328 mb = &pmb->mb; 349 mb = &pmb->u.mb;
329 350
330 /* Get login parameters for NID. */ 351 /* Get login parameters for NID. */
331 lpfc_read_sparam(phba, pmb, 0); 352 lpfc_read_sparam(phba, pmb, 0);
@@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
364 /* Update the fc_host data structures with new wwn. */ 385 /* Update the fc_host data structures with new wwn. */
365 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
366 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388 fc_host_max_npiv_vports(shost) = phba->max_vpi;
367 389
368 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 390 /* If no serial number in VPD data, use low 6 bytes of WWNN */
369 /* This should be consolidated into parse_vpd ? - mr */ 391 /* This should be consolidated into parse_vpd ? - mr */
@@ -460,17 +482,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 482 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
461 "0352 Config MSI mailbox command " 483 "0352 Config MSI mailbox command "
462 "failed, mbxCmd x%x, mbxStatus x%x\n", 484 "failed, mbxCmd x%x, mbxStatus x%x\n",
463 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 485 pmb->u.mb.mbxCommand,
486 pmb->u.mb.mbxStatus);
464 mempool_free(pmb, phba->mbox_mem_pool); 487 mempool_free(pmb, phba->mbox_mem_pool);
465 return -EIO; 488 return -EIO;
466 } 489 }
467 } 490 }
468 491
492 spin_lock_irq(&phba->hbalock);
469 /* Initialize ERATT handling flag */ 493 /* Initialize ERATT handling flag */
470 phba->hba_flag &= ~HBA_ERATT_HANDLED; 494 phba->hba_flag &= ~HBA_ERATT_HANDLED;
471 495
472 /* Enable appropriate host interrupts */ 496 /* Enable appropriate host interrupts */
473 spin_lock_irq(&phba->hbalock);
474 status = readl(phba->HCregaddr); 497 status = readl(phba->HCregaddr);
475 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 498 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
476 if (psli->num_rings > 0) 499 if (psli->num_rings > 0)
@@ -571,16 +594,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
571{ 594{
572 struct lpfc_vport **vports; 595 struct lpfc_vport **vports;
573 int i; 596 int i;
574 /* Disable interrupts */ 597
575 writel(0, phba->HCregaddr); 598 if (phba->sli_rev <= LPFC_SLI_REV3) {
576 readl(phba->HCregaddr); /* flush */ 599 /* Disable interrupts */
600 writel(0, phba->HCregaddr);
601 readl(phba->HCregaddr); /* flush */
602 }
577 603
578 if (phba->pport->load_flag & FC_UNLOADING) 604 if (phba->pport->load_flag & FC_UNLOADING)
579 lpfc_cleanup_discovery_resources(phba->pport); 605 lpfc_cleanup_discovery_resources(phba->pport);
580 else { 606 else {
581 vports = lpfc_create_vport_work_array(phba); 607 vports = lpfc_create_vport_work_array(phba);
582 if (vports != NULL) 608 if (vports != NULL)
583 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 609 for (i = 0; i <= phba->max_vports &&
610 vports[i] != NULL; i++)
584 lpfc_cleanup_discovery_resources(vports[i]); 611 lpfc_cleanup_discovery_resources(vports[i]);
585 lpfc_destroy_vport_work_array(phba, vports); 612 lpfc_destroy_vport_work_array(phba, vports);
586 } 613 }
@@ -588,7 +615,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
588} 615}
589 616
590/** 617/**
591 * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset 618 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
592 * @phba: pointer to lpfc HBA data structure. 619 * @phba: pointer to lpfc HBA data structure.
593 * 620 *
594 * This routine will do uninitialization after the HBA is reset when bring 621 * This routine will do uninitialization after the HBA is reset when bring
@@ -598,8 +625,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
598 * 0 - success. 625
599 * Any other value - error. 626 * Any other value - error.
600 **/ 627 **/
601int 628static int
602lpfc_hba_down_post(struct lpfc_hba *phba) 629lpfc_hba_down_post_s3(struct lpfc_hba *phba)
603{ 630{
604 struct lpfc_sli *psli = &phba->sli; 631 struct lpfc_sli *psli = &phba->sli;
605 struct lpfc_sli_ring *pring; 632 struct lpfc_sli_ring *pring;
@@ -642,6 +669,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
642 669
643 return 0; 670 return 0;
644} 671}
672/**
673 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
674 * @phba: pointer to lpfc HBA data structure.
675 *
 676 * This routine will do uninitialization after the HBA is reset when bringing
677 * down the SLI Layer.
678 *
679 * Return codes
 680 * 0 - success.
681 * Any other value - error.
682 **/
683static int
684lpfc_hba_down_post_s4(struct lpfc_hba *phba)
685{
686 struct lpfc_scsi_buf *psb, *psb_next;
687 LIST_HEAD(aborts);
688 int ret;
689 unsigned long iflag = 0;
690 ret = lpfc_hba_down_post_s3(phba);
691 if (ret)
692 return ret;
693 /* At this point in time the HBA is either reset or DOA. Either
 694 * way, nothing should be on lpfc_abts_els_sgl_list; it all needs to be
695 * on the lpfc_sgl_list so that it can either be freed if the
696 * driver is unloading or reposted if the driver is restarting
697 * the port.
698 */
699 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
 700 /* scsi_buf_list */
701 /* abts_sgl_list_lock required because worker thread uses this
702 * list.
703 */
704 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
705 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
706 &phba->sli4_hba.lpfc_sgl_list);
707 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
708 /* abts_scsi_buf_list_lock required because worker thread uses this
709 * list.
710 */
711 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
712 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
713 &aborts);
714 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
715 spin_unlock_irq(&phba->hbalock);
716
717 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
718 psb->pCmd = NULL;
719 psb->status = IOSTAT_SUCCESS;
720 }
721 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
722 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
723 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
724 return 0;
725}
726
727/**
728 * lpfc_hba_down_post - Wrapper func for hba down post routine
729 * @phba: pointer to lpfc HBA data structure.
730 *
731 * This routine wraps the actual SLI3 or SLI4 routine for performing
 732 * uninitialization after the HBA is reset when bringing down the SLI Layer.
733 *
734 * Return codes
 735 * 0 - success.
736 * Any other value - error.
737 **/
738int
739lpfc_hba_down_post(struct lpfc_hba *phba)
740{
741 return (*phba->lpfc_hba_down_post)(phba);
742}
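
/*
 * Illustrative sketch (editor's addition, not part of this commit): the
 * wrapper above dispatches through a per-HBA function pointer that the
 * device-group API setup code points at either the _s3 or the _s4 variant.
 * Conceptually (the exact wiring lives in the init API table setup routine):
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
 *		break;
 *	}
 */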
645 743
646/** 744/**
647 * lpfc_hb_timeout - The HBA-timer timeout handler 745 * lpfc_hb_timeout - The HBA-timer timeout handler
@@ -809,7 +907,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
809 "taking this port offline.\n"); 907 "taking this port offline.\n");
810 908
811 spin_lock_irq(&phba->hbalock); 909 spin_lock_irq(&phba->hbalock);
812 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 910 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
813 spin_unlock_irq(&phba->hbalock); 911 spin_unlock_irq(&phba->hbalock);
814 912
815 lpfc_offline_prep(phba); 913 lpfc_offline_prep(phba);
@@ -834,13 +932,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
834 struct lpfc_sli *psli = &phba->sli; 932 struct lpfc_sli *psli = &phba->sli;
835 933
836 spin_lock_irq(&phba->hbalock); 934 spin_lock_irq(&phba->hbalock);
837 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 935 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
838 spin_unlock_irq(&phba->hbalock); 936 spin_unlock_irq(&phba->hbalock);
839 lpfc_offline_prep(phba); 937 lpfc_offline_prep(phba);
840 938
841 lpfc_offline(phba); 939 lpfc_offline(phba);
842 lpfc_reset_barrier(phba); 940 lpfc_reset_barrier(phba);
941 spin_lock_irq(&phba->hbalock);
843 lpfc_sli_brdreset(phba); 942 lpfc_sli_brdreset(phba);
943 spin_unlock_irq(&phba->hbalock);
844 lpfc_hba_down_post(phba); 944 lpfc_hba_down_post(phba);
845 lpfc_sli_brdready(phba, HS_MBRDY); 945 lpfc_sli_brdready(phba, HS_MBRDY);
846 lpfc_unblock_mgmt_io(phba); 946 lpfc_unblock_mgmt_io(phba);
@@ -849,6 +949,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
849} 949}
850 950
851/** 951/**
952 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
953 * @phba: pointer to lpfc hba data structure.
954 *
955 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
956 * other than Port Error 6 has been detected.
957 **/
958static void
959lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
960{
961 lpfc_offline_prep(phba);
962 lpfc_offline(phba);
963 lpfc_sli4_brdreset(phba);
964 lpfc_hba_down_post(phba);
965 lpfc_sli4_post_status_check(phba);
966 lpfc_unblock_mgmt_io(phba);
967 phba->link_state = LPFC_HBA_ERROR;
968}
969
970/**
852 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 971 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
853 * @phba: pointer to lpfc hba data structure. 972 * @phba: pointer to lpfc hba data structure.
854 * 973 *
@@ -864,6 +983,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
864 struct lpfc_sli_ring *pring; 983 struct lpfc_sli_ring *pring;
865 struct lpfc_sli *psli = &phba->sli; 984 struct lpfc_sli *psli = &phba->sli;
866 985
986 /* If the pci channel is offline, ignore possible errors,
987 * since we cannot communicate with the pci card anyway.
988 */
989 if (pci_channel_offline(phba->pcidev)) {
990 spin_lock_irq(&phba->hbalock);
991 phba->hba_flag &= ~DEFER_ERATT;
992 spin_unlock_irq(&phba->hbalock);
993 return;
994 }
995
867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
868 "0479 Deferred Adapter Hardware Error " 997 "0479 Deferred Adapter Hardware Error "
869 "Data: x%x x%x x%x\n", 998 "Data: x%x x%x x%x\n",
@@ -871,7 +1000,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
871 phba->work_status[0], phba->work_status[1]); 1000 phba->work_status[0], phba->work_status[1]);
872 1001
873 spin_lock_irq(&phba->hbalock); 1002 spin_lock_irq(&phba->hbalock);
874 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1003 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
875 spin_unlock_irq(&phba->hbalock); 1004 spin_unlock_irq(&phba->hbalock);
876 1005
877 1006
@@ -909,13 +1038,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
909 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1038 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
910 phba->work_hs = old_host_status & ~HS_FFER1; 1039 phba->work_hs = old_host_status & ~HS_FFER1;
911 1040
1041 spin_lock_irq(&phba->hbalock);
912 phba->hba_flag &= ~DEFER_ERATT; 1042 phba->hba_flag &= ~DEFER_ERATT;
1043 spin_unlock_irq(&phba->hbalock);
913 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1044 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
914 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1045 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
915} 1046}
916 1047
1048static void
1049lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1050{
1051 struct lpfc_board_event_header board_event;
1052 struct Scsi_Host *shost;
1053
1054 board_event.event_type = FC_REG_BOARD_EVENT;
1055 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1056 shost = lpfc_shost_from_vport(phba->pport);
1057 fc_host_post_vendor_event(shost, fc_get_event_number(),
1058 sizeof(board_event),
1059 (char *) &board_event,
1060 LPFC_NL_VENDOR_ID);
1061}
1062
917/** 1063/**
918 * lpfc_handle_eratt - The HBA hardware error handler 1064 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
919 * @phba: pointer to lpfc hba data structure. 1065 * @phba: pointer to lpfc hba data structure.
920 * 1066 *
921 * This routine is invoked to handle the following HBA hardware error 1067 * This routine is invoked to handle the following HBA hardware error
@@ -924,8 +1070,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
924 * 2 - DMA ring index out of range 1070 * 2 - DMA ring index out of range
925 * 3 - Mailbox command came back as unknown 1071 * 3 - Mailbox command came back as unknown
926 **/ 1072 **/
927void 1073static void
928lpfc_handle_eratt(struct lpfc_hba *phba) 1074lpfc_handle_eratt_s3(struct lpfc_hba *phba)
929{ 1075{
930 struct lpfc_vport *vport = phba->pport; 1076 struct lpfc_vport *vport = phba->pport;
931 struct lpfc_sli *psli = &phba->sli; 1077 struct lpfc_sli *psli = &phba->sli;
@@ -934,24 +1080,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
934 unsigned long temperature; 1080 unsigned long temperature;
935 struct temp_event temp_event_data; 1081 struct temp_event temp_event_data;
936 struct Scsi_Host *shost; 1082 struct Scsi_Host *shost;
937 struct lpfc_board_event_header board_event;
938 1083
939 /* If the pci channel is offline, ignore possible errors, 1084 /* If the pci channel is offline, ignore possible errors,
940 * since we cannot communicate with the pci card anyway. */ 1085 * since we cannot communicate with the pci card anyway.
941 if (pci_channel_offline(phba->pcidev)) 1086 */
1087 if (pci_channel_offline(phba->pcidev)) {
1088 spin_lock_irq(&phba->hbalock);
1089 phba->hba_flag &= ~DEFER_ERATT;
1090 spin_unlock_irq(&phba->hbalock);
942 return; 1091 return;
1092 }
1093
943 /* If resets are disabled then leave the HBA alone and return */ 1094 /* If resets are disabled then leave the HBA alone and return */
944 if (!phba->cfg_enable_hba_reset) 1095 if (!phba->cfg_enable_hba_reset)
945 return; 1096 return;
946 1097
947 /* Send an internal error event to mgmt application */ 1098 /* Send an internal error event to mgmt application */
948 board_event.event_type = FC_REG_BOARD_EVENT; 1099 lpfc_board_errevt_to_mgmt(phba);
949 board_event.subcategory = LPFC_EVENT_PORTINTERR;
950 shost = lpfc_shost_from_vport(phba->pport);
951 fc_host_post_vendor_event(shost, fc_get_event_number(),
952 sizeof(board_event),
953 (char *) &board_event,
954 LPFC_NL_VENDOR_ID);
955 1100
956 if (phba->hba_flag & DEFER_ERATT) 1101 if (phba->hba_flag & DEFER_ERATT)
957 lpfc_handle_deferred_eratt(phba); 1102 lpfc_handle_deferred_eratt(phba);
@@ -965,7 +1110,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
965 phba->work_status[0], phba->work_status[1]); 1110 phba->work_status[0], phba->work_status[1]);
966 1111
967 spin_lock_irq(&phba->hbalock); 1112 spin_lock_irq(&phba->hbalock);
968 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1113 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
969 spin_unlock_irq(&phba->hbalock); 1114 spin_unlock_irq(&phba->hbalock);
970 1115
971 /* 1116 /*
@@ -1037,6 +1182,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
1037} 1182}
1038 1183
1039/** 1184/**
1185 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1186 * @phba: pointer to lpfc hba data structure.
1187 *
1188 * This routine is invoked to handle the SLI4 HBA hardware error attention
1189 * conditions.
1190 **/
1191static void
1192lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1193{
1194 struct lpfc_vport *vport = phba->pport;
1195 uint32_t event_data;
1196 struct Scsi_Host *shost;
1197
1198 /* If the pci channel is offline, ignore possible errors, since
1199 * we cannot communicate with the pci card anyway.
1200 */
1201 if (pci_channel_offline(phba->pcidev))
1202 return;
1203 /* If resets are disabled then leave the HBA alone and return */
1204 if (!phba->cfg_enable_hba_reset)
1205 return;
1206
1207 /* Send an internal error event to mgmt application */
1208 lpfc_board_errevt_to_mgmt(phba);
1209
1210 /* For now, the actual action for SLI4 device handling is not
 1211 * specified yet; just treat it as an adapter hardware failure
1212 */
1213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1214 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1215 phba->work_status[0], phba->work_status[1]);
1216
1217 event_data = FC_REG_DUMP_EVENT;
1218 shost = lpfc_shost_from_vport(vport);
1219 fc_host_post_vendor_event(shost, fc_get_event_number(),
1220 sizeof(event_data), (char *) &event_data,
1221 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1222
1223 lpfc_sli4_offline_eratt(phba);
1224}
1225
1226/**
1227 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1228 * @phba: pointer to lpfc HBA data structure.
1229 *
1230 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 1231 * routine from the API jump table function pointer in the lpfc_hba struct.
1232 *
1233 * Return codes
 1234 * 0 - success.
1235 * Any other value - error.
1236 **/
1237void
1238lpfc_handle_eratt(struct lpfc_hba *phba)
1239{
1240 (*phba->lpfc_handle_eratt)(phba);
1241}
1242
1243/**
1040 * lpfc_handle_latt - The HBA link event handler 1244 * lpfc_handle_latt - The HBA link event handler
1041 * @phba: pointer to lpfc hba data structure. 1245 * @phba: pointer to lpfc hba data structure.
1042 * 1246 *
@@ -1137,7 +1341,7 @@ lpfc_handle_latt_err_exit:
1137 * 0 - pointer to the VPD passed in is NULL 1341 * 0 - pointer to the VPD passed in is NULL
1138 * 1 - success 1342 * 1 - success
1139 **/ 1343 **/
1140static int 1344int
1141lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1345lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1142{ 1346{
1143 uint8_t lenlo, lenhi; 1347 uint8_t lenlo, lenhi;
@@ -1292,6 +1496,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1292 uint16_t dev_id = phba->pcidev->device; 1496 uint16_t dev_id = phba->pcidev->device;
1293 int max_speed; 1497 int max_speed;
1294 int GE = 0; 1498 int GE = 0;
1499 int oneConnect = 0; /* default is not a oneConnect */
1295 struct { 1500 struct {
1296 char * name; 1501 char * name;
1297 int max_speed; 1502 int max_speed;
@@ -1437,6 +1642,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1437 case PCI_DEVICE_ID_PROTEUS_S: 1642 case PCI_DEVICE_ID_PROTEUS_S:
1438 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1643 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1439 break; 1644 break;
1645 case PCI_DEVICE_ID_TIGERSHARK:
1646 oneConnect = 1;
1647 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1648 break;
1649 case PCI_DEVICE_ID_TIGERSHARK_S:
1650 oneConnect = 1;
1651 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
1652 break;
1440 default: 1653 default:
1441 m = (typeof(m)){ NULL }; 1654 m = (typeof(m)){ NULL };
1442 break; 1655 break;
@@ -1444,13 +1657,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1444 1657
1445 if (mdp && mdp[0] == '\0') 1658 if (mdp && mdp[0] == '\0')
1446 snprintf(mdp, 79,"%s", m.name); 1659 snprintf(mdp, 79,"%s", m.name);
1447 if (descp && descp[0] == '\0') 1660 /* oneConnect hbas require special processing; they are all initiators
1448 snprintf(descp, 255, 1661 * and we put the port number on the end
1449 "Emulex %s %d%s %s %s", 1662 */
1450 m.name, m.max_speed, 1663 if (descp && descp[0] == '\0') {
1451 (GE) ? "GE" : "Gb", 1664 if (oneConnect)
1452 m.bus, 1665 snprintf(descp, 255,
1453 (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); 1666 "Emulex OneConnect %s, FCoE Initiator, Port %s",
1667 m.name,
1668 phba->Port);
1669 else
1670 snprintf(descp, 255,
1671 "Emulex %s %d%s %s %s",
1672 m.name, m.max_speed,
1673 (GE) ? "GE" : "Gb",
1674 m.bus,
1675 (GE) ? "FCoE Adapter" :
1676 "Fibre Channel Adapter");
1677 }
1454} 1678}
1455 1679
1456/** 1680/**
@@ -1533,7 +1757,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1533 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1757 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1534 icmd->ulpLe = 1; 1758 icmd->ulpLe = 1;
1535 1759
1536 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1760 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1761 IOCB_ERROR) {
1537 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1762 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1538 kfree(mp1); 1763 kfree(mp1);
1539 cnt++; 1764 cnt++;
@@ -1761,7 +1986,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1761 * Lets wait for this to happen, if needed. 1986 * Lets wait for this to happen, if needed.
1762 */ 1987 */
1763 while (!list_empty(&vport->fc_nodes)) { 1988 while (!list_empty(&vport->fc_nodes)) {
1764
1765 if (i++ > 3000) { 1989 if (i++ > 3000) {
1766 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1990 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1767 "0233 Nodelist not empty\n"); 1991 "0233 Nodelist not empty\n");
@@ -1782,7 +2006,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1782 /* Wait for any activity on ndlps to settle */ 2006 /* Wait for any activity on ndlps to settle */
1783 msleep(10); 2007 msleep(10);
1784 } 2008 }
1785 return;
1786} 2009}
1787 2010
1788/** 2011/**
@@ -1803,22 +2026,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
1803} 2026}
1804 2027
1805/** 2028/**
1806 * lpfc_stop_phba_timers - Stop all the timers associated with an HBA 2029 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
1807 * @phba: pointer to lpfc hba data structure. 2030 * @phba: pointer to lpfc hba data structure.
1808 * 2031 *
1809 * This routine stops all the timers associated with a HBA. This function is 2032 * This routine stops all the timers associated with a HBA. This function is
1810 * invoked before either putting a HBA offline or unloading the driver. 2033 * invoked before either putting a HBA offline or unloading the driver.
1811 **/ 2034 **/
1812static void 2035void
1813lpfc_stop_phba_timers(struct lpfc_hba *phba) 2036lpfc_stop_hba_timers(struct lpfc_hba *phba)
1814{ 2037{
1815 del_timer_sync(&phba->fcp_poll_timer);
1816 lpfc_stop_vport_timers(phba->pport); 2038 lpfc_stop_vport_timers(phba->pport);
1817 del_timer_sync(&phba->sli.mbox_tmo); 2039 del_timer_sync(&phba->sli.mbox_tmo);
1818 del_timer_sync(&phba->fabric_block_timer); 2040 del_timer_sync(&phba->fabric_block_timer);
1819 phba->hb_outstanding = 0;
1820 del_timer_sync(&phba->hb_tmofunc);
1821 del_timer_sync(&phba->eratt_poll); 2041 del_timer_sync(&phba->eratt_poll);
2042 del_timer_sync(&phba->hb_tmofunc);
2043 phba->hb_outstanding = 0;
2044
2045 switch (phba->pci_dev_grp) {
2046 case LPFC_PCI_DEV_LP:
2047 /* Stop any LightPulse device specific driver timers */
2048 del_timer_sync(&phba->fcp_poll_timer);
2049 break;
2050 case LPFC_PCI_DEV_OC:
 2051 /* Stop any OneConnect device specific driver timers */
2052 break;
2053 default:
2054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2055 "0297 Invalid device group (x%x)\n",
2056 phba->pci_dev_grp);
2057 break;
2058 }
1822 return; 2059 return;
1823} 2060}
1824 2061
@@ -1878,14 +2115,21 @@ lpfc_online(struct lpfc_hba *phba)
1878 return 1; 2115 return 1;
1879 } 2116 }
1880 2117
1881 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 2118 if (phba->sli_rev == LPFC_SLI_REV4) {
1882 lpfc_unblock_mgmt_io(phba); 2119 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
1883 return 1; 2120 lpfc_unblock_mgmt_io(phba);
2121 return 1;
2122 }
2123 } else {
2124 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2125 lpfc_unblock_mgmt_io(phba);
2126 return 1;
2127 }
1884 } 2128 }
1885 2129
1886 vports = lpfc_create_vport_work_array(phba); 2130 vports = lpfc_create_vport_work_array(phba);
1887 if (vports != NULL) 2131 if (vports != NULL)
1888 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2132 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1889 struct Scsi_Host *shost; 2133 struct Scsi_Host *shost;
1890 shost = lpfc_shost_from_vport(vports[i]); 2134 shost = lpfc_shost_from_vport(vports[i]);
1891 spin_lock_irq(shost->host_lock); 2135 spin_lock_irq(shost->host_lock);
@@ -1947,11 +2191,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1947 /* Issue an unreg_login to all nodes on all vports */ 2191 /* Issue an unreg_login to all nodes on all vports */
1948 vports = lpfc_create_vport_work_array(phba); 2192 vports = lpfc_create_vport_work_array(phba);
1949 if (vports != NULL) { 2193 if (vports != NULL) {
1950 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2194 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1951 struct Scsi_Host *shost; 2195 struct Scsi_Host *shost;
1952 2196
1953 if (vports[i]->load_flag & FC_UNLOADING) 2197 if (vports[i]->load_flag & FC_UNLOADING)
1954 continue; 2198 continue;
2199 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
1955 shost = lpfc_shost_from_vport(vports[i]); 2200 shost = lpfc_shost_from_vport(vports[i]);
1956 list_for_each_entry_safe(ndlp, next_ndlp, 2201 list_for_each_entry_safe(ndlp, next_ndlp,
1957 &vports[i]->fc_nodes, 2202 &vports[i]->fc_nodes,
@@ -1975,7 +2220,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1975 } 2220 }
1976 lpfc_destroy_vport_work_array(phba, vports); 2221 lpfc_destroy_vport_work_array(phba, vports);
1977 2222
1978 lpfc_sli_flush_mbox_queue(phba); 2223 lpfc_sli_mbox_sys_shutdown(phba);
1979} 2224}
1980 2225
1981/** 2226/**
@@ -1996,11 +2241,11 @@ lpfc_offline(struct lpfc_hba *phba)
1996 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2241 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1997 return; 2242 return;
1998 2243
1999 /* stop all timers associated with this hba */ 2244 /* stop port and all timers associated with this hba */
2000 lpfc_stop_phba_timers(phba); 2245 lpfc_stop_port(phba);
2001 vports = lpfc_create_vport_work_array(phba); 2246 vports = lpfc_create_vport_work_array(phba);
2002 if (vports != NULL) 2247 if (vports != NULL)
2003 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 2248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2004 lpfc_stop_vport_timers(vports[i]); 2249 lpfc_stop_vport_timers(vports[i]);
2005 lpfc_destroy_vport_work_array(phba, vports); 2250 lpfc_destroy_vport_work_array(phba, vports);
2006 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2251 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2013,7 +2258,7 @@ lpfc_offline(struct lpfc_hba *phba)
2013 spin_unlock_irq(&phba->hbalock); 2258 spin_unlock_irq(&phba->hbalock);
2014 vports = lpfc_create_vport_work_array(phba); 2259 vports = lpfc_create_vport_work_array(phba);
2015 if (vports != NULL) 2260 if (vports != NULL)
2016 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2261 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2017 shost = lpfc_shost_from_vport(vports[i]); 2262 shost = lpfc_shost_from_vport(vports[i]);
2018 spin_lock_irq(shost->host_lock); 2263 spin_lock_irq(shost->host_lock);
2019 vports[i]->work_port_events = 0; 2264 vports[i]->work_port_events = 0;
@@ -2106,6 +2351,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2106 shost->max_lun = vport->cfg_max_luns; 2351 shost->max_lun = vport->cfg_max_luns;
2107 shost->this_id = -1; 2352 shost->this_id = -1;
2108 shost->max_cmd_len = 16; 2353 shost->max_cmd_len = 16;
2354 if (phba->sli_rev == LPFC_SLI_REV4) {
2355 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2356 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2357 }
2109 2358
2110 /* 2359 /*
2111 * Set initial can_queue value since 0 is no longer supported and 2360 * Set initial can_queue value since 0 is no longer supported and
@@ -2123,6 +2372,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2123 2372
2124 /* Initialize all internally managed lists. */ 2373 /* Initialize all internally managed lists. */
2125 INIT_LIST_HEAD(&vport->fc_nodes); 2374 INIT_LIST_HEAD(&vport->fc_nodes);
2375 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2126 spin_lock_init(&vport->work_port_lock); 2376 spin_lock_init(&vport->work_port_lock);
2127 2377
2128 init_timer(&vport->fc_disctmo); 2378 init_timer(&vport->fc_disctmo);
@@ -2314,15 +2564,3461 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2314} 2564}
2315 2565
2316/** 2566/**
2317 * lpfc_enable_msix - Enable MSI-X interrupt mode 2567 * lpfc_stop_port_s3 - Stop SLI3 device port
2568 * @phba: pointer to lpfc hba data structure.
2569 *
2570 * This routine is invoked to stop an SLI3 device port, it stops the device
2571 * from generating interrupts and stops the device driver's timers for the
2572 * device.
2573 **/
2574static void
2575lpfc_stop_port_s3(struct lpfc_hba *phba)
2576{
2577 /* Clear all interrupt enable conditions */
2578 writel(0, phba->HCregaddr);
2579 readl(phba->HCregaddr); /* flush */
2580 /* Clear all pending interrupts */
2581 writel(0xffffffff, phba->HAregaddr);
2582 readl(phba->HAregaddr); /* flush */
2583
2584 /* Reset some HBA SLI setup states */
2585 lpfc_stop_hba_timers(phba);
2586 phba->pport->work_port_events = 0;
2587}
2588
2589/**
2590 * lpfc_stop_port_s4 - Stop SLI4 device port
2591 * @phba: pointer to lpfc hba data structure.
2592 *
2593 * This routine is invoked to stop an SLI4 device port, it stops the device
2594 * from generating interrupts and stops the device driver's timers for the
2595 * device.
2596 **/
2597static void
2598lpfc_stop_port_s4(struct lpfc_hba *phba)
2599{
2600 /* Reset some HBA SLI4 setup states */
2601 lpfc_stop_hba_timers(phba);
2602 phba->pport->work_port_events = 0;
2603 phba->sli4_hba.intr_enable = 0;
 2604 /* Hard clear it for now; a more graceful wait can be added later */
2605 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2606}
2607
2608/**
2609 * lpfc_stop_port - Wrapper function for stopping hba port
2610 * @phba: Pointer to HBA context object.
2611 *
2612 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 2613 * the API jump table function pointer in the lpfc_hba struct.
2614 **/
2615void
2616lpfc_stop_port(struct lpfc_hba *phba)
2617{
2618 phba->lpfc_stop_port(phba);
2619}
2620
2621/**
 2622 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2623 * @phba: pointer to lpfc hba data structure.
2624 *
2625 * This routine is invoked to remove the driver default fcf record from
2626 * the port. This routine currently acts on FCF Index 0.
2627 *
2628 **/
2629void
2630lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2631{
2632 int rc = 0;
2633 LPFC_MBOXQ_t *mboxq;
2634 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2635 uint32_t mbox_tmo, req_len;
2636 uint32_t shdr_status, shdr_add_status;
2637
2638 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2639 if (!mboxq) {
2640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 2641 "2020 Failed to allocate mbox for DEL_FCF cmd\n");
2642 return;
2643 }
2644
2645 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2646 sizeof(struct lpfc_sli4_cfg_mhdr);
2647 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2648 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2649 req_len, LPFC_SLI4_MBX_EMBED);
2650 /*
 2651 * In phase 1, there is a single FCF index, 0. In phase 2, the driver
2652 * supports multiple FCF indices.
2653 */
2654 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2655 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2656 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2657 phba->fcf.fcf_indx);
2658
2659 if (!phba->sli4_hba.intr_enable)
2660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2661 else {
2662 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2663 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2664 }
2665 /* The IOCTL status is embedded in the mailbox subheader. */
2666 shdr_status = bf_get(lpfc_mbox_hdr_status,
2667 &del_fcf_record->header.cfg_shdr.response);
2668 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2669 &del_fcf_record->header.cfg_shdr.response);
2670 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2671 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2672 "2516 DEL FCF of default FCF Index failed "
2673 "mbx status x%x, status x%x add_status x%x\n",
2674 rc, shdr_status, shdr_add_status);
2675 }
2676 if (rc != MBX_TIMEOUT)
2677 mempool_free(mboxq, phba->mbox_mem_pool);
2678}
2679
2680/**
2681 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2682 * @phba: pointer to lpfc hba data structure.
2683 * @acqe_link: pointer to the async link completion queue entry.
2684 *
2685 * This routine is to parse the SLI4 link-attention link fault code and
2686 * translate it into the base driver's read link attention mailbox command
2687 * status.
2688 *
2689 * Return: Link-attention status in terms of base driver's coding.
2690 **/
2691static uint16_t
2692lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2693 struct lpfc_acqe_link *acqe_link)
2694{
2695 uint16_t latt_fault;
2696
2697 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2698 case LPFC_ASYNC_LINK_FAULT_NONE:
2699 case LPFC_ASYNC_LINK_FAULT_LOCAL:
2700 case LPFC_ASYNC_LINK_FAULT_REMOTE:
2701 latt_fault = 0;
2702 break;
2703 default:
2704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2705 "0398 Invalid link fault code: x%x\n",
2706 bf_get(lpfc_acqe_link_fault, acqe_link));
2707 latt_fault = MBXERR_ERROR;
2708 break;
2709 }
2710 return latt_fault;
2711}
2712
2713/**
2714 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2715 * @phba: pointer to lpfc hba data structure.
2716 * @acqe_link: pointer to the async link completion queue entry.
2717 *
2718 * This routine is to parse the SLI4 link attention type and translate it
2719 * into the base driver's link attention type coding.
2720 *
2721 * Return: Link attention type in terms of base driver's coding.
2722 **/
2723static uint8_t
2724lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2725 struct lpfc_acqe_link *acqe_link)
2726{
2727 uint8_t att_type;
2728
2729 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2730 case LPFC_ASYNC_LINK_STATUS_DOWN:
2731 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2732 att_type = AT_LINK_DOWN;
2733 break;
2734 case LPFC_ASYNC_LINK_STATUS_UP:
2735 /* Ignore physical link up events - wait for logical link up */
2736 att_type = AT_RESERVED;
2737 break;
2738 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2739 att_type = AT_LINK_UP;
2740 break;
2741 default:
2742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2743 "0399 Invalid link attention type: x%x\n",
2744 bf_get(lpfc_acqe_link_status, acqe_link));
2745 att_type = AT_RESERVED;
2746 break;
2747 }
2748 return att_type;
2749}
2750
2751/**
2752 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2753 * @phba: pointer to lpfc hba data structure.
2754 * @acqe_link: pointer to the async link completion queue entry.
2755 *
2756 * This routine is to parse the SLI4 link-attention link speed and translate
2757 * it into the base driver's link-attention link speed coding.
2758 *
2759 * Return: Link-attention link speed in terms of base driver's coding.
2760 **/
2761static uint8_t
2762lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2763 struct lpfc_acqe_link *acqe_link)
2764{
2765 uint8_t link_speed;
2766
2767 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2768 case LPFC_ASYNC_LINK_SPEED_ZERO:
2769 link_speed = LA_UNKNW_LINK;
2770 break;
2771 case LPFC_ASYNC_LINK_SPEED_10MBPS:
2772 link_speed = LA_UNKNW_LINK;
2773 break;
2774 case LPFC_ASYNC_LINK_SPEED_100MBPS:
2775 link_speed = LA_UNKNW_LINK;
2776 break;
2777 case LPFC_ASYNC_LINK_SPEED_1GBPS:
2778 link_speed = LA_1GHZ_LINK;
2779 break;
2780 case LPFC_ASYNC_LINK_SPEED_10GBPS:
2781 link_speed = LA_10GHZ_LINK;
2782 break;
2783 default:
2784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2785 "0483 Invalid link-attention link speed: x%x\n",
2786 bf_get(lpfc_acqe_link_speed, acqe_link));
2787 link_speed = LA_UNKNW_LINK;
2788 break;
2789 }
2790 return link_speed;
2791}
2792
2793/**
2794 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2795 * @phba: pointer to lpfc hba data structure.
2796 * @acqe_link: pointer to the async link completion queue entry.
2797 *
2798 * This routine is to handle the SLI4 asynchronous link event.
2799 **/
2800static void
2801lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2802 struct lpfc_acqe_link *acqe_link)
2803{
2804 struct lpfc_dmabuf *mp;
2805 LPFC_MBOXQ_t *pmb;
2806 MAILBOX_t *mb;
2807 READ_LA_VAR *la;
2808 uint8_t att_type;
2809
2810 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2811 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2812 return;
2813 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2814 if (!pmb) {
2815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2816 "0395 The mboxq allocation failed\n");
2817 return;
2818 }
2819 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2820 if (!mp) {
2821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2822 "0396 The lpfc_dmabuf allocation failed\n");
2823 goto out_free_pmb;
2824 }
2825 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2826 if (!mp->virt) {
2827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2828 "0397 The mbuf allocation failed\n");
2829 goto out_free_dmabuf;
2830 }
2831
2832 /* Cleanup any outstanding ELS commands */
2833 lpfc_els_flush_all_cmd(phba);
2834
2835 /* Block ELS IOCBs until we have done process link event */
2836 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2837
2838 /* Update link event statistics */
2839 phba->sli.slistat.link_event++;
2840
2841 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2842 lpfc_read_la(phba, pmb, mp);
2843 pmb->vport = phba->pport;
2844
2845 /* Parse and translate status field */
2846 mb = &pmb->u.mb;
2847 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2848
2849 /* Parse and translate link attention fields */
2850 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2851 la->eventTag = acqe_link->event_tag;
2852 la->attType = att_type;
2853 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2854
 2855 /* Fake the following irrelevant fields */
2856 la->topology = TOPOLOGY_PT_PT;
2857 la->granted_AL_PA = 0;
2858 la->il = 0;
2859 la->pb = 0;
2860 la->fa = 0;
2861 la->mm = 0;
2862
2863 /* Keep the link status for extra SLI4 state machine reference */
2864 phba->sli4_hba.link_state.speed =
2865 bf_get(lpfc_acqe_link_speed, acqe_link);
2866 phba->sli4_hba.link_state.duplex =
2867 bf_get(lpfc_acqe_link_duplex, acqe_link);
2868 phba->sli4_hba.link_state.status =
2869 bf_get(lpfc_acqe_link_status, acqe_link);
2870 phba->sli4_hba.link_state.physical =
2871 bf_get(lpfc_acqe_link_physical, acqe_link);
2872 phba->sli4_hba.link_state.fault =
2873 bf_get(lpfc_acqe_link_fault, acqe_link);
2874
2875 /* Invoke the lpfc_handle_latt mailbox command callback function */
2876 lpfc_mbx_cmpl_read_la(phba, pmb);
2877
2878 return;
2879
2880out_free_dmabuf:
2881 kfree(mp);
2882out_free_pmb:
2883 mempool_free(pmb, phba->mbox_mem_pool);
2884}
2885
2886/**
2887 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2888 * @phba: pointer to lpfc hba data structure.
 2889 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2890 *
2891 * This routine is to handle the SLI4 asynchronous fcoe event.
2892 **/
2893static void
2894lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2895 struct lpfc_acqe_fcoe *acqe_fcoe)
2896{
2897 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2898 int rc;
2899
2900 switch (event_type) {
2901 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2902 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2903 "2546 New FCF found index 0x%x tag 0x%x \n",
2904 acqe_fcoe->fcf_index,
2905 acqe_fcoe->event_tag);
2906 /*
2907 * If the current FCF is in discovered state,
2908 * do nothing.
2909 */
2910 spin_lock_irq(&phba->hbalock);
2911 if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2912 spin_unlock_irq(&phba->hbalock);
2913 break;
2914 }
2915 spin_unlock_irq(&phba->hbalock);
2916
2917 /* Read the FCF table and re-discover SAN. */
2918 rc = lpfc_sli4_read_fcf_record(phba,
2919 LPFC_FCOE_FCF_GET_FIRST);
2920 if (rc)
2921 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2922 "2547 Read FCF record failed 0x%x\n",
2923 rc);
2924 break;
2925
2926 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2928 "2548 FCF Table full count 0x%x tag 0x%x \n",
2929 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2930 acqe_fcoe->event_tag);
2931 break;
2932
2933 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2934 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
 2935 "2549 FCF disconnected from network index 0x%x"
2936 " tag 0x%x \n", acqe_fcoe->fcf_index,
2937 acqe_fcoe->event_tag);
 2938 /* If the event is not for the currently used fcf, do nothing */
2939 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2940 break;
2941 /*
 2942 * Currently, the driver supports only one FCF - so treat this as
2943 * a link down.
2944 */
2945 lpfc_linkdown(phba);
2946 /* Unregister FCF if no devices connected to it */
2947 lpfc_unregister_unused_fcf(phba);
2948 break;
2949
2950 default:
2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2952 "0288 Unknown FCoE event type 0x%x event tag "
2953 "0x%x\n", event_type, acqe_fcoe->event_tag);
2954 break;
2955 }
2956}
2957
2958/**
2959 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2960 * @phba: pointer to lpfc hba data structure.
 2961 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
2962 *
2963 * This routine is to handle the SLI4 asynchronous dcbx event.
2964 **/
2965static void
2966lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2967 struct lpfc_acqe_dcbx *acqe_dcbx)
2968{
2969 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2970 "0290 The SLI4 DCBX asynchronous event is not "
2971 "handled yet\n");
2972}
2973
2974/**
2975 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2976 * @phba: pointer to lpfc hba data structure.
2977 *
2978 * This routine is invoked by the worker thread to process all the pending
2979 * SLI4 asynchronous events.
2980 **/
2981void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2982{
2983 struct lpfc_cq_event *cq_event;
2984
2985 /* First, declare the async event has been handled */
2986 spin_lock_irq(&phba->hbalock);
2987 phba->hba_flag &= ~ASYNC_EVENT;
2988 spin_unlock_irq(&phba->hbalock);
2989 /* Now, handle all the async events */
2990 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2991 /* Get the first event from the head of the event queue */
2992 spin_lock_irq(&phba->hbalock);
2993 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2994 cq_event, struct lpfc_cq_event, list);
2995 spin_unlock_irq(&phba->hbalock);
2996 /* Process the asynchronous event */
2997 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2998 case LPFC_TRAILER_CODE_LINK:
2999 lpfc_sli4_async_link_evt(phba,
3000 &cq_event->cqe.acqe_link);
3001 break;
3002 case LPFC_TRAILER_CODE_FCOE:
3003 lpfc_sli4_async_fcoe_evt(phba,
3004 &cq_event->cqe.acqe_fcoe);
3005 break;
3006 case LPFC_TRAILER_CODE_DCBX:
3007 lpfc_sli4_async_dcbx_evt(phba,
3008 &cq_event->cqe.acqe_dcbx);
3009 break;
3010 default:
3011 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3012 "1804 Invalid asynchrous event code: "
3013 "x%x\n", bf_get(lpfc_trailer_code,
3014 &cq_event->cqe.mcqe_cmpl));
3015 break;
3016 }
3017 /* Free the completion event processed to the free pool */
3018 lpfc_sli4_cq_event_release(phba, cq_event);
3019 }
3020}
3021
3022/**
3023 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3024 * @phba: pointer to lpfc hba data structure.
3025 * @dev_grp: The HBA PCI-Device group number.
3026 *
3027 * This routine is invoked to set up the per HBA PCI-Device group function
3028 * API jump table entries.
3029 *
3030 * Return: 0 if success, otherwise -ENODEV
3031 **/
3032int
3033lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3034{
3035 int rc;
3036
3037 /* Set up lpfc PCI-device group */
3038 phba->pci_dev_grp = dev_grp;
3039
3040 /* The LPFC_PCI_DEV_OC uses SLI4 */
3041 if (dev_grp == LPFC_PCI_DEV_OC)
3042 phba->sli_rev = LPFC_SLI_REV4;
3043
3044 /* Set up device INIT API function jump table */
3045 rc = lpfc_init_api_table_setup(phba, dev_grp);
3046 if (rc)
3047 return -ENODEV;
3048 /* Set up SCSI API function jump table */
3049 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3050 if (rc)
3051 return -ENODEV;
3052 /* Set up SLI API function jump table */
3053 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3054 if (rc)
3055 return -ENODEV;
3056 /* Set up MBOX API function jump table */
3057 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3058 if (rc)
3059 return -ENODEV;
3060
3061 return 0;
3062}
3063
3064/**
3065 * lpfc_log_intr_mode - Log the active interrupt mode
3066 * @phba: pointer to lpfc hba data structure.
3067 * @intr_mode: active interrupt mode adopted.
3068 *
3069 * This routine is invoked to log the interrupt mode currently in use by
3070 * the device.
3071 **/
3072static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3073{
3074 switch (intr_mode) {
3075 case 0:
3076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3077 "0470 Enable INTx interrupt mode.\n");
3078 break;
3079 case 1:
3080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3081 "0481 Enabled MSI interrupt mode.\n");
3082 break;
3083 case 2:
3084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3085 "0480 Enabled MSI-X interrupt mode.\n");
3086 break;
3087 default:
3088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3089 "0482 Illegal interrupt mode.\n");
3090 break;
3091 }
3092 return;
3093}
3094
3095/**
3096 * lpfc_enable_pci_dev - Enable a generic PCI device.
3097 * @phba: pointer to lpfc hba data structure.
3098 *
3099 * This routine is invoked to enable the PCI device; it performs the PCI
3100 * setup that is common to all lpfc device types.
3101 *
3102 * Return codes
3103 * 0 - successful
3104 * other values - error
3105 **/
3106static int
3107lpfc_enable_pci_dev(struct lpfc_hba *phba)
3108{
3109 struct pci_dev *pdev;
3110 int bars;
3111
3112 /* Obtain PCI device reference */
3113 if (!phba->pcidev)
3114 goto out_error;
3115 else
3116 pdev = phba->pcidev;
3117 /* Select PCI BARs */
3118 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3119 /* Enable PCI device */
3120 if (pci_enable_device_mem(pdev))
3121 goto out_error;
3122 /* Request PCI resource for the device */
3123 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3124 goto out_disable_device;
3125 /* Set up device as PCI master and save state for EEH */
3126 pci_set_master(pdev);
3127 pci_try_set_mwi(pdev);
3128 pci_save_state(pdev);
3129
3130 return 0;
3131
3132out_disable_device:
3133 pci_disable_device(pdev);
3134out_error:
3135 return -ENODEV;
3136}
3137
3138/**
3139 * lpfc_disable_pci_dev - Disable a generic PCI device.
3140 * @phba: pointer to lpfc hba data structure.
3141 *
3142 * This routine is invoked to disable the PCI device; it performs the PCI
3143 * teardown that is common to all lpfc device types.
3144 **/
3145static void
3146lpfc_disable_pci_dev(struct lpfc_hba *phba)
3147{
3148 struct pci_dev *pdev;
3149 int bars;
3150
3151 /* Obtain PCI device reference */
3152 if (!phba->pcidev)
3153 return;
3154 else
3155 pdev = phba->pcidev;
3156 /* Select PCI BARs */
3157 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3158 /* Release PCI resource and disable PCI device */
3159 pci_release_selected_regions(pdev, bars);
3160 pci_disable_device(pdev);
3161 /* Null out PCI private reference to driver */
3162 pci_set_drvdata(pdev, NULL);
3163
3164 return;
3165}
3166
3167/**
3168 * lpfc_reset_hba - Reset a hba
3169 * @phba: pointer to lpfc hba data structure.
3170 *
3171 * This routine is invoked to reset a hba device. It brings the HBA
3172 * offline, performs a board restart, and then brings the board back
3173 * online. lpfc_offline calls lpfc_sli_hba_down, which cleans up
3174 * outstanding mailbox commands.
3175 **/
3176void
3177lpfc_reset_hba(struct lpfc_hba *phba)
3178{
3179 /* If resets are disabled then set error state and return. */
3180 if (!phba->cfg_enable_hba_reset) {
3181 phba->link_state = LPFC_HBA_ERROR;
3182 return;
3183 }
3184 lpfc_offline_prep(phba);
3185 lpfc_offline(phba);
3186 lpfc_sli_brdrestart(phba);
3187 lpfc_online(phba);
3188 lpfc_unblock_mgmt_io(phba);
3189}
3190
3191/**
3192 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3193 * @phba: pointer to lpfc hba data structure.
3194 *
3195 * This routine is invoked to set up the driver internal resources specific to
3196 * support the SLI-3 HBA device it is attached to.
3197 *
3198 * Return codes
3199 * 0 - successful
3200 * other values - error
3201 **/
3202static int
3203lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3204{
3205 struct lpfc_sli *psli;
3206
3207 /*
3208 * Initialize timers used by driver
3209 */
3210
3211 /* Heartbeat timer */
3212 init_timer(&phba->hb_tmofunc);
3213 phba->hb_tmofunc.function = lpfc_hb_timeout;
3214 phba->hb_tmofunc.data = (unsigned long)phba;
3215
3216 psli = &phba->sli;
3217 /* MBOX heartbeat timer */
3218 init_timer(&psli->mbox_tmo);
3219 psli->mbox_tmo.function = lpfc_mbox_timeout;
3220 psli->mbox_tmo.data = (unsigned long) phba;
3221 /* FCP polling mode timer */
3222 init_timer(&phba->fcp_poll_timer);
3223 phba->fcp_poll_timer.function = lpfc_poll_timeout;
3224 phba->fcp_poll_timer.data = (unsigned long) phba;
3225 /* Fabric block timer */
3226 init_timer(&phba->fabric_block_timer);
3227 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3228 phba->fabric_block_timer.data = (unsigned long) phba;
3229 /* EA polling mode timer */
3230 init_timer(&phba->eratt_poll);
3231 phba->eratt_poll.function = lpfc_poll_eratt;
3232 phba->eratt_poll.data = (unsigned long) phba;
3233
3234 /* Host attention work mask setup */
3235 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3236 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3237
3238 /* Get all the module params for configuring this host */
3239 lpfc_get_cfgparam(phba);
3240 /*
3241 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3242 * used to create the sg_dma_buf_pool must be dynamically calculated.
3243 * 2 segments are added since the IOCB needs a command and response bde.
3244 */
3245 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3246 sizeof(struct fcp_rsp) +
3247 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3248
3249 if (phba->cfg_enable_bg) {
3250 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3251 phba->cfg_sg_dma_buf_size +=
3252 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3253 }
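	/*
	 * Worked example (illustrative only; the struct sizes below are
	 * assumptions, not taken from this file): with
	 * sizeof(struct fcp_cmnd) == 32, sizeof(struct fcp_rsp) == 160,
	 * sizeof(struct ulp_bde64) == 12 and a cfg_sg_seg_cnt of 64, the
	 * calculation above gives 32 + 160 + (64 + 2) * 12 = 984 bytes per
	 * sg_dma_buf_pool entry before any BlockGuard adjustment.
	 */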
3254
3255 /* Also reinitialize the host templates with new values. */
3256 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3257 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3258
3259 phba->max_vpi = LPFC_MAX_VPI;
3260 /* This will be set to correct value after config_port mbox */
3261 phba->max_vports = 0;
3262
3263 /*
3264 * Initialize the SLI Layer to run with lpfc HBAs.
3265 */
3266 lpfc_sli_setup(phba);
3267 lpfc_sli_queue_setup(phba);
3268
3269 /* Allocate device driver memory */
3270 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3271 return -ENOMEM;
3272
3273 return 0;
3274}
3275
3276/**
3277 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3278 * @phba: pointer to lpfc hba data structure.
3279 *
3280 * This routine is invoked to unset the driver internal resources set up
3281 * specific to supporting the SLI-3 HBA device it is attached to.
3282 **/
3283static void
3284lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3285{
3286 /* Free device driver memory allocated */
3287 lpfc_mem_free_all(phba);
3288
3289 return;
3290}
3291
3292/**
3293 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3294 * @phba: pointer to lpfc hba data structure.
3295 *
3296 * This routine is invoked to set up the driver internal resources specific to
3297 * support the SLI-4 HBA device it is attached to.
3298 *
3299 * Return codes
3300 * 0 - successful
3301 * other values - error
3302 **/
3303static int
3304lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3305{
3306 struct lpfc_sli *psli;
3307 int rc;
3308 int i, hbq_count;
3309
3310 /* Before proceed, wait for POST done and device ready */
3311 rc = lpfc_sli4_post_status_check(phba);
3312 if (rc)
3313 return -ENODEV;
3314
3315 /*
3316 * Initialize timers used by driver
3317 */
3318
3319 /* Heartbeat timer */
3320 init_timer(&phba->hb_tmofunc);
3321 phba->hb_tmofunc.function = lpfc_hb_timeout;
3322 phba->hb_tmofunc.data = (unsigned long)phba;
3323
3324 psli = &phba->sli;
3325 /* MBOX heartbeat timer */
3326 init_timer(&psli->mbox_tmo);
3327 psli->mbox_tmo.function = lpfc_mbox_timeout;
3328 psli->mbox_tmo.data = (unsigned long) phba;
3329 /* Fabric block timer */
3330 init_timer(&phba->fabric_block_timer);
3331 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3332 phba->fabric_block_timer.data = (unsigned long) phba;
3333 /* EA polling mode timer */
3334 init_timer(&phba->eratt_poll);
3335 phba->eratt_poll.function = lpfc_poll_eratt;
3336 phba->eratt_poll.data = (unsigned long) phba;
3337 /*
3338 * We need to do a READ_CONFIG mailbox command here before
3339 * calling lpfc_get_cfgparam. For VFs this will report the
3340 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3341 * All of the resources allocated
3342 * for this Port are tied to these values.
3343 */
3344 /* Get all the module params for configuring this host */
3345 lpfc_get_cfgparam(phba);
3346 phba->max_vpi = LPFC_MAX_VPI;
3347 /* This will be set to correct value after the read_config mbox */
3348 phba->max_vports = 0;
3349
3350 /* Program the default value of vlan_id and fc_map */
3351 phba->valid_vlan = 0;
3352 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3353 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3354 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3355
3356 /*
3357 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3358 * used to create the sg_dma_buf_pool must be dynamically calculated.
3359 * 2 segments are added since the IOCB needs a command and response bde.
3360 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3361 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3362 * Table of sgl sizes and seg_cnt:
3363 * sgl size, sg_seg_cnt total seg
3364 * 1k 50 52
3365 * 2k 114 116
3366 * 4k 242 244
3367 * 8k 498 500
3368 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3369 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3370 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3371 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3372 */
3373 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3374 phba->cfg_sg_seg_cnt = 50;
3375 else if (phba->cfg_sg_seg_cnt <= 114)
3376 phba->cfg_sg_seg_cnt = 114;
3377 else if (phba->cfg_sg_seg_cnt <= 242)
3378 phba->cfg_sg_seg_cnt = 242;
3379 else
3380 phba->cfg_sg_seg_cnt = 498;
3381
3382 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3383 + sizeof(struct fcp_rsp);
3384 phba->cfg_sg_dma_buf_size +=
3385 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3386
3387 /* Initialize buffer queue management fields */
3388 hbq_count = lpfc_sli_hbq_count();
3389 for (i = 0; i < hbq_count; ++i)
3390 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3391 INIT_LIST_HEAD(&phba->rb_pend_list);
3392 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3393 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3394
3395 /*
3396 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3397 */
3398 /* Initialize the Abort scsi buffer list used by driver */
3399 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3400 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3401 /* This abort list used by worker thread */
3402 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3403
3404 /*
3405 * Initialize driver internal slow-path work queues
3406 */
3407
3408 /* Driver internal slow-path CQ Event pool */
3409 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3410 /* Response IOCB work queue list */
3411 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3412 /* Asynchronous event CQ Event work queue list */
3413 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3414 /* Fast-path XRI aborted CQ Event work queue list */
3415 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3416 /* Slow-path XRI aborted CQ Event work queue list */
3417 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3418 /* Receive queue CQ Event work queue list */
3419 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3420
3421 /* Initialize the driver internal SLI layer lists. */
3422 lpfc_sli_setup(phba);
3423 lpfc_sli_queue_setup(phba);
3424
3425 /* Allocate device driver memory */
3426 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3427 if (rc)
3428 return -ENOMEM;
3429
3430 /* Create the bootstrap mailbox command */
3431 rc = lpfc_create_bootstrap_mbox(phba);
3432 if (unlikely(rc))
3433 goto out_free_mem;
3434
3435 /* Set up the host's endian order with the device. */
3436 rc = lpfc_setup_endian_order(phba);
3437 if (unlikely(rc))
3438 goto out_free_bsmbx;
3439
3440 /* Set up the hba's configuration parameters. */
3441 rc = lpfc_sli4_read_config(phba);
3442 if (unlikely(rc))
3443 goto out_free_bsmbx;
3444
3445 /* Perform a function reset */
3446 rc = lpfc_pci_function_reset(phba);
3447 if (unlikely(rc))
3448 goto out_free_bsmbx;
3449
3450 /* Create all the SLI4 queues */
3451 rc = lpfc_sli4_queue_create(phba);
3452 if (rc)
3453 goto out_free_bsmbx;
3454
3455 /* Create driver internal CQE event pool */
3456 rc = lpfc_sli4_cq_event_pool_create(phba);
3457 if (rc)
3458 goto out_destroy_queue;
3459
3460 /* Initialize and populate the iocb list per host */
3461 rc = lpfc_init_sgl_list(phba);
3462 if (rc) {
3463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3464 "1400 Failed to initialize sgl list.\n");
3465 goto out_destroy_cq_event_pool;
3466 }
3467 rc = lpfc_init_active_sgl_array(phba);
3468 if (rc) {
3469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3470 "1430 Failed to initialize sgl list.\n");
3471 goto out_free_sgl_list;
3472 }
3473
3474 rc = lpfc_sli4_init_rpi_hdrs(phba);
3475 if (rc) {
3476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3477 "1432 Failed to initialize rpi headers.\n");
3478 goto out_free_active_sgl;
3479 }
3480
3481 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3482 phba->cfg_fcp_eq_count), GFP_KERNEL);
3483 if (!phba->sli4_hba.fcp_eq_hdl) {
3484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3485 "2572 Failed allocate memory for fast-path "
3486 "per-EQ handle array\n");
3487 goto out_remove_rpi_hdrs;
3488 }
3489
3490 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3491 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3492 if (!phba->sli4_hba.msix_entries) {
3493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3494 "2573 Failed allocate memory for msi-x "
3495 "interrupt vector entries\n");
3496 goto out_free_fcp_eq_hdl;
3497 }
3498
3499 return rc;
3500
3501out_free_fcp_eq_hdl:
3502 kfree(phba->sli4_hba.fcp_eq_hdl);
3503out_remove_rpi_hdrs:
3504 lpfc_sli4_remove_rpi_hdrs(phba);
3505out_free_active_sgl:
3506 lpfc_free_active_sgl(phba);
3507out_free_sgl_list:
3508 lpfc_free_sgl_list(phba);
3509out_destroy_cq_event_pool:
3510 lpfc_sli4_cq_event_pool_destroy(phba);
3511out_destroy_queue:
3512 lpfc_sli4_queue_destroy(phba);
3513out_free_bsmbx:
3514 lpfc_destroy_bootstrap_mbox(phba);
3515out_free_mem:
3516 lpfc_mem_free(phba);
3517 return rc;
3518}
3519
3520/**
3521 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3522 * @phba: pointer to lpfc hba data structure.
3523 *
3524 * This routine is invoked to unset the driver internal resources set up
3525 * specific to supporting the SLI-4 HBA device it is attached to.
3526 **/
3527static void
3528lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3529{
3530 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3531
3532 /* unregister default FCFI from the HBA */
3533 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3534
3535 /* Free the default FCR table */
3536 lpfc_sli_remove_dflt_fcf(phba);
3537
3538 /* Free memory allocated for msi-x interrupt vector entries */
3539 kfree(phba->sli4_hba.msix_entries);
3540
3541 /* Free memory allocated for fast-path work queue handles */
3542 kfree(phba->sli4_hba.fcp_eq_hdl);
3543
3544 /* Free the allocated rpi headers. */
3545 lpfc_sli4_remove_rpi_hdrs(phba);
3546
3547 /* Free the ELS sgl list */
3548 lpfc_free_active_sgl(phba);
3549 lpfc_free_sgl_list(phba);
3550
3551 /* Free the SCSI sgl management array */
3552 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3553
3554 /* Free the SLI4 queues */
3555 lpfc_sli4_queue_destroy(phba);
3556
3557 /* Free the completion queue EQ event pool */
3558 lpfc_sli4_cq_event_release_all(phba);
3559 lpfc_sli4_cq_event_pool_destroy(phba);
3560
3561 /* Reset SLI4 HBA FCoE function */
3562 lpfc_pci_function_reset(phba);
3563
3564 /* Free the bsmbx region. */
3565 lpfc_destroy_bootstrap_mbox(phba);
3566
3567 /* Free the SLI Layer memory with SLI4 HBAs */
3568 lpfc_mem_free_all(phba);
3569
3570 /* Free the current connect table */
3571 list_for_each_entry_safe(conn_entry, next_conn_entry,
3572 &phba->fcf_conn_rec_list, list)
3573 kfree(conn_entry);
3574
3575 return;
3576}
3577
3578/**
3579 * lpfc_init_api_table_setup - Set up init api function jump table
3580 * @phba: The hba struct for which this call is being executed.
3581 * @dev_grp: The HBA PCI-Device group number.
3582 *
3583 * This routine sets up the device INIT interface API function jump table
3584 * in @phba struct.
3585 *
3586 * Returns: 0 - success, -ENODEV - failure.
3587 **/
3588int
3589lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3590{
3591 switch (dev_grp) {
3592 case LPFC_PCI_DEV_LP:
3593 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3594 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3595 phba->lpfc_stop_port = lpfc_stop_port_s3;
3596 break;
3597 case LPFC_PCI_DEV_OC:
3598 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3599 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3600 phba->lpfc_stop_port = lpfc_stop_port_s4;
3601 break;
3602 default:
3603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3604 "1431 Invalid HBA PCI-device group: 0x%x\n",
3605 dev_grp);
3606 return -ENODEV;
3607 break;
3608 }
3609 return 0;
3610}
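/*
 * A minimal usage sketch, not code from this file: once the jump table has
 * been populated, callers dispatch through the phba function pointers
 * instead of branching on sli_rev, e.g.
 *
 *	phba->lpfc_stop_port(phba);
 *	phba->lpfc_hba_down_post(phba);
 *
 * which resolve to the _s3 or _s4 variants selected above.
 */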
3611
3612/**
3613 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3614 * @phba: pointer to lpfc hba data structure.
3615 *
3616 * This routine is invoked to set up the driver internal resources before the
3617 * device specific resource setup to support the HBA device it is attached to.
3618 *
3619 * Return codes
3620 * 0 - successful
3621 * other values - error
3622 **/
3623static int
3624lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3625{
3626 /*
3627 * Driver resources common to all SLI revisions
3628 */
3629 atomic_set(&phba->fast_event_count, 0);
3630 spin_lock_init(&phba->hbalock);
3631
3632 /* Initialize ndlp management spinlock */
3633 spin_lock_init(&phba->ndlp_lock);
3634
3635 INIT_LIST_HEAD(&phba->port_list);
3636 INIT_LIST_HEAD(&phba->work_list);
3637 init_waitqueue_head(&phba->wait_4_mlo_m_q);
3638
3639 /* Initialize the wait queue head for the kernel thread */
3640 init_waitqueue_head(&phba->work_waitq);
3641
3642 /* Initialize the scsi buffer list used by driver for scsi IO */
3643 spin_lock_init(&phba->scsi_buf_list_lock);
3644 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3645
3646 /* Initialize the fabric iocb list */
3647 INIT_LIST_HEAD(&phba->fabric_iocb_list);
3648
3649 /* Initialize list to save ELS buffers */
3650 INIT_LIST_HEAD(&phba->elsbuf);
3651
3652 /* Initialize FCF connection rec list */
3653 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3654
3655 return 0;
3656}
3657
3658/**
3659 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3660 * @phba: pointer to lpfc hba data structure.
3661 *
3662 * This routine is invoked to set up the driver internal resources after the
3663 * device specific resource setup to support the HBA device it is attached to.
3664 *
3665 * Return codes
3666 * 0 - successful
3667 * other values - error
3668 **/
3669static int
3670lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3671{
3672 int error;
3673
3674 /* Startup the kernel thread for this host adapter. */
3675 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3676 "lpfc_worker_%d", phba->brd_no);
3677 if (IS_ERR(phba->worker_thread)) {
3678 error = PTR_ERR(phba->worker_thread);
3679 return error;
3680 }
3681
3682 return 0;
3683}
3684
3685/**
3686 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3687 * @phba: pointer to lpfc hba data structure.
3688 *
3689 * This routine is invoked to unset the driver internal resources set up after
3690 * the device specific resource setup for supporting the HBA device it is
3691 * attached to.
3692 **/
3693static void
3694lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3695{
3696 /* Stop kernel worker thread */
3697 kthread_stop(phba->worker_thread);
3698}
3699
3700/**
3701 * lpfc_free_iocb_list - Free iocb list.
3702 * @phba: pointer to lpfc hba data structure.
3703 *
3704 * This routine is invoked to free the driver's IOCB list and memory.
3705 **/
3706static void
3707lpfc_free_iocb_list(struct lpfc_hba *phba)
3708{
3709 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3710
3711 spin_lock_irq(&phba->hbalock);
3712 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3713 &phba->lpfc_iocb_list, list) {
3714 list_del(&iocbq_entry->list);
3715 kfree(iocbq_entry);
3716 phba->total_iocbq_bufs--;
3717 }
3718 spin_unlock_irq(&phba->hbalock);
3719
3720 return;
3721}
3722
3723/**
3724 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3725 * @phba: pointer to lpfc hba data structure.
3726 *
3727 * This routine is invoked to allocate and initialize the driver's IOCB
3728 * list and set up the IOCB tag array accordingly.
3729 *
3730 * Return codes
3731 * 0 - successful
3732 * other values - error
3733 **/
3734static int
3735lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3736{
3737 struct lpfc_iocbq *iocbq_entry = NULL;
3738 uint16_t iotag;
3739 int i;
3740
3741 /* Initialize and populate the iocb list per host. */
3742 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3743 for (i = 0; i < iocb_count; i++) {
3744 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3745 if (iocbq_entry == NULL) {
3746 printk(KERN_ERR "%s: only allocated %d iocbs of "
3747 "expected %d count. Unloading driver.\n",
3748 __func__, i, iocb_count);
3749 goto out_free_iocbq;
3750 }
3751
3752 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3753 if (iotag == 0) {
3754 kfree(iocbq_entry);
3755 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3756 "Unloading driver.\n", __func__);
3757 goto out_free_iocbq;
3758 }
3759 iocbq_entry->sli4_xritag = NO_XRI;
3760
3761 spin_lock_irq(&phba->hbalock);
3762 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3763 phba->total_iocbq_bufs++;
3764 spin_unlock_irq(&phba->hbalock);
3765 }
3766
3767 return 0;
3768
3769out_free_iocbq:
3770 lpfc_free_iocb_list(phba);
3771
3772 return -ENOMEM;
3773}
3774
3775/**
3776 * lpfc_free_sgl_list - Free sgl list.
3777 * @phba: pointer to lpfc hba data structure.
3778 *
3779 * This routine is invoked to free the driver's sgl list and memory.
3780 **/
3781static void
3782lpfc_free_sgl_list(struct lpfc_hba *phba)
3783{
3784 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3785 LIST_HEAD(sglq_list);
3786 int rc = 0;
3787
3788 spin_lock_irq(&phba->hbalock);
3789 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3790 spin_unlock_irq(&phba->hbalock);
3791
3792 list_for_each_entry_safe(sglq_entry, sglq_next,
3793 &sglq_list, list) {
3794 list_del(&sglq_entry->list);
3795 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3796 kfree(sglq_entry);
3797 phba->sli4_hba.total_sglq_bufs--;
3798 }
3799 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3800 if (rc) {
3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3802 "2005 Unable to deregister pages from HBA: %x", rc);
3803 }
3804 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3805}
3806
3807/**
3808 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3809 * @phba: pointer to lpfc hba data structure.
3810 *
3811 * This routine is invoked to allocate the driver's active sgl memory.
3812 * This array will hold the sglq_entry's for active IOs.
3813 **/
3814static int
3815lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3816{
3817 int size;
3818 size = sizeof(struct lpfc_sglq *);
3819 size *= phba->sli4_hba.max_cfg_param.max_xri;
3820
3821 phba->sli4_hba.lpfc_sglq_active_list =
3822 kzalloc(size, GFP_KERNEL);
3823 if (!phba->sli4_hba.lpfc_sglq_active_list)
3824 return -ENOMEM;
3825 return 0;
3826}
3827
3828/**
3829 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3830 * @phba: pointer to lpfc hba data structure.
3831 *
3832 * This routine is invoked to walk through the array of active sglq entries
3833 * and free all of the resources.
3834 * This is just a place holder for now.
3835 **/
3836static void
3837lpfc_free_active_sgl(struct lpfc_hba *phba)
3838{
3839 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3840}
3841
3842/**
3843 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3844 * @phba: pointer to lpfc hba data structure.
3845 *
3846 * This routine is invoked to allocate and initialize the driver's sgl
3847 * list and set up the sgl xritag tag array accordingly.
3848 *
3849 * Return codes
3850 * 0 - successful
3851 * other values - error
3852 **/
3853static int
3854lpfc_init_sgl_list(struct lpfc_hba *phba)
3855{
3856 struct lpfc_sglq *sglq_entry = NULL;
3857 int i;
3858 int els_xri_cnt;
3859
3860 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3861 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3862 "2400 lpfc_init_sgl_list els %d.\n",
3863 els_xri_cnt);
3864 /* Initialize and populate the sglq list per host/VF. */
3865 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3866 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3867
3868 /* Sanity check on XRI management */
3869 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3871 "2562 No room left for SCSI XRI allocation: "
3872 "max_xri=%d, els_xri=%d\n",
3873 phba->sli4_hba.max_cfg_param.max_xri,
3874 els_xri_cnt);
3875 return -ENOMEM;
3876 }
3877
3878 /* Allocate memory for the ELS XRI management array */
3879 phba->sli4_hba.lpfc_els_sgl_array =
3880 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3881 GFP_KERNEL);
3882
3883 if (!phba->sli4_hba.lpfc_els_sgl_array) {
3884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3885 "2401 Failed to allocate memory for ELS "
3886 "XRI management array of size %d.\n",
3887 els_xri_cnt);
3888 return -ENOMEM;
3889 }
3890
3891 /* Keep the SCSI XRI into the XRI management array */
3892 phba->sli4_hba.scsi_xri_max =
3893 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3894 phba->sli4_hba.scsi_xri_cnt = 0;
3895
3896 phba->sli4_hba.lpfc_scsi_psb_array =
3897 kzalloc((sizeof(struct lpfc_scsi_buf *) *
3898 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3899
3900 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3901 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3902 "2563 Failed to allocate memory for SCSI "
3903 "XRI management array of size %d.\n",
3904 phba->sli4_hba.scsi_xri_max);
3905 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3906 return -ENOMEM;
3907 }
3908
3909 for (i = 0; i < els_xri_cnt; i++) {
3910 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3911 if (sglq_entry == NULL) {
3912 printk(KERN_ERR "%s: only allocated %d sgls of "
3913 "expected %d count. Unloading driver.\n",
3914 __func__, i, els_xri_cnt);
3915 goto out_free_mem;
3916 }
3917
3918 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3919 if (sglq_entry->sli4_xritag == NO_XRI) {
3920 kfree(sglq_entry);
3921 printk(KERN_ERR "%s: failed to allocate XRI.\n"
3922 "Unloading driver.\n", __func__);
3923 goto out_free_mem;
3924 }
3925 sglq_entry->buff_type = GEN_BUFF_TYPE;
3926 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3927 if (sglq_entry->virt == NULL) {
3928 kfree(sglq_entry);
3929 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3930 "Unloading driver.\n", __func__);
3931 goto out_free_mem;
3932 }
3933 sglq_entry->sgl = sglq_entry->virt;
3934 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3935
3936 /* The list order is used by later block SGL registration */
3937 spin_lock_irq(&phba->hbalock);
3938 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3939 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3940 phba->sli4_hba.total_sglq_bufs++;
3941 spin_unlock_irq(&phba->hbalock);
3942 }
3943 return 0;
3944
3945out_free_mem:
3946 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3947 lpfc_free_sgl_list(phba);
3948 return -ENOMEM;
3949}
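/*
 * Partitioning sketch (the numbers are hypothetical): if READ_CONFIG had
 * reported max_xri = 1024 and lpfc_sli4_get_els_iocb_cnt(phba) returned 256,
 * the code above would leave scsi_xri_max = 1024 - 256 = 768 XRIs for SCSI
 * I/O, with the first 256 XRIs backed by the ELS sglq entries built in the
 * loop above.
 */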
3950
3951/**
3952 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3953 * @phba: pointer to lpfc hba data structure.
3954 *
3955 * This routine is invoked to post rpi header templates to the
3956 * HBA consistent with the SLI-4 interface spec. This routine
3957 * posts a PAGE_SIZE memory region to the port to hold up to
3958 * PAGE_SIZE modulo 64 rpi context headers.
3959 * No locks are held here because this is an initialization routine
3960 * called only from probe or lpfc_online when interrupts are not
3961 * enabled and the driver is reinitializing the device.
3962 *
3963 * Return codes
3964 * 0 - successful
3965 * ENOMEM - No available memory
3966 * EIO - The mailbox failed to complete successfully.
3967 **/
3968int
3969lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3970{
3971 int rc = 0;
3972 int longs;
3973 uint16_t rpi_count;
3974 struct lpfc_rpi_hdr *rpi_hdr;
3975
3976 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3977
3978 /*
3979 * Provision an rpi bitmask range for discovery. The total count
3980 * is the difference between max and base + 1.
3981 */
3982 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3983 phba->sli4_hba.max_cfg_param.max_rpi - 1;
3984
3985 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3986 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3987 GFP_KERNEL);
3988 if (!phba->sli4_hba.rpi_bmask)
3989 return -ENOMEM;
3990
3991 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3992 if (!rpi_hdr) {
3993 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3994 "0391 Error during rpi post operation\n");
3995 lpfc_sli4_remove_rpis(phba);
3996 rc = -ENODEV;
3997 }
3998
3999 return rc;
4000}
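/*
 * Sizing sketch (illustrative): with BITS_PER_LONG == 64 and an rpi_count
 * of, say, 1000, the bitmap above needs (1000 + 63) / 64 = 16 longs, or
 * 128 bytes, to track which rpis have been provisioned for discovery.
 */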
4001
4002/**
4003 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4004 * @phba: pointer to lpfc hba data structure.
4005 *
4006 * This routine is invoked to allocate a single 4KB memory region to
4007 * support rpis and store it in the phba. This single region
4008 * provides support for up to 64 rpis. The region is used globally
4009 * by the device.
4010 *
4011 * Returns:
4012 * A valid rpi hdr on success.
4013 * A NULL pointer on any failure.
4014 **/
4015struct lpfc_rpi_hdr *
4016lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4017{
4018 uint16_t rpi_limit, curr_rpi_range;
4019 struct lpfc_dmabuf *dmabuf;
4020 struct lpfc_rpi_hdr *rpi_hdr;
4021
4022 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4023 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4024
4025 spin_lock_irq(&phba->hbalock);
4026 curr_rpi_range = phba->sli4_hba.next_rpi;
4027 spin_unlock_irq(&phba->hbalock);
4028
4029 /*
4030 * The port has a limited number of rpis. The increment here
4031 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4032 * and to allow the full max_rpi range per port.
4033 */
4034 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4035 return NULL;
4036
4037 /*
4038 * First allocate the protocol header region for the port. The
4039 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4040 */
4041 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4042 if (!dmabuf)
4043 return NULL;
4044
4045 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4046 LPFC_HDR_TEMPLATE_SIZE,
4047 &dmabuf->phys,
4048 GFP_KERNEL);
4049 if (!dmabuf->virt) {
4050 rpi_hdr = NULL;
4051 goto err_free_dmabuf;
4052 }
4053
4054 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4055 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4056 rpi_hdr = NULL;
4057 goto err_free_coherent;
4058 }
4059
4060 /* Save the rpi header data for cleanup later. */
4061 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4062 if (!rpi_hdr)
4063 goto err_free_coherent;
4064
4065 rpi_hdr->dmabuf = dmabuf;
4066 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4067 rpi_hdr->page_count = 1;
4068 spin_lock_irq(&phba->hbalock);
4069 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4070 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4071
4072 /*
4073 * The next_rpi stores the next modulo-64 rpi value to post
4074 * in any subsequent rpi memory region postings.
4075 */
4076 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4077 spin_unlock_irq(&phba->hbalock);
4078 return rpi_hdr;
4079
4080 err_free_coherent:
4081 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4082 dmabuf->virt, dmabuf->phys);
4083 err_free_dmabuf:
4084 kfree(dmabuf);
4085 return NULL;
4086}
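/*
 * Back-of-the-envelope check (assuming LPFC_RPI_HDR_COUNT is 64 and
 * LPFC_HDR_TEMPLATE_SIZE is 4096, as the comments above suggest): one
 * 4096-byte, 4K-aligned region covers 64 rpi context headers, i.e.
 * 4096 / 64 = 64 bytes per header, and next_rpi advances by 64 for each
 * region posted.
 */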
4087
4088/**
4089 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4090 * @phba: pointer to lpfc hba data structure.
4091 *
4092 * This routine is invoked to remove all memory resources allocated
4093 * to support rpis. This routine presumes the caller has released all
4094 * rpis consumed by fabric or port logins and is prepared to have
4095 * the header pages removed.
4096 **/
4097void
4098lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4099{
4100 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4101
4102 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4103 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4104 list_del(&rpi_hdr->list);
4105 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4106 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4107 kfree(rpi_hdr->dmabuf);
4108 kfree(rpi_hdr);
4109 }
4110
4111 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4112 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4113}
4114
4115/**
4116 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4117 * @pdev: pointer to pci device data structure.
4118 *
4119 * This routine is invoked to allocate the driver hba data structure for an
4120 * HBA device. If the allocation is successful, the phba reference to the
4121 * PCI device data structure is set.
4122 *
4123 * Return codes
4124 * pointer to @phba - successful
4125 * NULL - error
4126 **/
4127static struct lpfc_hba *
4128lpfc_hba_alloc(struct pci_dev *pdev)
4129{
4130 struct lpfc_hba *phba;
4131
4132 /* Allocate memory for HBA structure */
4133 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4134 if (!phba) {
4135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4136 "1417 Failed to allocate hba struct.\n");
4137 return NULL;
4138 }
4139
4140 /* Set reference to PCI device in HBA structure */
4141 phba->pcidev = pdev;
4142
4143 /* Assign an unused board number */
4144 phba->brd_no = lpfc_get_instance();
4145 if (phba->brd_no < 0) {
4146 kfree(phba);
4147 return NULL;
4148 }
4149
4150 return phba;
4151}
4152
4153/**
4154 * lpfc_hba_free - Free driver hba data structure with a device.
4155 * @phba: pointer to lpfc hba data structure.
4156 *
4157 * This routine is invoked to free the driver hba data structure with an
4158 * HBA device.
4159 **/
4160static void
4161lpfc_hba_free(struct lpfc_hba *phba)
4162{
4163 /* Release the driver assigned board number */
4164 idr_remove(&lpfc_hba_index, phba->brd_no);
4165
4166 kfree(phba);
4167 return;
4168}
4169
4170/**
4171 * lpfc_create_shost - Create hba physical port with associated scsi host.
4172 * @phba: pointer to lpfc hba data structure.
4173 *
4174 * This routine is invoked to create HBA physical port and associate a SCSI
4175 * host with it.
4176 *
4177 * Return codes
4178 * 0 - successful
4179 * other values - error
4180 **/
4181static int
4182lpfc_create_shost(struct lpfc_hba *phba)
4183{
4184 struct lpfc_vport *vport;
4185 struct Scsi_Host *shost;
4186
4187 /* Initialize HBA FC structure */
4188 phba->fc_edtov = FF_DEF_EDTOV;
4189 phba->fc_ratov = FF_DEF_RATOV;
4190 phba->fc_altov = FF_DEF_ALTOV;
4191 phba->fc_arbtov = FF_DEF_ARBTOV;
4192
4193 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4194 if (!vport)
4195 return -ENODEV;
4196
4197 shost = lpfc_shost_from_vport(vport);
4198 phba->pport = vport;
4199 lpfc_debugfs_initialize(vport);
4200 /* Put reference to SCSI host to driver's device private data */
4201 pci_set_drvdata(phba->pcidev, shost);
4202
4203 return 0;
4204}
4205
4206/**
4207 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4208 * @phba: pointer to lpfc hba data structure.
4209 *
4210 * This routine is invoked to destroy HBA physical port and the associated
4211 * SCSI host.
4212 **/
4213static void
4214lpfc_destroy_shost(struct lpfc_hba *phba)
4215{
4216 struct lpfc_vport *vport = phba->pport;
4217
4218 /* Destroy physical port that associated with the SCSI host */
4219 destroy_port(vport);
4220
4221 return;
4222}
4223
4224/**
4225 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4226 * @phba: pointer to lpfc hba data structure.
4227 * @shost: the shost to be used to detect Block guard settings.
4228 *
4229 * This routine sets up the local Block guard protocol settings for @shost.
4230 * This routine also allocates memory for debugging bg buffers.
4231 **/
4232static void
4233lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4234{
4235 int pagecnt = 10;
4236 if (lpfc_prot_mask && lpfc_prot_guard) {
4237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4238 "1478 Registering BlockGuard with the "
4239 "SCSI layer\n");
4240 scsi_host_set_prot(shost, lpfc_prot_mask);
4241 scsi_host_set_guard(shost, lpfc_prot_guard);
4242 }
4243 if (!_dump_buf_data) {
4244 while (pagecnt) {
4245 spin_lock_init(&_dump_buf_lock);
4246 _dump_buf_data =
4247 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4248 if (_dump_buf_data) {
4249 printk(KERN_ERR "BLKGRD allocated %d pages for "
4250 "_dump_buf_data at 0x%p\n",
4251 (1 << pagecnt), _dump_buf_data);
4252 _dump_buf_data_order = pagecnt;
4253 memset(_dump_buf_data, 0,
4254 ((1 << PAGE_SHIFT) << pagecnt));
4255 break;
4256 } else
4257 --pagecnt;
4258 }
4259 if (!_dump_buf_data_order)
4260 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4261 "memory for hexdump\n");
4262 } else
4263 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4264 "\n", _dump_buf_data);
4265 if (!_dump_buf_dif) {
4266 while (pagecnt) {
4267 _dump_buf_dif =
4268 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4269 if (_dump_buf_dif) {
4270 printk(KERN_ERR "BLKGRD allocated %d pages for "
4271 "_dump_buf_dif at 0x%p\n",
4272 (1 << pagecnt), _dump_buf_dif);
4273 _dump_buf_dif_order = pagecnt;
4274 memset(_dump_buf_dif, 0,
4275 ((1 << PAGE_SHIFT) << pagecnt));
4276 break;
4277 } else
4278 --pagecnt;
4279 }
4280 if (!_dump_buf_dif_order)
4281 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4282 "memory for hexdump\n");
4283 } else
4284 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4285 _dump_buf_dif);
4286}
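/*
 * Sizing note (illustrative): the pagecnt value handed to
 * __get_free_pages() is an allocation order, so the initial value of 10
 * asks for 1 << 10 = 1024 contiguous pages, i.e. 4 MB with 4 KB pages;
 * each retry halves the request by dropping the order by one.
 */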
4287
4288/**
4289 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4290 * @phba: pointer to lpfc hba data structure.
4291 *
4292 * This routine is invoked to perform all the necessary post initialization
4293 * setup for the device.
4294 **/
4295static void
4296lpfc_post_init_setup(struct lpfc_hba *phba)
4297{
4298 struct Scsi_Host *shost;
4299 struct lpfc_adapter_event_header adapter_event;
4300
4301 /* Get the default values for Model Name and Description */
4302 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4303
4304 /*
4305 * hba setup may have changed the hba_queue_depth so we need to
4306 * adjust the value of can_queue.
4307 */
4308 shost = pci_get_drvdata(phba->pcidev);
4309 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4310 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4311 lpfc_setup_bg(phba, shost);
4312
4313 lpfc_host_attrib_init(shost);
4314
4315 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4316 spin_lock_irq(shost->host_lock);
4317 lpfc_poll_start_timer(phba);
4318 spin_unlock_irq(shost->host_lock);
4319 }
4320
4321 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4322 "0428 Perform SCSI scan\n");
4323 /* Send board arrival event to upper layer */
4324 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4325 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4326 fc_host_post_vendor_event(shost, fc_get_event_number(),
4327 sizeof(adapter_event),
4328 (char *) &adapter_event,
4329 LPFC_NL_VENDOR_ID);
4330 return;
4331}
4332
4333/**
4334 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4335 * @phba: pointer to lpfc hba data structure.
4336 *
4337 * This routine is invoked to set up the PCI device memory space for device
4338 * with SLI-3 interface spec.
4339 *
4340 * Return codes
4341 * 0 - successful
4342 * other values - error
4343 **/
4344static int
4345lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4346{
4347 struct pci_dev *pdev;
4348 unsigned long bar0map_len, bar2map_len;
4349 int i, hbq_count;
4350 void *ptr;
4351 int error = -ENODEV;
4352
4353 /* Obtain PCI device reference */
4354 if (!phba->pcidev)
4355 return error;
4356 else
4357 pdev = phba->pcidev;
4358
4359 /* Set the device DMA mask size */
4360 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4361 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4362 return error;
4363
4364 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4365 * required by each mapping.
4366 */
4367 phba->pci_bar0_map = pci_resource_start(pdev, 0);
4368 bar0map_len = pci_resource_len(pdev, 0);
4369
4370 phba->pci_bar2_map = pci_resource_start(pdev, 2);
4371 bar2map_len = pci_resource_len(pdev, 2);
4372
4373 /* Map HBA SLIM to a kernel virtual address. */
4374 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4375 if (!phba->slim_memmap_p) {
4376 dev_printk(KERN_ERR, &pdev->dev,
4377 "ioremap failed for SLIM memory.\n");
4378 goto out;
4379 }
4380
4381 /* Map HBA Control Registers to a kernel virtual address. */
4382 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4383 if (!phba->ctrl_regs_memmap_p) {
4384 dev_printk(KERN_ERR, &pdev->dev,
4385 "ioremap failed for HBA control registers.\n");
4386 goto out_iounmap_slim;
4387 }
4388
4389 /* Allocate memory for SLI-2 structures */
4390 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4391 SLI2_SLIM_SIZE,
4392 &phba->slim2p.phys,
4393 GFP_KERNEL);
4394 if (!phba->slim2p.virt)
4395 goto out_iounmap;
4396
4397 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4398 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4399 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4400 phba->IOCBs = (phba->slim2p.virt +
4401 offsetof(struct lpfc_sli2_slim, IOCBs));
4402
4403 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4404 lpfc_sli_hbq_size(),
4405 &phba->hbqslimp.phys,
4406 GFP_KERNEL);
4407 if (!phba->hbqslimp.virt)
4408 goto out_free_slim;
4409
4410 hbq_count = lpfc_sli_hbq_count();
4411 ptr = phba->hbqslimp.virt;
4412 for (i = 0; i < hbq_count; ++i) {
4413 phba->hbqs[i].hbq_virt = ptr;
4414 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4415 ptr += (lpfc_hbq_defs[i]->entry_count *
4416 sizeof(struct lpfc_hbq_entry));
4417 }
4418 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4419 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4420
4421 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4422
4423 INIT_LIST_HEAD(&phba->rb_pend_list);
4424
4425 phba->MBslimaddr = phba->slim_memmap_p;
4426 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4427 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4428 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4429 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4430
4431 return 0;
4432
4433out_free_slim:
4434 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4435 phba->slim2p.virt, phba->slim2p.phys);
4436out_iounmap:
4437 iounmap(phba->ctrl_regs_memmap_p);
4438out_iounmap_slim:
4439 iounmap(phba->slim_memmap_p);
4440out:
4441 return error;
4442}
4443
4444/**
4445 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4446 * @phba: pointer to lpfc hba data structure.
4447 *
4448 * This routine is invoked to unset the PCI device memory space for device
4449 * with SLI-3 interface spec.
4450 **/
4451static void
4452lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4453{
4454 struct pci_dev *pdev;
4455
4456 /* Obtain PCI device reference */
4457 if (!phba->pcidev)
4458 return;
4459 else
4460 pdev = phba->pcidev;
4461
4462 /* Free coherent DMA memory allocated */
4463 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4464 phba->hbqslimp.virt, phba->hbqslimp.phys);
4465 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4466 phba->slim2p.virt, phba->slim2p.phys);
4467
4468 /* I/O memory unmap */
4469 iounmap(phba->ctrl_regs_memmap_p);
4470 iounmap(phba->slim_memmap_p);
4471
4472 return;
4473}
4474
4475/**
4476 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4477 * @phba: pointer to lpfc hba data structure.
4478 *
4479 * This routine is invoked to wait for the SLI4 device Power On Self Test
4480 * (POST) to complete and to check the status.
4481 *
4482 * Return 0 if successful, otherwise -ENODEV.
4483 **/
4484int
4485lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4486{
4487 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4488 uint32_t onlnreg0, onlnreg1;
4489 int i, port_error = -ENODEV;
4490
4491 if (!phba->sli4_hba.STAregaddr)
4492 return -ENODEV;
4493
4494 /* With unrecoverable error, log the error message and return error */
4495 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4496 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4497 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4498 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4499 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4500 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4502 "1422 HBA Unrecoverable error: "
4503 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4504 "online0_reg=0x%x, online1_reg=0x%x\n",
4505 uerrlo_reg.word0, uerrhi_reg.word0,
4506 onlnreg0, onlnreg1);
4507 }
4508 return -ENODEV;
4509 }
4510
4511 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4512 for (i = 0; i < 3000; i++) {
4513 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4514 /* Encounter fatal POST error, break out */
4515 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4516 port_error = -ENODEV;
4517 break;
4518 }
4519 if (LPFC_POST_STAGE_ARMFW_READY ==
4520 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4521 port_error = 0;
4522 break;
4523 }
4524 msleep(10);
4525 }
4526
4527 if (port_error)
4528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4529 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4530 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4531 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4532 bf_get(lpfc_hst_state_perr, &sta_reg),
4533 bf_get(lpfc_hst_state_sfi, &sta_reg),
4534 bf_get(lpfc_hst_state_nip, &sta_reg),
4535 bf_get(lpfc_hst_state_ipc, &sta_reg),
4536 bf_get(lpfc_hst_state_xrom, &sta_reg),
4537 bf_get(lpfc_hst_state_dl, &sta_reg),
4538 bf_get(lpfc_hst_state_port_status, &sta_reg));
4539
4540 /* Log device information */
4541 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
4542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4543 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4544 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4545 bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4546 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4547 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4548 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4549
4550 return port_error;
4551}
4552
4553/**
4554 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4555 * @phba: pointer to lpfc hba data structure.
4556 *
4557 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4558 * memory map.
4559 **/
4560static void
4561lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4562{
4563 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4564 LPFC_UERR_STATUS_LO;
4565 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4566 LPFC_UERR_STATUS_HI;
4567 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4568 LPFC_ONLINE0;
4569 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4570 LPFC_ONLINE1;
4571 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4572 LPFC_SCRATCHPAD;
4573}
4574
4575/**
4576 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4577 * @phba: pointer to lpfc hba data structure.
4578 *
4579 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4580 * memory map.
4581 **/
4582static void
4583lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4584{
4585
4586 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4587 LPFC_HST_STATE;
4588 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4589 LPFC_HST_ISR0;
4590 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4591 LPFC_HST_IMR0;
4592 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4593 LPFC_HST_ISCR0;
4594 return;
4595}
4596
4597/**
4598 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4599 * @phba: pointer to lpfc hba data structure.
4600 * @vf: virtual function number
4601 *
4602 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4603 * based on the given virtual function number, @vf.
4604 *
4605 * Return 0 if successful, otherwise -ENODEV.
4606 **/
4607static int
4608lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4609{
4610 if (vf > LPFC_VIR_FUNC_MAX)
4611 return -ENODEV;
4612
4613 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4614 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4615 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4616 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4617 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4618 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4619 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4620 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4621 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4622 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4623 return 0;
4624}
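/*
 * Layout sketch (LPFC_VFR_PAGE_SIZE of 4096 is an assumed value here):
 * each virtual function owns one doorbell page in BAR2, so for vf == 2
 * the RQ doorbell would sit at
 *
 *	drbl_regs_memmap_p + 2 * 4096 + LPFC_RQ_DOORBELL
 *
 * with the WQ, EQ/CQ, MQ doorbells and the bootstrap mailbox register at
 * their fixed offsets within the same page.
 */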
4625
4626/**
4627 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4628 * @phba: pointer to lpfc hba data structure.
4629 *
4630 * This routine is invoked to create the bootstrap mailbox
4631 * region consistent with the SLI-4 interface spec. This
4632 * routine allocates all memory necessary to communicate
4633 * mailbox commands to the port and sets up all alignment
4634 * needs. No locks are expected to be held when calling
4635 * this routine.
4636 *
4637 * Return codes
4638 * 0 - successful
4639 * ENOMEM - could not allocate memory.
4640 **/
4641static int
4642lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4643{
4644 uint32_t bmbx_size;
4645 struct lpfc_dmabuf *dmabuf;
4646 struct dma_address *dma_address;
4647 uint32_t pa_addr;
4648 uint64_t phys_addr;
4649
4650 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4651 if (!dmabuf)
4652 return -ENOMEM;
4653
4654 /*
4655 * The bootstrap mailbox region consists of 2 parts
4656 * plus an alignment restriction of 16 bytes.
4657 */
4658 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4659 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4660 bmbx_size,
4661 &dmabuf->phys,
4662 GFP_KERNEL);
4663 if (!dmabuf->virt) {
4664 kfree(dmabuf);
4665 return -ENOMEM;
4666 }
4667 memset(dmabuf->virt, 0, bmbx_size);
4668
4669 /*
4670 * Initialize the bootstrap mailbox pointers now so that the register
4671 * operations are simple later. The mailbox dma address is required
4672 * to be 16-byte aligned. Also align the virtual memory as each
4673 * mailbox is copied into the bmbx mailbox region before issuing the
4674 * command to the port.
4675 */
4676 phba->sli4_hba.bmbx.dmabuf = dmabuf;
4677 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4678
4679 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4680 LPFC_ALIGN_16_BYTE);
4681 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4682 LPFC_ALIGN_16_BYTE);
4683
4684 /*
4685 * Set the high and low physical addresses now. The SLI4 alignment
4686 * requirement is 16 bytes and the mailbox is posted to the port
4687 * as two 30-bit addresses. The other data is a bit marking whether
4688 * the 30-bit address is the high or low address.
4689 * Upcast bmbx aphys to 64bits so shift instruction compiles
4690 * clean on 32 bit machines.
4691 */
4692 dma_address = &phba->sli4_hba.bmbx.dma_address;
4693 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4694 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4695 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4696 LPFC_BMBX_BIT1_ADDR_HI);
4697
4698 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4699 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4700 LPFC_BMBX_BIT1_ADDR_LO);
4701 return 0;
4702}
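/*
 * Worked example (illustrative only): for a 16-byte aligned bootstrap
 * mailbox physical address aphys of 0x1234567890,
 *
 *	(aphys >> 4)  & 0x3fffffff = 0x23456789  (low 30-bit field)
 *	(aphys >> 34) & 0x3fffffff = 0x4         (high 30-bit field)
 *
 * each field is then shifted left by 2 and tagged with the
 * LPFC_BMBX_BIT1_ADDR_LO / _HI marker before being stored in
 * dma_address->addr_lo and addr_hi above.
 */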
4703
4704/**
4705 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4706 * @phba: pointer to lpfc hba data structure.
4707 *
4708 * This routine is invoked to tear down the bootstrap mailbox
4709 * region and release all host resources. This routine requires
4710 * the caller to ensure all mailbox commands have been recovered, no
4711 * additional mailbox commands are sent, and interrupts are disabled
4712 * before calling this routine.
4713 *
4714 **/
4715static void
4716lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4717{
4718 dma_free_coherent(&phba->pcidev->dev,
4719 phba->sli4_hba.bmbx.bmbx_size,
4720 phba->sli4_hba.bmbx.dmabuf->virt,
4721 phba->sli4_hba.bmbx.dmabuf->phys);
4722
4723 kfree(phba->sli4_hba.bmbx.dmabuf);
4724 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4725}
4726
4727/**
4728 * lpfc_sli4_read_config - Get the config parameters.
4729 * @phba: pointer to lpfc hba data structure.
4730 *
4731 * This routine is invoked to read the configuration parameters from the HBA.
4732 * The configuration parameters are used to set the base and maximum values
4733 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
4734 * allocation for the port.
4735 *
4736 * Return codes
4737 * 0 - successful
4738 * ENOMEM - No available memory
4739 * EIO - The mailbox failed to complete successfully.
4740 **/
4741static int
4742lpfc_sli4_read_config(struct lpfc_hba *phba)
4743{
4744 LPFC_MBOXQ_t *pmb;
4745 struct lpfc_mbx_read_config *rd_config;
4746 uint32_t rc = 0;
4747
4748 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4749 if (!pmb) {
4750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4751 "2011 Unable to allocate memory for issuing "
4752 "SLI_CONFIG_SPECIAL mailbox command\n");
4753 return -ENOMEM;
4754 }
4755
4756 lpfc_read_config(phba, pmb);
4757
4758 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4759 if (rc != MBX_SUCCESS) {
4760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4761 "2012 Mailbox failed, mbxCmd x%x "
4762 "READ_CONFIG, mbxStatus x%x\n",
4763 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4764 bf_get(lpfc_mqe_status, &pmb->u.mqe));
4765 rc = -EIO;
4766 } else {
4767 rd_config = &pmb->u.mqe.un.rd_config;
4768 phba->sli4_hba.max_cfg_param.max_xri =
4769 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4770 phba->sli4_hba.max_cfg_param.xri_base =
4771 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4772 phba->sli4_hba.max_cfg_param.max_vpi =
4773 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4774 phba->sli4_hba.max_cfg_param.vpi_base =
4775 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4776 phba->sli4_hba.max_cfg_param.max_rpi =
4777 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4778 phba->sli4_hba.max_cfg_param.rpi_base =
4779 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4780 phba->sli4_hba.max_cfg_param.max_vfi =
4781 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4782 phba->sli4_hba.max_cfg_param.vfi_base =
4783 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4784 phba->sli4_hba.max_cfg_param.max_fcfi =
4785 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4786 phba->sli4_hba.max_cfg_param.fcfi_base =
4787 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4788 phba->sli4_hba.max_cfg_param.max_eq =
4789 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4790 phba->sli4_hba.max_cfg_param.max_rq =
4791 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4792 phba->sli4_hba.max_cfg_param.max_wq =
4793 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4794 phba->sli4_hba.max_cfg_param.max_cq =
4795 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4796 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4797 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4798 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4799 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4800 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4801 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4802 phba->max_vports = phba->max_vpi;
4803 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4804 "2003 cfg params XRI(B:%d M:%d), "
4805 "VPI(B:%d M:%d) "
4806 "VFI(B:%d M:%d) "
4807 "RPI(B:%d M:%d) "
4808 "FCFI(B:%d M:%d)\n",
4809 phba->sli4_hba.max_cfg_param.xri_base,
4810 phba->sli4_hba.max_cfg_param.max_xri,
4811 phba->sli4_hba.max_cfg_param.vpi_base,
4812 phba->sli4_hba.max_cfg_param.max_vpi,
4813 phba->sli4_hba.max_cfg_param.vfi_base,
4814 phba->sli4_hba.max_cfg_param.max_vfi,
4815 phba->sli4_hba.max_cfg_param.rpi_base,
4816 phba->sli4_hba.max_cfg_param.max_rpi,
4817 phba->sli4_hba.max_cfg_param.fcfi_base,
4818 phba->sli4_hba.max_cfg_param.max_fcfi);
4819 }
4820 mempool_free(pmb, phba->mbox_mem_pool);
4821
4822 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
4823 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4824 phba->cfg_hba_queue_depth =
4825 phba->sli4_hba.max_cfg_param.max_xri;
4826 return rc;
4827}
4828
4829/**
4830 * lpfc_setup_endian_order - Notify the port of the host's endian order.
4831 * @phba: pointer to lpfc hba data structure.
4832 *
4833 * This routine is invoked to setup the host-side endian order to the
4834 * HBA consistent with the SLI-4 interface spec.
4835 *
4836 * Return codes
4837 * 0 - successful
4838 * ENOMEM - No available memory
4839 * EIO - The mailbox failed to complete successfully.
4840 **/
4841static int
4842lpfc_setup_endian_order(struct lpfc_hba *phba)
4843{
4844 LPFC_MBOXQ_t *mboxq;
4845 uint32_t rc = 0;
4846 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4847 HOST_ENDIAN_HIGH_WORD1};
4848
4849 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4850 if (!mboxq) {
4851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4852 "0492 Unable to allocate memory for issuing "
4853 "SLI_CONFIG_SPECIAL mailbox command\n");
4854 return -ENOMEM;
4855 }
4856
4857 /*
4858 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4859 * words to contain special data values and no other data.
4860 */
4861 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4862 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4863 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4864 if (rc != MBX_SUCCESS) {
4865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4866 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
4867 "status x%x\n",
4868 rc);
4869 rc = -EIO;
4870 }
4871
4872 mempool_free(mboxq, phba->mbox_mem_pool);
4873 return rc;
4874}
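/*
 * Note (assumption, not stated in the code): HOST_ENDIAN_LOW_WORD0 and
 * HOST_ENDIAN_HIGH_WORD1 are fixed byte patterns; by examining how these
 * known words arrive over PCI, the port can presumably infer the host's
 * byte order and present subsequent mailbox data accordingly.
 */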
4875
4876/**
4877 * lpfc_sli4_queue_create - Create all the SLI4 queues
4878 * @phba: pointer to lpfc hba data structure.
4879 *
4880 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4881 * operation. For each SLI4 queue type, the parameters such as queue entry
4882 * count (queue depth) shall be taken from the module parameter. For now,
4883 * we just use some constant number as place holder.
4884 *
4885 * Return codes
4886 * 0 - successful
4887 * ENOMEM - No available memory
4888 * EIO - The mailbox failed to complete successfully.
4889 **/
4890static int
4891lpfc_sli4_queue_create(struct lpfc_hba *phba)
4892{
4893 struct lpfc_queue *qdesc;
4894 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4895 int cfg_fcp_wq_count;
4896 int cfg_fcp_eq_count;
4897
4898 /*
4899 * Sanity check for configured queue parameters against the run-time
4900 * device parameters
4901 */
4902
4903 /* Sanity check on FCP fast-path WQ parameters */
4904 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4905 if (cfg_fcp_wq_count >
4906 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4907 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4908 LPFC_SP_WQN_DEF;
4909 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4911 "2581 Not enough WQs (%d) from "
4912 "the pci function for supporting "
4913 "FCP WQs (%d)\n",
4914 phba->sli4_hba.max_cfg_param.max_wq,
4915 phba->cfg_fcp_wq_count);
4916 goto out_error;
4917 }
4918 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4919 "2582 Not enough WQs (%d) from the pci "
4920 "function for supporting the requested "
4921 "FCP WQs (%d), the actual FCP WQs can "
4922 "be supported: %d\n",
4923 phba->sli4_hba.max_cfg_param.max_wq,
4924 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4925 }
4926 /* The actual number of FCP work queues adopted */
4927 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4928
4929 /* Sanity check on FCP fast-path EQ parameters */
4930 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4931 if (cfg_fcp_eq_count >
4932 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4933 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4934 LPFC_SP_EQN_DEF;
4935 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4937 "2574 Not enough EQs (%d) from the "
4938 "pci function for supporting FCP "
4939 "EQs (%d)\n",
4940 phba->sli4_hba.max_cfg_param.max_eq,
4941 phba->cfg_fcp_eq_count);
4942 goto out_error;
4943 }
4944 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4945 "2575 Not enough EQs (%d) from the pci "
4946 "function for supporting the requested "
4947 "FCP EQs (%d), the actual FCP EQs can "
4948 "be supported: %d\n",
4949 phba->sli4_hba.max_cfg_param.max_eq,
4950 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4951 }
4952 /* It does not make sense to have more EQs than WQs */
4953 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4954 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4955 "2593 The number of FCP EQs (%d) is more "
4956 "than the number of FCP WQs (%d); limiting "
4957 "the number of FCP EQs to the number of "
4958 "WQs (%d)\n", cfg_fcp_eq_count,
4959 phba->cfg_fcp_wq_count,
4960 phba->cfg_fcp_wq_count);
4961 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4962 }
4963 /* The actual number of FCP event queues adopted */
4964 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4965 /* The overall number of event queues used */
4966 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
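	/*
	 * Worked example with illustrative numbers (assuming LPFC_SP_WQN_DEF
	 * and LPFC_SP_EQN_DEF are each 1 and the fast-path minimums are met):
	 * if the port reports max_wq = 5 and max_eq = 5 but the module
	 * parameters ask for 8 FCP WQs and 8 FCP EQs, both are trimmed to 4
	 * and cfg_eqn becomes 4 + LPFC_SP_EQN_DEF = 5.
	 */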
4967
4968 /*
4969 * Create Event Queues (EQs)
4970 */
4971
4972 /* Get EQ depth from module parameter, fake the default for now */
4973 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4974 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4975
4976 /* Create slow path event queue */
4977 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4978 phba->sli4_hba.eq_ecount);
4979 if (!qdesc) {
4980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4981 "0496 Failed allocate slow-path EQ\n");
4982 goto out_error;
4983 }
4984 phba->sli4_hba.sp_eq = qdesc;
4985
4986 /* Create fast-path FCP Event Queue(s) */
4987 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4988 phba->cfg_fcp_eq_count), GFP_KERNEL);
4989 if (!phba->sli4_hba.fp_eq) {
4990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4991 "2576 Failed allocate memory for fast-path "
4992 "EQ record array\n");
4993 goto out_free_sp_eq;
4994 }
4995 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4996 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4997 phba->sli4_hba.eq_ecount);
4998 if (!qdesc) {
4999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5000 "0497 Failed allocate fast-path EQ\n");
5001 goto out_free_fp_eq;
5002 }
5003 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5004 }
5005
5006 /*
5007 * Create Complete Queues (CQs)
5008 */
5009
5010 /* Get CQ depth from module parameter, fake the default for now */
5011 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5012 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5013
5014 /* Create slow-path Mailbox Command Complete Queue */
5015 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5016 phba->sli4_hba.cq_ecount);
5017 if (!qdesc) {
5018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5019 "0500 Failed allocate slow-path mailbox CQ\n");
5020 goto out_free_fp_eq;
5021 }
5022 phba->sli4_hba.mbx_cq = qdesc;
5023
5024 /* Create slow-path ELS Complete Queue */
5025 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5026 phba->sli4_hba.cq_ecount);
5027 if (!qdesc) {
5028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5029 "0501 Failed allocate slow-path ELS CQ\n");
5030 goto out_free_mbx_cq;
5031 }
5032 phba->sli4_hba.els_cq = qdesc;
5033
5034 /* Create slow-path Unsolicited Receive Complete Queue */
5035 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5036 phba->sli4_hba.cq_ecount);
5037 if (!qdesc) {
5038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5039 "0502 Failed allocate slow-path USOL RX CQ\n");
5040 goto out_free_els_cq;
5041 }
5042 phba->sli4_hba.rxq_cq = qdesc;
5043
5044 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5045 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5046 phba->cfg_fcp_eq_count), GFP_KERNEL);
5047 if (!phba->sli4_hba.fcp_cq) {
5048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5049 "2577 Failed allocate memory for fast-path "
5050 "CQ record array\n");
5051 goto out_free_rxq_cq;
5052 }
5053 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5054 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5055 phba->sli4_hba.cq_ecount);
5056 if (!qdesc) {
5057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5058 "0499 Failed allocate fast-path FCP "
5059 "CQ (%d)\n", fcp_cqidx);
5060 goto out_free_fcp_cq;
5061 }
5062 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5063 }
5064
5065 /* Create Mailbox Command Queue */
5066 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5067 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5068
5069 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5070 phba->sli4_hba.mq_ecount);
5071 if (!qdesc) {
5072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5073 "0505 Failed allocate slow-path MQ\n");
5074 goto out_free_fcp_cq;
5075 }
5076 phba->sli4_hba.mbx_wq = qdesc;
5077
5078 /*
5079 * Create all the Work Queues (WQs)
5080 */
5081 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5082 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5083
5084 /* Create slow-path ELS Work Queue */
5085 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5086 phba->sli4_hba.wq_ecount);
5087 if (!qdesc) {
5088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5089 "0504 Failed allocate slow-path ELS WQ\n");
5090 goto out_free_mbx_wq;
5091 }
5092 phba->sli4_hba.els_wq = qdesc;
5093
5094 /* Create fast-path FCP Work Queue(s) */
5095 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5096 phba->cfg_fcp_wq_count), GFP_KERNEL);
5097 if (!phba->sli4_hba.fcp_wq) {
5098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5099 "2578 Failed allocate memory for fast-path "
5100 "WQ record array\n");
5101 goto out_free_els_wq;
5102 }
5103 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5104 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5105 phba->sli4_hba.wq_ecount);
5106 if (!qdesc) {
5107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5108 "0503 Failed allocate fast-path FCP "
5109 "WQ (%d)\n", fcp_wqidx);
5110 goto out_free_fcp_wq;
5111 }
5112 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5113 }
5114
5115 /*
5116 * Create Receive Queue (RQ)
5117 */
5118 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5119 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5120
5121 /* Create Receive Queue for header */
5122 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5123 phba->sli4_hba.rq_ecount);
5124 if (!qdesc) {
5125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5126 "0506 Failed allocate receive HRQ\n");
5127 goto out_free_fcp_wq;
5128 }
5129 phba->sli4_hba.hdr_rq = qdesc;
5130
5131 /* Create Receive Queue for data */
5132 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5133 phba->sli4_hba.rq_ecount);
5134 if (!qdesc) {
5135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5136 "0507 Failed allocate receive DRQ\n");
5137 goto out_free_hdr_rq;
5138 }
5139 phba->sli4_hba.dat_rq = qdesc;
5140
5141 return 0;
5142
5143out_free_hdr_rq:
5144 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5145 phba->sli4_hba.hdr_rq = NULL;
5146out_free_fcp_wq:
5147 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5148 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5149 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5150 }
5151 kfree(phba->sli4_hba.fcp_wq);
5152out_free_els_wq:
5153 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5154 phba->sli4_hba.els_wq = NULL;
5155out_free_mbx_wq:
5156 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5157 phba->sli4_hba.mbx_wq = NULL;
5158out_free_fcp_cq:
5159 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5160 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5161 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5162 }
5163 kfree(phba->sli4_hba.fcp_cq);
5164out_free_rxq_cq:
5165 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5166 phba->sli4_hba.rxq_cq = NULL;
5167out_free_els_cq:
5168 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5169 phba->sli4_hba.els_cq = NULL;
5170out_free_mbx_cq:
5171 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5172 phba->sli4_hba.mbx_cq = NULL;
5173out_free_fp_eq:
5174 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5175 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5176 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5177 }
5178 kfree(phba->sli4_hba.fp_eq);
5179out_free_sp_eq:
5180 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5181 phba->sli4_hba.sp_eq = NULL;
5182out_error:
5183 return -ENOMEM;
5184}
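/*
 * Sketch of the queue topology allocated above, as it is wired together
 * later by lpfc_sli4_queue_setup():
 *
 *	sp_eq ----> mbx_cq ----> mbx_wq (MQ)
 *	       \--> els_cq ----> els_wq
 *	        \-> rxq_cq ----> hdr_rq + dat_rq
 *	fp_eq[i] -> fcp_cq[i] -> fcp_wq[j]  (WQs round-robin over the FCP CQs)
 */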
5185
5186/**
5187 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5188 * @phba: pointer to lpfc hba data structure.
5189 *
5190 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5191 * operation.
5192 *
5193 * Return codes
5194 * 0 - successful
5195 * ENOMEM - No available memory
5196 * EIO - The mailbox failed to complete successfully.
5197 **/
5198static void
5199lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5200{
5201 int fcp_qidx;
5202
5203 /* Release mailbox command work queue */
5204 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5205 phba->sli4_hba.mbx_wq = NULL;
5206
5207 /* Release ELS work queue */
5208 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5209 phba->sli4_hba.els_wq = NULL;
5210
5211 /* Release FCP work queue */
5212 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5213 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5214 kfree(phba->sli4_hba.fcp_wq);
5215 phba->sli4_hba.fcp_wq = NULL;
5216
5217 /* Release unsolicited receive queue */
5218 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5219 phba->sli4_hba.hdr_rq = NULL;
5220 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5221 phba->sli4_hba.dat_rq = NULL;
5222
5223 /* Release unsolicited receive complete queue */
5224 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5225 phba->sli4_hba.rxq_cq = NULL;
5226
5227 /* Release ELS complete queue */
5228 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5229 phba->sli4_hba.els_cq = NULL;
5230
5231 /* Release mailbox command complete queue */
5232 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5233 phba->sli4_hba.mbx_cq = NULL;
5234
5235 /* Release FCP response complete queue */
5236 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5237 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5238 kfree(phba->sli4_hba.fcp_cq);
5239 phba->sli4_hba.fcp_cq = NULL;
5240
5241 /* Release fast-path event queue */
5242 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5243 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5244 kfree(phba->sli4_hba.fp_eq);
5245 phba->sli4_hba.fp_eq = NULL;
5246
5247 /* Release slow-path event queue */
5248 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5249 phba->sli4_hba.sp_eq = NULL;
5250
5251 return;
5252}
5253
5254/**
5255 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5256 * @phba: pointer to lpfc hba data structure.
5257 *
5258 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5259 * operation.
5260 *
5261 * Return codes
5262 * 0 - successful
5263 * ENOMEM - No available memory
5264 * EIO - The mailbox failed to complete successfully.
5265 **/
5266int
5267lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5268{
5269 int rc = -ENOMEM;
5270 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5271 int fcp_cq_index = 0;
5272
5273 /*
5274 * Set up Event Queues (EQs)
5275 */
5276
5277 /* Set up slow-path event queue */
5278 if (!phba->sli4_hba.sp_eq) {
5279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5280 "0520 Slow-path EQ not allocated\n");
5281 goto out_error;
5282 }
5283 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5284 LPFC_SP_DEF_IMAX);
5285 if (rc) {
5286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5287 "0521 Failed setup of slow-path EQ: "
5288 "rc = 0x%x\n", rc);
5289 goto out_error;
5290 }
5291 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5292 "2583 Slow-path EQ setup: queue-id=%d\n",
5293 phba->sli4_hba.sp_eq->queue_id);
5294
5295 /* Set up fast-path event queue */
5296 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5297 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5299 "0522 Fast-path EQ (%d) not "
5300 "allocated\n", fcp_eqidx);
5301 goto out_destroy_fp_eq;
5302 }
5303 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5304 phba->cfg_fcp_imax);
5305 if (rc) {
5306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5307 "0523 Failed setup of fast-path EQ "
5308 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
5309 goto out_destroy_fp_eq;
5310 }
5311 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5312 "2584 Fast-path EQ setup: "
5313 "queue[%d]-id=%d\n", fcp_eqidx,
5314 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5315 }
5316
5317 /*
5318 * Set up Complete Queues (CQs)
5319 */
5320
5321 /* Set up slow-path MBOX Complete Queue as the first CQ */
5322 if (!phba->sli4_hba.mbx_cq) {
5323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5324 "0528 Mailbox CQ not allocated\n");
5325 goto out_destroy_fp_eq;
5326 }
5327 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5328 LPFC_MCQ, LPFC_MBOX);
5329 if (rc) {
5330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5331 "0529 Failed setup of slow-path mailbox CQ: "
5332 "rc = 0x%x\n", rc);
5333 goto out_destroy_fp_eq;
5334 }
5335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5336 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5337 phba->sli4_hba.mbx_cq->queue_id,
5338 phba->sli4_hba.sp_eq->queue_id);
5339
5340 /* Set up slow-path ELS Complete Queue */
5341 if (!phba->sli4_hba.els_cq) {
5342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5343 "0530 ELS CQ not allocated\n");
5344 goto out_destroy_mbx_cq;
5345 }
5346 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5347 LPFC_WCQ, LPFC_ELS);
5348 if (rc) {
5349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5350 "0531 Failed setup of slow-path ELS CQ: "
5351 "rc = 0x%x\n", rc);
5352 goto out_destroy_mbx_cq;
5353 }
5354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5355 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5356 phba->sli4_hba.els_cq->queue_id,
5357 phba->sli4_hba.sp_eq->queue_id);
5358
5359 /* Set up slow-path Unsolicited Receive Complete Queue */
5360 if (!phba->sli4_hba.rxq_cq) {
5361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5362 "0532 USOL RX CQ not allocated\n");
5363 goto out_destroy_els_cq;
5364 }
5365 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5366 LPFC_RCQ, LPFC_USOL);
5367 if (rc) {
5368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5369 "0533 Failed setup of slow-path USOL RX CQ: "
5370 "rc = 0x%x\n", rc);
5371 goto out_destroy_els_cq;
5372 }
5373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5374 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5375 phba->sli4_hba.rxq_cq->queue_id,
5376 phba->sli4_hba.sp_eq->queue_id);
5377
5378 /* Set up fast-path FCP Response Complete Queue */
5379 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5380 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5382 "0526 Fast-path FCP CQ (%d) not "
5383 "allocated\n", fcp_cqidx);
5384 goto out_destroy_fcp_cq;
5385 }
5386 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5387 phba->sli4_hba.fp_eq[fcp_cqidx],
5388 LPFC_WCQ, LPFC_FCP);
5389 if (rc) {
5390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5391 "0527 Failed setup of fast-path FCP "
5392 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5393 goto out_destroy_fcp_cq;
5394 }
5395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5396 "2588 FCP CQ setup: cq[%d]-id=%d, "
5397 "parent eq[%d]-id=%d\n",
5398 fcp_cqidx,
5399 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5400 fcp_cqidx,
5401 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5402 }
5403
5404 /*
5405 * Set up all the Work Queues (WQs)
5406 */
5407
5408 /* Set up Mailbox Command Queue */
5409 if (!phba->sli4_hba.mbx_wq) {
5410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5411 "0538 Slow-path MQ not allocated\n");
5412 goto out_destroy_fcp_cq;
5413 }
5414 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5415 phba->sli4_hba.mbx_cq, LPFC_MBOX);
5416 if (rc) {
5417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5418 "0539 Failed setup of slow-path MQ: "
5419 "rc = 0x%x\n", rc);
5420 goto out_destroy_fcp_cq;
5421 }
5422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5423 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5424 phba->sli4_hba.mbx_wq->queue_id,
5425 phba->sli4_hba.mbx_cq->queue_id);
5426
5427 /* Set up slow-path ELS Work Queue */
5428 if (!phba->sli4_hba.els_wq) {
5429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5430 "0536 Slow-path ELS WQ not allocated\n");
5431 goto out_destroy_mbx_wq;
5432 }
5433 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5434 phba->sli4_hba.els_cq, LPFC_ELS);
5435 if (rc) {
5436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5437 "0537 Failed setup of slow-path ELS WQ: "
5438 "rc = 0x%x\n", rc);
5439 goto out_destroy_mbx_wq;
5440 }
5441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5442 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5443 phba->sli4_hba.els_wq->queue_id,
5444 phba->sli4_hba.els_cq->queue_id);
5445
5446 /* Set up fast-path FCP Work Queue */
5447 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5448 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5450 "0534 Fast-path FCP WQ (%d) not "
5451 "allocated\n", fcp_wqidx);
5452 goto out_destroy_fcp_wq;
5453 }
5454 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5455 phba->sli4_hba.fcp_cq[fcp_cq_index],
5456 LPFC_FCP);
5457 if (rc) {
5458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5459 "0535 Failed setup of fast-path FCP "
5460 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5461 goto out_destroy_fcp_wq;
5462 }
5463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5464 "2591 FCP WQ setup: wq[%d]-id=%d, "
5465 "parent cq[%d]-id=%d\n",
5466 fcp_wqidx,
5467 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5468 fcp_cq_index,
5469 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5470 /* Round robin FCP Work Queue's Completion Queue assignment */
5471 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5472 }
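	/*
	 * Example of the round-robin binding above (illustrative counts only):
	 * with cfg_fcp_wq_count = 4 and cfg_fcp_eq_count = 2 the mapping is
	 * wq[0]->cq[0], wq[1]->cq[1], wq[2]->cq[0], wq[3]->cq[1].
	 */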
5473
5474 /*
5475 * Create Receive Queue (RQ)
5476 */
5477 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5479 "0540 Receive Queue not allocated\n");
5480 goto out_destroy_fcp_wq;
5481 }
5482 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5483 phba->sli4_hba.rxq_cq, LPFC_USOL);
5484 if (rc) {
5485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5486 "0541 Failed setup of Receive Queue: "
5487 "rc = 0x%x\n", rc);
5488 goto out_destroy_fcp_wq;
5489 }
5490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5491 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5492 "parent cq-id=%d\n",
5493 phba->sli4_hba.hdr_rq->queue_id,
5494 phba->sli4_hba.dat_rq->queue_id,
5495 phba->sli4_hba.rxq_cq->queue_id);
5496 return 0;
5497
5498out_destroy_fcp_wq:
5499 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5500 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5501 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5502out_destroy_mbx_wq:
5503 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5504out_destroy_fcp_cq:
5505 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5506 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5507 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5508out_destroy_els_cq:
5509 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5510out_destroy_mbx_cq:
5511 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5512out_destroy_fp_eq:
5513 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5514 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5515 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5516out_error:
5517 return rc;
5518}
5519
5520/**
5521 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5522 * @phba: pointer to lpfc hba data structure.
5523 *
5524 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
5525 * operation.
5526 *
5527 * Return codes
5528 * 0 - successful
5529 * ENOMEM - No available memory
5530 * EIO - The mailbox failed to complete successfully.
5531 **/
5532void
5533lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5534{
5535 int fcp_qidx;
5536
5537 /* Unset mailbox command work queue */
5538 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5539 /* Unset ELS work queue */
5540 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5541 /* Unset unsolicited receive queue */
5542 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5543 /* Unset FCP work queue */
5544 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5545 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5546 /* Unset mailbox command complete queue */
5547 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5548 /* Unset ELS complete queue */
5549 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5550 /* Unset unsolicited receive complete queue */
5551 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5552 /* Unset FCP response complete queue */
5553 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5554 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5555 /* Unset fast-path event queue */
5556 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5557 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5558 /* Unset slow-path event queue */
5559 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5560}
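/*
 * Note: lpfc_sli4_queue_unset() tears the queues down on the port through
 * the *_destroy mailbox commands, whereas lpfc_sli4_queue_destroy() above
 * only releases the host-side queue descriptors; on shutdown the two are
 * presumably used together, unset first and then destroy.
 */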
5561
5562/**
5563 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5564 * @phba: pointer to lpfc hba data structure.
5565 *
5566 * This routine is invoked to allocate and set up a pool of completion queue
5567 * events. The body of the completion queue event is a completion queue entry
5568 * CQE. For now, this pool is used for the interrupt service routine to queue
5569 * the following HBA completion queue events for the worker thread to process:
5570 * - Mailbox asynchronous events
5571 * - Receive queue completion unsolicited events
5572 * Later, this can be used for all the slow-path events.
5573 *
5574 * Return codes
5575 * 0 - successful
5576 * -ENOMEM - No available memory
5577 **/
5578static int
5579lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5580{
5581 struct lpfc_cq_event *cq_event;
5582 int i;
5583
5584 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5585 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5586 if (!cq_event)
5587 goto out_pool_create_fail;
5588 list_add_tail(&cq_event->list,
5589 &phba->sli4_hba.sp_cqe_event_pool);
5590 }
5591 return 0;
5592
5593out_pool_create_fail:
5594 lpfc_sli4_cq_event_pool_destroy(phba);
5595 return -ENOMEM;
5596}
5597
5598/**
5599 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5600 * @phba: pointer to lpfc hba data structure.
5601 *
5602 * This routine is invoked to free the pool of completion queue events at
5603 * driver unload time. Note that it is the responsibility of the driver
5604 * cleanup routine to free all the outstanding completion-queue events
5605 * allocated from this pool back into the pool before invoking this routine
5606 * to destroy the pool.
5607 **/
5608static void
5609lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5610{
5611 struct lpfc_cq_event *cq_event, *next_cq_event;
5612
5613 list_for_each_entry_safe(cq_event, next_cq_event,
5614 &phba->sli4_hba.sp_cqe_event_pool, list) {
5615 list_del(&cq_event->list);
5616 kfree(cq_event);
5617 }
5618}
5619
5620/**
5621 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5622 * @phba: pointer to lpfc hba data structure.
5623 *
5624 * This routine is the lock free version of the API invoked to allocate a
5625 * completion-queue event from the free pool.
5626 *
5627 * Return: Pointer to the newly allocated completion-queue event if successful
5628 * NULL otherwise.
5629 **/
5630struct lpfc_cq_event *
5631__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5632{
5633 struct lpfc_cq_event *cq_event = NULL;
5634
5635 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5636 struct lpfc_cq_event, list);
5637 return cq_event;
5638}
5639
5640/**
5641 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5642 * @phba: pointer to lpfc hba data structure.
5643 *
5644 * This routine is the lock version of the API invoked to allocate a
5645 * completion-queue event from the free pool.
5646 *
5647 * Return: Pointer to the newly allocated completion-queue event if successful
5648 * NULL otherwise.
5649 **/
5650struct lpfc_cq_event *
5651lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5652{
5653 struct lpfc_cq_event *cq_event;
5654 unsigned long iflags;
5655
5656 spin_lock_irqsave(&phba->hbalock, iflags);
5657 cq_event = __lpfc_sli4_cq_event_alloc(phba);
5658 spin_unlock_irqrestore(&phba->hbalock, iflags);
5659 return cq_event;
5660}
5661
5662/**
5663 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5664 * @phba: pointer to lpfc hba data structure.
5665 * @cq_event: pointer to the completion queue event to be freed.
5666 *
5667 * This routine is the lock free version of the API invoked to release a
5668 * completion-queue event back into the free pool.
5669 **/
5670void
5671__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5672 struct lpfc_cq_event *cq_event)
5673{
5674 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5675}
5676
5677/**
5678 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5679 * @phba: pointer to lpfc hba data structure.
5680 * @cq_event: pointer to the completion queue event to be freed.
5681 *
5682 * This routine is the lock version of the API invoked to release a
5683 * completion-queue event back into the free pool.
5684 **/
5685void
5686lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5687 struct lpfc_cq_event *cq_event)
5688{
5689 unsigned long iflags;
5690 spin_lock_irqsave(&phba->hbalock, iflags);
5691 __lpfc_sli4_cq_event_release(phba, cq_event);
5692 spin_unlock_irqrestore(&phba->hbalock, iflags);
5693}
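/*
 * Typical usage sketch (hypothetical call sites): the interrupt handler
 * takes an event with lpfc_sli4_cq_event_alloc(), copies the CQE into it
 * and queues it on a slow-path work list; the worker thread later returns
 * it with lpfc_sli4_cq_event_release(). The __ prefixed variants are for
 * callers that already hold phba->hbalock.
 */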
5694
5695/**
5696 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5697 * @phba: pointer to lpfc hba data structure.
5698 *
5699 * This routine frees all the pending completion-queue events back into
5700 * the free pool for device reset.
5701 **/
5702static void
5703lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5704{
5705 LIST_HEAD(cqelist);
5706 struct lpfc_cq_event *cqe;
5707 unsigned long iflags;
5708
5709 /* Retrieve all the pending WCQEs from pending WCQE lists */
5710 spin_lock_irqsave(&phba->hbalock, iflags);
5711 /* Pending FCP XRI abort events */
5712 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5713 &cqelist);
5714 /* Pending ELS XRI abort events */
5715 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5716 &cqelist);
5717 /* Pending async events */
5718 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5719 &cqelist);
5720 spin_unlock_irqrestore(&phba->hbalock, iflags);
5721
5722 while (!list_empty(&cqelist)) {
5723 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5724 lpfc_sli4_cq_event_release(phba, cqe);
5725 }
5726}
5727
5728/**
5729 * lpfc_pci_function_reset - Reset pci function.
5730 * @phba: pointer to lpfc hba data structure.
5731 *
5732 * This routine is invoked to request a PCI function reset. It destroys
5733 * all resources assigned to the PCI function which originates this request.
5734 *
5735 * Return codes
5736 * 0 - successful
5737 * ENOMEM - No available memory
5738 * EIO - The mailbox failed to complete successfully.
5739 **/
5740int
5741lpfc_pci_function_reset(struct lpfc_hba *phba)
5742{
5743 LPFC_MBOXQ_t *mboxq;
5744 uint32_t rc = 0;
5745 uint32_t shdr_status, shdr_add_status;
5746 union lpfc_sli4_cfg_shdr *shdr;
5747
5748 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5749 if (!mboxq) {
5750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5751 "0494 Unable to allocate memory for issuing "
5752 "SLI_FUNCTION_RESET mailbox command\n");
5753 return -ENOMEM;
5754 }
5755
5756 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5757 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5758 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5759 LPFC_SLI4_MBX_EMBED);
5760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5761 shdr = (union lpfc_sli4_cfg_shdr *)
5762 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5765 if (rc != MBX_TIMEOUT)
5766 mempool_free(mboxq, phba->mbox_mem_pool);
5767 if (shdr_status || shdr_add_status || rc) {
5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5769 "0495 SLI_FUNCTION_RESET mailbox failed with "
5770 "status x%x add_status x%x, mbx status x%x\n",
5771 shdr_status, shdr_add_status, rc);
5772 rc = -ENXIO;
5773 }
5774 return rc;
5775}
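/*
 * A SLI4_CONFIG request is treated as successful only when the mailbox
 * return code and both the shdr_status and shdr_add_status fields of the
 * response header are zero; the same check pattern appears again in
 * lpfc_sli4_send_nop_mbox_cmds() below.
 */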
5776
5777/**
5778 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5779 * @phba: pointer to lpfc hba data structure.
5780 * @cnt: number of nop mailbox commands to send.
5781 *
5782 * This routine is invoked to send @cnt NOP mailbox commands and
5783 * wait for each command to complete.
5784 *
5785 * Return: the number of NOP mailbox commands completed.
5786 **/
5787static int
5788lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5789{
5790 LPFC_MBOXQ_t *mboxq;
5791 int length, cmdsent;
5792 uint32_t mbox_tmo;
5793 uint32_t rc = 0;
5794 uint32_t shdr_status, shdr_add_status;
5795 union lpfc_sli4_cfg_shdr *shdr;
5796
5797 if (cnt == 0) {
5798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5799 "2518 Requested to send 0 NOP mailbox cmd\n");
5800 return cnt;
5801 }
5802
5803 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5804 if (!mboxq) {
5805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5806 "2519 Unable to allocate memory for issuing "
5807 "NOP mailbox command\n");
5808 return 0;
5809 }
5810
5811 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5812 length = (sizeof(struct lpfc_mbx_nop) -
5813 sizeof(struct lpfc_sli4_cfg_mhdr));
5814 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5815 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5816
5817 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5818 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5819 if (!phba->sli4_hba.intr_enable)
5820 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5821 else
5822 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5823 if (rc == MBX_TIMEOUT)
5824 break;
5825 /* Check return status */
5826 shdr = (union lpfc_sli4_cfg_shdr *)
5827 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5828 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5829 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5830 &shdr->response);
5831 if (shdr_status || shdr_add_status || rc) {
5832 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5833 "2520 NOP mailbox command failed "
5834 "status x%x add_status x%x mbx "
5835 "status x%x\n", shdr_status,
5836 shdr_add_status, rc);
5837 break;
5838 }
5839 }
5840
5841 if (rc != MBX_TIMEOUT)
5842 mempool_free(mboxq, phba->mbox_mem_pool);
5843
5844 return cmdsent;
5845}
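/*
 * The caller can compare the return value against @cnt to see whether the
 * mailbox path handled every NOP; the NOP commands themselves carry no
 * meaningful payload and are presumably used simply to exercise the SLI-4
 * mailbox path.
 */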
5846
5847/**
5848 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
5849 * @phba: pointer to lpfc hba data structure.
5850 * @fcfi: fcf index.
5851 *
5852 * This routine is invoked to unregister a FCFI from device.
5853 **/
5854void
5855lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5856{
5857 LPFC_MBOXQ_t *mbox;
5858 uint32_t mbox_tmo;
5859 int rc;
5860 unsigned long flags;
5861
5862 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5863
5864 if (!mbox)
5865 return;
5866
5867 lpfc_unreg_fcfi(mbox, fcfi);
5868
5869 if (!phba->sli4_hba.intr_enable)
5870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5871 else {
5872 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5874 }
5875 if (rc != MBX_TIMEOUT)
5876 mempool_free(mbox, phba->mbox_mem_pool);
5877 if (rc != MBX_SUCCESS)
5878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5879 "2517 Unregister FCFI command failed "
5880 "status %d, mbxStatus x%x\n", rc,
5881 bf_get(lpfc_mqe_status, &mbox->u.mqe));
5882 else {
5883 spin_lock_irqsave(&phba->hbalock, flags);
5884 /* Mark the FCFI is no longer registered */
5885 phba->fcf.fcf_flag &=
5886 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5887 spin_unlock_irqrestore(&phba->hbalock, flags);
5888 }
5889}
5890
5891/**
5892 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5893 * @phba: pointer to lpfc hba data structure.
5894 *
5895 * This routine is invoked to set up the PCI device memory space for device
5896 * with SLI-4 interface spec.
5897 *
5898 * Return codes
5899 * 0 - successful
5900 * other values - error
5901 **/
5902static int
5903lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5904{
5905 struct pci_dev *pdev;
5906 unsigned long bar0map_len, bar1map_len, bar2map_len;
5907 int error = -ENODEV;
5908
5909 /* Obtain PCI device reference */
5910 if (!phba->pcidev)
5911 return error;
5912 else
5913 pdev = phba->pcidev;
5914
5915 /* Set the device DMA mask size */
5916 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5917 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5918 return error;
5919
5920 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5921 * number of bytes required by each mapping. They are actually
5922 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
5923 */
5924 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5925 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5926
5927 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5928 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5929
5930 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5931 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5932
5933 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5934 phba->sli4_hba.conf_regs_memmap_p =
5935 ioremap(phba->pci_bar0_map, bar0map_len);
5936 if (!phba->sli4_hba.conf_regs_memmap_p) {
5937 dev_printk(KERN_ERR, &pdev->dev,
5938 "ioremap failed for SLI4 PCI config registers.\n");
5939 goto out;
5940 }
5941
5942 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
5943 phba->sli4_hba.ctrl_regs_memmap_p =
5944 ioremap(phba->pci_bar1_map, bar1map_len);
5945 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5946 dev_printk(KERN_ERR, &pdev->dev,
5947 "ioremap failed for SLI4 HBA control registers.\n");
5948 goto out_iounmap_conf;
5949 }
5950
5951 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5952 phba->sli4_hba.drbl_regs_memmap_p =
5953 ioremap(phba->pci_bar2_map, bar2map_len);
5954 if (!phba->sli4_hba.drbl_regs_memmap_p) {
5955 dev_printk(KERN_ERR, &pdev->dev,
5956 "ioremap failed for SLI4 HBA doorbell registers.\n");
5957 goto out_iounmap_ctrl;
5958 }
5959
5960 /* Set up BAR0 PCI config space register memory map */
5961 lpfc_sli4_bar0_register_memmap(phba);
5962
5963 /* Set up BAR1 register memory map */
5964 lpfc_sli4_bar1_register_memmap(phba);
5965
5966 /* Set up BAR2 register memory map */
5967 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5968 if (error)
5969 goto out_iounmap_all;
5970
5971 return 0;
5972
5973out_iounmap_all:
5974 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5975out_iounmap_ctrl:
5976 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5977out_iounmap_conf:
5978 iounmap(phba->sli4_hba.conf_regs_memmap_p);
5979out:
5980 return error;
5981}
5982
5983/**
5984 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5985 * @phba: pointer to lpfc hba data structure.
5986 *
5987 * This routine is invoked to unset the PCI device memory space for device
5988 * with SLI-4 interface spec.
5989 **/
5990static void
5991lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5992{
5993 struct pci_dev *pdev;
5994
5995 /* Obtain PCI device reference */
5996 if (!phba->pcidev)
5997 return;
5998 else
5999 pdev = phba->pcidev;
6000
6001 /* Free coherent DMA memory allocated */
6002
6003 /* Unmap I/O memory space */
6004 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6005 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6006 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6007
6008 return;
6009}
6010
6011/**
6012 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6013 * @phba: pointer to lpfc hba data structure.
6014 *
6015 * This routine is invoked to enable the MSI-X interrupt vectors to device
6016 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6017 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6018 * invoked, enables either all or nothing, depending on the current
6019 * availability of PCI vector resources. The device driver is responsible
6020 * for calling the individual request_irq() to register each MSI-X vector
6021 * with an interrupt handler, which is done in this function. Note that
6022 * later when device is unloading, the driver should always call free_irq()
6023 * on all MSI-X vectors it has done request_irq() on before calling
6024 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
@@ -2333,7 +6029,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
6029 * other values - error
6030 **/
6031static int
2336lpfc_enable_msix(struct lpfc_hba *phba)
6032lpfc_sli_enable_msix(struct lpfc_hba *phba)
6033{
6034 int rc, i;
6035 LPFC_MBOXQ_t *pmb;
@@ -2349,20 +6045,21 @@ lpfc_enable_msix(struct lpfc_hba *phba)
6045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6046 "0420 PCI enable MSI-X failed (%d)\n", rc);
6047 goto msi_fail_out;
2352 } else
6048 }
6049 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6050 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6051 "0477 MSI-X entry[%d]: vector=x%x "
6052 "message=%d\n", i,
6053 phba->msix_entries[i].vector,
6054 phba->msix_entries[i].entry);
6055 /*
6056 * Assign MSI-X vectors to interrupt handlers
6057 */
6058
6059 /* vector-0 is associated to slow-path handler */
2364 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
2365 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
6060 rc = request_irq(phba->msix_entries[0].vector,
6061 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6062 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6063 if (rc) {
6064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6065 "0421 MSI-X slow-path request_irq failed "
@@ -2371,8 +6068,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
6068 }
6069
6070 /* vector-1 is associated to fast-path handler */
2374 rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
2375 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
6071 rc = request_irq(phba->msix_entries[1].vector,
6072 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6073 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6074
6075 if (rc) {
6076 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2401,7 +6099,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
6099 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6100 "0351 Config MSI mailbox command failed, "
6101 "mbxCmd x%x, mbxStatus x%x\n",
2404 pmb->mb.mbxCommand, pmb->mb.mbxStatus);
6102 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6103 goto mbx_fail_out;
6104 }
6105
@@ -2428,14 +6126,14 @@ msi_fail_out:
6126}
6127
6128/**
2431 * lpfc_disable_msix - Disable MSI-X interrupt mode
6129 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6130 * @phba: pointer to lpfc hba data structure.
6131 *
6132 * This routine is invoked to release the MSI-X vectors and then disable the
2435 * MSI-X interrupt mode.
6133 * MSI-X interrupt mode to device with SLI-3 interface spec.
6134 **/
6135static void
2438lpfc_disable_msix(struct lpfc_hba *phba)
6136lpfc_sli_disable_msix(struct lpfc_hba *phba)
6137{
6138 int i;
6139
@@ -2444,23 +6142,26 @@ lpfc_disable_msix(struct lpfc_hba *phba)
6142 free_irq(phba->msix_entries[i].vector, phba);
6143 /* Disable MSI-X */
6144 pci_disable_msix(phba->pcidev);
6145
6146 return;
6147}
6148
6149/**
2450 * lpfc_enable_msi - Enable MSI interrupt mode
6150 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6151 * @phba: pointer to lpfc hba data structure.
6152 *
2453 * This routine is invoked to enable the MSI interrupt mode. The kernel
2454 * function pci_enable_msi() is called to enable the MSI vector. The
2455 * device driver is responsible for calling the request_irq() to register
2456 * MSI vector with a interrupt the handler, which is done in this function.
6153 * This routine is invoked to enable the MSI interrupt mode to device with
6154 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6155 * enable the MSI vector. The device driver is responsible for calling the
6156 * request_irq() to register the MSI vector with an interrupt handler, which
6157 * is done in this function.
6158 *
6159 * Return codes
6160 * 0 - successful
6161 * other values - error
6162 */
6163static int
2463lpfc_enable_msi(struct lpfc_hba *phba)
6164lpfc_sli_enable_msi(struct lpfc_hba *phba)
6165{
6166 int rc;
6167
@@ -2474,7 +6175,7 @@ lpfc_enable_msi(struct lpfc_hba *phba)
6175 return rc;
6176 }
6177
2477 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
6178 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6179 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6180 if (rc) {
6181 pci_disable_msi(phba->pcidev);
@@ -2485,17 +6186,17 @@ lpfc_enable_msi(struct lpfc_hba *phba)
6186}
6187
6188/**
2488 * lpfc_disable_msi - Disable MSI interrupt mode
6189 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6190 * @phba: pointer to lpfc hba data structure.
6191 *
2491 * This routine is invoked to disable the MSI interrupt mode. The driver
2492 * calls free_irq() on MSI vector it has done request_irq() on before
2493 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
2494 * a device will be left with MSI enabled and leaks its vector.
6192 * This routine is invoked to disable the MSI interrupt mode to device with
6193 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
6194 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6195 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6196 * its vector.
6197 */
2496
6198static void
2498lpfc_disable_msi(struct lpfc_hba *phba)
6199lpfc_sli_disable_msi(struct lpfc_hba *phba)
6200{
6201 free_irq(phba->pcidev->irq, phba);
6202 pci_disable_msi(phba->pcidev);
@@ -2503,80 +6204,298 @@ lpfc_disable_msi(struct lpfc_hba *phba)
6204}
6205
6206/**
2506 * lpfc_log_intr_mode - Log the active interrupt mode
6207 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6208 * @phba: pointer to lpfc hba data structure.
2508 * @intr_mode: active interrupt mode adopted.
6209 *
2510 * This routine it invoked to log the currently used active interrupt mode
2511 * to the device.
2512 */
6210 * This routine is invoked to enable device interrupt and associate driver's
6211 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6212 * spec. Depending on the interrupt mode configured to the driver, the driver
6213 * will try to fallback from the configured interrupt mode to an interrupt
6214 * mode which is supported by the platform, kernel, and device in the order
6215 * of:
6216 * MSI-X -> MSI -> IRQ.
6217 *
6218 * Return codes
6219 * 0 - successful
6220 * other values - error
6221 **/
6222static uint32_t
6223lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6224{
6225 uint32_t intr_mode = LPFC_INTR_ERROR;
6226 int retval;
6227
6228 if (cfg_mode == 2) {
6229 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6230 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6231 if (!retval) {
6232 /* Now, try to enable MSI-X interrupt mode */
6233 retval = lpfc_sli_enable_msix(phba);
6234 if (!retval) {
6235 /* Indicate initialization to MSI-X mode */
6236 phba->intr_type = MSIX;
6237 intr_mode = 2;
6238 }
6239 }
6240 }
6241
6242 /* Fallback to MSI if MSI-X initialization failed */
6243 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6244 retval = lpfc_sli_enable_msi(phba);
6245 if (!retval) {
6246 /* Indicate initialization to MSI mode */
6247 phba->intr_type = MSI;
6248 intr_mode = 1;
6249 }
6250 }
6251
6252 /* Fallback to INTx if both MSI-X/MSI initialization failed */
6253 if (phba->intr_type == NONE) {
6254 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6255 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6256 if (!retval) {
6257 /* Indicate initialization to INTx mode */
6258 phba->intr_type = INTx;
6259 intr_mode = 0;
6260 }
6261 }
6262 return intr_mode;
6263}
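/*
 * Fallback summary for the routine above: cfg_mode 2 tries MSI-X, then
 * MSI, then INTx; cfg_mode 1 skips MSI-X and tries MSI, then INTx;
 * cfg_mode 0 goes straight to INTx. The returned intr_mode (2, 1 or 0)
 * reflects the mode that actually came up, or LPFC_INTR_ERROR if none did.
 */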
6264
6265/**
6266 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6267 * @phba: pointer to lpfc hba data structure.
6268 *
6269 * This routine is invoked to disable device interrupt and disassociate the
6270 * driver's interrupt handler(s) from interrupt vector(s) to device with
6271 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6272 * release the interrupt vector(s) for the message signaled interrupt.
6273 **/
2513static void
2514lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
2515{
2516 switch (intr_mode) {
2517 case 0:
2518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2519 "0470 Enable INTx interrupt mode.\n");
2520 break;
2521 case 1:
6274static void
6275lpfc_sli_disable_intr(struct lpfc_hba *phba)
6276{
6277 /* Disable the currently initialized interrupt mode */
6278 if (phba->intr_type == MSIX)
6279 lpfc_sli_disable_msix(phba);
6280 else if (phba->intr_type == MSI)
6281 lpfc_sli_disable_msi(phba);
6282 else if (phba->intr_type == INTx)
6283 free_irq(phba->pcidev->irq, phba);
6284
6285 /* Reset interrupt management states */
6286 phba->intr_type = NONE;
6287 phba->sli.slistat.sli_intr = 0;
6288
6289 return;
6290}
6291
6292/**
6293 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6294 * @phba: pointer to lpfc hba data structure.
6295 *
6296 * This routine is invoked to enable the MSI-X interrupt vectors to device
6297 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6298 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6299 * enables either all or nothing, depending on the current availability of
6300 * PCI vector resources. The device driver is responsible for calling the
6301 * individual request_irq() to register each MSI-X vector with an interrupt
6302 * handler, which is done in this function. Note that later when device is
6303 * unloading, the driver should always call free_irq() on all MSI-X vectors
6304 * it has done request_irq() on before calling pci_disable_msix(). Failure
6305 * to do so results in a BUG_ON() and a device will be left with MSI-X
6306 * enabled and leaks its vectors.
6307 *
6308 * Return codes
6309 * 0 - successful
6310 * other values - error
6311 **/
6312static int
6313lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6314{
6315 int rc, index;
6316
6317 /* Set up MSI-X multi-message vectors */
6318 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6319 phba->sli4_hba.msix_entries[index].entry = index;
6320
6321 /* Configure MSI-X capability structure */
6322 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6323 phba->sli4_hba.cfg_eqn);
6324 if (rc) {
2522 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2523 "0481 Enabled MSI interrupt mode.\n");
2524 break;
2525 case 2:
2526 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2527 "0480 Enabled MSI-X interrupt mode.\n");
2528 break;
2529 default:
2530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2531 "0482 Illegal interrupt mode.\n");
2532 break;
6325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6326 "0484 PCI enable MSI-X failed (%d)\n", rc);
6327 goto msi_fail_out;
6328 }
6329 /* Log MSI-X vector assignment */
6330 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6332 "0489 MSI-X entry[%d]: vector=x%x "
6333 "message=%d\n", index,
6334 phba->sli4_hba.msix_entries[index].vector,
6335 phba->sli4_hba.msix_entries[index].entry);
6336 /*
6337 * Assign MSI-X vectors to interrupt handlers
6338 */
6339
 6340 /* The first vector must be associated with the slow-path handler for MQ */
6341 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6342 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6343 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6344 if (rc) {
6345 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6346 "0485 MSI-X slow-path request_irq failed "
6347 "(%d)\n", rc);
6348 goto msi_fail_out;
2533 } 6349 }
2534 return; 6350
6351 /* The rest of the vector(s) are associated to fast-path handler(s) */
6352 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6353 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6354 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6355 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6356 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6357 LPFC_FP_DRIVER_HANDLER_NAME,
6358 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6359 if (rc) {
6360 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6361 "0486 MSI-X fast-path (%d) "
6362 "request_irq failed (%d)\n", index, rc);
6363 goto cfg_fail_out;
6364 }
6365 }
6366
6367 return rc;
6368
6369cfg_fail_out:
6370 /* free the irq already requested */
6371 for (--index; index >= 1; index--)
6372 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6373 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6374
6375 /* free the irq already requested */
6376 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6377
6378msi_fail_out:
6379 /* Unconfigure MSI-X capability structure */
6380 pci_disable_msix(phba->pcidev);
6381 return rc;
2535} 6382}
2536 6383
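Editor's aside: the kernel-doc above spells out the MSI-X contract this code relies on: pci_enable_msix() is all-or-nothing, every granted vector gets its own request_irq(), and every requested vector must be freed before pci_disable_msix(). The minimal sketch below illustrates that generic pattern only; it is not part of this patch, and my_enable_msix, my_dev, handler and data are placeholder names.

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Editor's illustrative sketch -- not part of this patch. */
static int my_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
                          int nvec, irq_handler_t handler, void *data)
{
        int i, rc;

        /* Each entry must carry its index before pci_enable_msix() */
        for (i = 0; i < nvec; i++)
                entries[i].entry = i;

        /* pci_enable_msix() here is all-or-nothing */
        rc = pci_enable_msix(pdev, entries, nvec);
        if (rc)
                return rc;

        /* Register a handler on every vector that was granted */
        for (i = 0; i < nvec; i++) {
                rc = request_irq(entries[i].vector, handler, 0, "my_dev", data);
                if (rc)
                        goto unwind;
        }
        return 0;

unwind:
        /* Free every vector already requested before disabling MSI-X */
        while (--i >= 0)
                free_irq(entries[i].vector, data);
        pci_disable_msix(pdev);
        return rc;
}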
6384/**
6385 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6386 * @phba: pointer to lpfc hba data structure.
6387 *
6388 * This routine is invoked to release the MSI-X vectors and then disable the
6389 * MSI-X interrupt mode to device with SLI-4 interface spec.
6390 **/
2537static void 6391static void
2538lpfc_stop_port(struct lpfc_hba *phba) 6392lpfc_sli4_disable_msix(struct lpfc_hba *phba)
2539{ 6393{
2540 /* Clear all interrupt enable conditions */ 6394 int index;
2541 writel(0, phba->HCregaddr);
2542 readl(phba->HCregaddr); /* flush */
2543 /* Clear all pending interrupts */
2544 writel(0xffffffff, phba->HAregaddr);
2545 readl(phba->HAregaddr); /* flush */
2546 6395
2547 /* Reset some HBA SLI setup states */ 6396 /* Free up MSI-X multi-message vectors */
2548 lpfc_stop_phba_timers(phba); 6397 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
2549 phba->pport->work_port_events = 0; 6398
6399 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6400 free_irq(phba->sli4_hba.msix_entries[index].vector,
6401 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6402 /* Disable MSI-X */
6403 pci_disable_msix(phba->pcidev);
6404
6405 return;
6406}
6407
6408/**
6409 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6410 * @phba: pointer to lpfc hba data structure.
6411 *
6412 * This routine is invoked to enable the MSI interrupt mode to device with
6413 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6414 * to enable the MSI vector. The device driver is responsible for calling
 6415 * request_irq() to register the MSI vector with an interrupt handler,
6416 * which is done in this function.
6417 *
6418 * Return codes
 6419 * 0 - successful
6420 * other values - error
6421 **/
6422static int
6423lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6424{
6425 int rc, index;
6426
6427 rc = pci_enable_msi(phba->pcidev);
6428 if (!rc)
6429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6430 "0487 PCI enable MSI mode success.\n");
6431 else {
6432 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6433 "0488 PCI enable MSI mode failed (%d)\n", rc);
6434 return rc;
6435 }
6436
6437 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6438 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6439 if (rc) {
6440 pci_disable_msi(phba->pcidev);
6441 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6442 "0490 MSI request_irq failed (%d)\n", rc);
6443 }
6444
6445 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6446 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6447 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6448 }
6449
6450 return rc;
6451}
2550 6452
6453/**
6454 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6455 * @phba: pointer to lpfc hba data structure.
6456 *
6457 * This routine is invoked to disable the MSI interrupt mode to device with
 6458 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
 6459 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 6460 * results in a BUG_ON() and the device will be left with MSI enabled and
 6461 * its vector leaked.
6462 **/
6463static void
6464lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6465{
6466 free_irq(phba->pcidev->irq, phba);
6467 pci_disable_msi(phba->pcidev);
2551 return; 6468 return;
2552} 6469}
2553 6470
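Editor's aside: the two kernel-doc blocks above describe the matching MSI discipline, request_irq() after pci_enable_msi() and free_irq() before pci_disable_msi(). A hedged sketch of that pairing follows, assuming the same includes as the MSI-X sketch earlier; my_enable_msi, my_disable_msi and my_dev are placeholder names, not part of this patch.

/* Editor's illustrative sketch -- not part of this patch. */
static int my_enable_msi(struct pci_dev *pdev, irq_handler_t handler, void *data)
{
        int rc = pci_enable_msi(pdev);

        if (rc)
                return rc;
        /* pdev->irq now refers to the MSI vector */
        rc = request_irq(pdev->irq, handler, 0, "my_dev", data);
        if (rc)
                pci_disable_msi(pdev);  /* undo the MSI enable on failure */
        return rc;
}

static void my_disable_msi(struct pci_dev *pdev, void *data)
{
        /* free_irq() must precede pci_disable_msi() */
        free_irq(pdev->irq, data);
        pci_disable_msi(pdev);
}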
2554/** 6471/**
2555 * lpfc_enable_intr - Enable device interrupt 6472 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
2556 * @phba: pointer to lpfc hba data structure. 6473 * @phba: pointer to lpfc hba data structure.
2557 * 6474 *
2558 * This routine is invoked to enable device interrupt and associate driver's 6475 * This routine is invoked to enable device interrupt and associate driver's
2559 * interrupt handler(s) to interrupt vector(s). Depends on the interrupt 6476 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
2560 * mode configured to the driver, the driver will try to fallback from the 6477 * interface spec. Depends on the interrupt mode configured to the driver,
2561 * configured interrupt mode to an interrupt mode which is supported by the 6478 * the driver will try to fallback from the configured interrupt mode to an
2562 * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. 6479 * interrupt mode which is supported by the platform, kernel, and device in
6480 * the order of:
6481 * MSI-X -> MSI -> IRQ.
2563 * 6482 *
2564 * Return codes 6483 * Return codes
2565 * 0 - successful 6484 * 0 - successful
2566 * other values - error 6485 * other values - error
2567 **/ 6486 **/
2568static uint32_t 6487static uint32_t
2569lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6488lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2570{ 6489{
2571 uint32_t intr_mode = LPFC_INTR_ERROR; 6490 uint32_t intr_mode = LPFC_INTR_ERROR;
2572 int retval; 6491 int retval, index;
2573 6492
2574 if (cfg_mode == 2) { 6493 if (cfg_mode == 2) {
2575 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6494 /* Preparation before conf_msi mbox cmd */
2576 retval = lpfc_sli_config_port(phba, 3); 6495 retval = 0;
2577 if (!retval) { 6496 if (!retval) {
2578 /* Now, try to enable MSI-X interrupt mode */ 6497 /* Now, try to enable MSI-X interrupt mode */
2579 retval = lpfc_enable_msix(phba); 6498 retval = lpfc_sli4_enable_msix(phba);
2580 if (!retval) { 6499 if (!retval) {
2581 /* Indicate initialization to MSI-X mode */ 6500 /* Indicate initialization to MSI-X mode */
2582 phba->intr_type = MSIX; 6501 phba->intr_type = MSIX;
@@ -2587,7 +6506,7 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2587 6506
2588 /* Fallback to MSI if MSI-X initialization failed */ 6507 /* Fallback to MSI if MSI-X initialization failed */
2589 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6508 if (cfg_mode >= 1 && phba->intr_type == NONE) {
2590 retval = lpfc_enable_msi(phba); 6509 retval = lpfc_sli4_enable_msi(phba);
2591 if (!retval) { 6510 if (!retval) {
2592 /* Indicate initialization to MSI mode */ 6511 /* Indicate initialization to MSI mode */
2593 phba->intr_type = MSI; 6512 phba->intr_type = MSI;
@@ -2597,34 +6516,39 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2597 6516
2598 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 6517 /* Fallback to INTx if both MSI-X/MSI initialization failed */
2599 if (phba->intr_type == NONE) { 6518 if (phba->intr_type == NONE) {
2600 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6519 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
2601 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6520 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2602 if (!retval) { 6521 if (!retval) {
2603 /* Indicate initialization to INTx mode */ 6522 /* Indicate initialization to INTx mode */
2604 phba->intr_type = INTx; 6523 phba->intr_type = INTx;
2605 intr_mode = 0; 6524 intr_mode = 0;
6525 for (index = 0; index < phba->cfg_fcp_eq_count;
6526 index++) {
6527 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6528 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6529 }
2606 } 6530 }
2607 } 6531 }
2608 return intr_mode; 6532 return intr_mode;
2609} 6533}
2610 6534
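Editor's aside: as the kernel-doc above states, interrupt setup degrades in the order MSI-X -> MSI -> INTx, with cfg_mode 2/1/0 selecting the highest mode to attempt. Below is a compressed, hypothetical sketch of that fallback, reusing the my_enable_msix()/my_enable_msi() helpers from the earlier editor sketches; pick_intr_mode and "my_dev" are placeholder names, not part of this patch.

/* Editor's illustrative sketch -- not part of this patch. */
static int pick_intr_mode(struct pci_dev *pdev, struct msix_entry *entries,
                          int nvec, irq_handler_t handler, void *data,
                          int cfg_mode)
{
        if (cfg_mode == 2 && !my_enable_msix(pdev, entries, nvec, handler, data))
                return 2;                       /* MSI-X */
        if (cfg_mode >= 1 && !my_enable_msi(pdev, handler, data))
                return 1;                       /* MSI */
        if (!request_irq(pdev->irq, handler, IRQF_SHARED, "my_dev", data))
                return 0;                       /* legacy INTx */
        return -ENODEV;                         /* no interrupt mode could be enabled */
}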
2611/** 6535/**
2612 * lpfc_disable_intr - Disable device interrupt 6536 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
2613 * @phba: pointer to lpfc hba data structure. 6537 * @phba: pointer to lpfc hba data structure.
2614 * 6538 *
2615 * This routine is invoked to disable device interrupt and disassociate the 6539 * This routine is invoked to disable device interrupt and disassociate
2616 * driver's interrupt handler(s) from interrupt vector(s). Depending on the 6540 * the driver's interrupt handler(s) from interrupt vector(s) to device
2617 * interrupt mode, the driver will release the interrupt vector(s) for the 6541 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
2618 * message signaled interrupt. 6542 * will release the interrupt vector(s) for the message signaled interrupt.
2619 **/ 6543 **/
2620static void 6544static void
2621lpfc_disable_intr(struct lpfc_hba *phba) 6545lpfc_sli4_disable_intr(struct lpfc_hba *phba)
2622{ 6546{
2623 /* Disable the currently initialized interrupt mode */ 6547 /* Disable the currently initialized interrupt mode */
2624 if (phba->intr_type == MSIX) 6548 if (phba->intr_type == MSIX)
2625 lpfc_disable_msix(phba); 6549 lpfc_sli4_disable_msix(phba);
2626 else if (phba->intr_type == MSI) 6550 else if (phba->intr_type == MSI)
2627 lpfc_disable_msi(phba); 6551 lpfc_sli4_disable_msi(phba);
2628 else if (phba->intr_type == INTx) 6552 else if (phba->intr_type == INTx)
2629 free_irq(phba->pcidev->irq, phba); 6553 free_irq(phba->pcidev->irq, phba);
2630 6554
@@ -2636,263 +6560,233 @@ lpfc_disable_intr(struct lpfc_hba *phba)
2636} 6560}
2637 6561
2638/** 6562/**
2639 * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem 6563 * lpfc_unset_hba - Unset SLI3 hba device initialization
2640 * @pdev: pointer to PCI device 6564 * @phba: pointer to lpfc hba data structure.
2641 * @pid: pointer to PCI device identifier
2642 *
2643 * This routine is to be registered to the kernel's PCI subsystem. When an
2644 * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
2645 * PCI device-specific information of the device and driver to see if the
2646 * driver state that it can support this kind of device. If the match is
2647 * successful, the driver core invokes this routine. If this routine
2648 * determines it can claim the HBA, it does all the initialization that it
2649 * needs to do to handle the HBA properly.
2650 * 6565 *
2651 * Return code 6566 * This routine is invoked to unset the HBA device initialization steps to
2652 * 0 - driver can claim the device 6567 * a device with SLI-3 interface spec.
2653 * negative value - driver can not claim the device
2654 **/ 6568 **/
2655static int __devinit 6569static void
2656lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 6570lpfc_unset_hba(struct lpfc_hba *phba)
2657{ 6571{
2658 struct lpfc_vport *vport = NULL; 6572 struct lpfc_vport *vport = phba->pport;
2659 struct lpfc_hba *phba; 6573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2660 struct lpfc_sli *psli;
2661 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
2662 struct Scsi_Host *shost = NULL;
2663 void *ptr;
2664 unsigned long bar0map_len, bar2map_len;
2665 int error = -ENODEV, retval;
2666 int i, hbq_count;
2667 uint16_t iotag;
2668 uint32_t cfg_mode, intr_mode;
2669 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2670 struct lpfc_adapter_event_header adapter_event;
2671
2672 if (pci_enable_device_mem(pdev))
2673 goto out;
2674 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
2675 goto out_disable_device;
2676 6574
2677 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 6575 spin_lock_irq(shost->host_lock);
2678 if (!phba) 6576 vport->load_flag |= FC_UNLOADING;
2679 goto out_release_regions; 6577 spin_unlock_irq(shost->host_lock);
2680 6578
2681 atomic_set(&phba->fast_event_count, 0); 6579 lpfc_stop_hba_timers(phba);
2682 spin_lock_init(&phba->hbalock);
2683 6580
2684 /* Initialize ndlp management spinlock */ 6581 phba->pport->work_port_events = 0;
2685 spin_lock_init(&phba->ndlp_lock);
2686 6582
2687 phba->pcidev = pdev; 6583 lpfc_sli_hba_down(phba);
2688 6584
2689 /* Assign an unused board number */ 6585 lpfc_sli_brdrestart(phba);
2690 if ((phba->brd_no = lpfc_get_instance()) < 0)
2691 goto out_free_phba;
2692 6586
2693 INIT_LIST_HEAD(&phba->port_list); 6587 lpfc_sli_disable_intr(phba);
2694 init_waitqueue_head(&phba->wait_4_mlo_m_q);
2695 /*
2696 * Get all the module params for configuring this host and then
2697 * establish the host.
2698 */
2699 lpfc_get_cfgparam(phba);
2700 phba->max_vpi = LPFC_MAX_VPI;
2701 6588
2702 /* Initialize timers used by driver */ 6589 return;
2703 init_timer(&phba->hb_tmofunc); 6590}
2704 phba->hb_tmofunc.function = lpfc_hb_timeout;
2705 phba->hb_tmofunc.data = (unsigned long)phba;
2706 6591
2707 psli = &phba->sli; 6592/**
2708 init_timer(&psli->mbox_tmo); 6593 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
2709 psli->mbox_tmo.function = lpfc_mbox_timeout; 6594 * @phba: pointer to lpfc hba data structure.
2710 psli->mbox_tmo.data = (unsigned long) phba; 6595 *
2711 init_timer(&phba->fcp_poll_timer); 6596 * This routine is invoked to unset the HBA device initialization steps to
2712 phba->fcp_poll_timer.function = lpfc_poll_timeout; 6597 * a device with SLI-4 interface spec.
2713 phba->fcp_poll_timer.data = (unsigned long) phba; 6598 **/
2714 init_timer(&phba->fabric_block_timer); 6599static void
2715 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 6600lpfc_sli4_unset_hba(struct lpfc_hba *phba)
2716 phba->fabric_block_timer.data = (unsigned long) phba; 6601{
2717 init_timer(&phba->eratt_poll); 6602 struct lpfc_vport *vport = phba->pport;
2718 phba->eratt_poll.function = lpfc_poll_eratt; 6603 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2719 phba->eratt_poll.data = (unsigned long) phba;
2720 6604
2721 pci_set_master(pdev); 6605 spin_lock_irq(shost->host_lock);
2722 pci_save_state(pdev); 6606 vport->load_flag |= FC_UNLOADING;
2723 pci_try_set_mwi(pdev); 6607 spin_unlock_irq(shost->host_lock);
2724 6608
2725 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) 6609 phba->pport->work_port_events = 0;
2726 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0)
2727 goto out_idr_remove;
2728 6610
2729 /* 6611 lpfc_sli4_hba_down(phba);
2730 * Get the bus address of Bar0 and Bar2 and the number of bytes
2731 * required by each mapping.
2732 */
2733 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
2734 bar0map_len = pci_resource_len(phba->pcidev, 0);
2735 6612
2736 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 6613 lpfc_sli4_disable_intr(phba);
2737 bar2map_len = pci_resource_len(phba->pcidev, 2);
2738 6614
2739 /* Map HBA SLIM to a kernel virtual address. */ 6615 return;
2740 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6616}
2741 if (!phba->slim_memmap_p) {
2742 error = -ENODEV;
2743 dev_printk(KERN_ERR, &pdev->dev,
2744 "ioremap failed for SLIM memory.\n");
2745 goto out_idr_remove;
2746 }
2747
2748 /* Map HBA Control Registers to a kernel virtual address. */
2749 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
2750 if (!phba->ctrl_regs_memmap_p) {
2751 error = -ENODEV;
2752 dev_printk(KERN_ERR, &pdev->dev,
2753 "ioremap failed for HBA control registers.\n");
2754 goto out_iounmap_slim;
2755 }
2756 6617
2757 /* Allocate memory for SLI-2 structures */ 6618/**
2758 phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, 6619 * lpfc_sli4_hba_unset - Unset the fcoe hba
2759 SLI2_SLIM_SIZE, 6620 * @phba: Pointer to HBA context object.
2760 &phba->slim2p.phys, 6621 *
2761 GFP_KERNEL); 6622 * This function is called in the SLI4 code path to reset the HBA's FCoE
2762 if (!phba->slim2p.virt) 6623 * function. The caller is not required to hold any lock. This routine
2763 goto out_iounmap; 6624 * issues PCI function reset mailbox command to reset the FCoE function.
6625 * At the end of the function, it calls lpfc_hba_down_post function to
6626 * free any pending commands.
6627 **/
6628static void
6629lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6630{
6631 int wait_cnt = 0;
6632 LPFC_MBOXQ_t *mboxq;
2764 6633
2765 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 6634 lpfc_stop_hba_timers(phba);
2766 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6635 phba->sli4_hba.intr_enable = 0;
2767 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2768 phba->IOCBs = (phba->slim2p.virt +
2769 offsetof(struct lpfc_sli2_slim, IOCBs));
2770 6636
2771 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 6637 /*
2772 lpfc_sli_hbq_size(), 6638 * Gracefully wait out the potential current outstanding asynchronous
2773 &phba->hbqslimp.phys, 6639 * mailbox command.
2774 GFP_KERNEL); 6640 */
2775 if (!phba->hbqslimp.virt)
2776 goto out_free_slim;
2777 6641
2778 hbq_count = lpfc_sli_hbq_count(); 6642 /* First, block any pending async mailbox command from posted */
2779 ptr = phba->hbqslimp.virt; 6643 spin_lock_irq(&phba->hbalock);
2780 for (i = 0; i < hbq_count; ++i) { 6644 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
2781 phba->hbqs[i].hbq_virt = ptr; 6645 spin_unlock_irq(&phba->hbalock);
2782 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6646 /* Now, trying to wait it out if we can */
2783 ptr += (lpfc_hbq_defs[i]->entry_count * 6647 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2784 sizeof(struct lpfc_hbq_entry)); 6648 msleep(10);
6649 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6650 break;
2785 } 6651 }
2786 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6652 /* Forcefully release the outstanding mailbox command if timed out */
2787 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6653 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2788 6654 spin_lock_irq(&phba->hbalock);
2789 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6655 mboxq = phba->sli.mbox_active;
2790 6656 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
2791 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 6657 __lpfc_mbox_cmpl_put(phba, mboxq);
2792 6658 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2793 /* Initialize the SLI Layer to run with lpfc HBAs. */ 6659 phba->sli.mbox_active = NULL;
2794 lpfc_sli_setup(phba); 6660 spin_unlock_irq(&phba->hbalock);
2795 lpfc_sli_queue_setup(phba);
2796
2797 retval = lpfc_mem_alloc(phba);
2798 if (retval) {
2799 error = retval;
2800 goto out_free_hbqslimp;
2801 } 6661 }
2802 6662
2803 /* Initialize and populate the iocb list per host. */ 6663 /* Tear down the queues in the HBA */
2804 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6664 lpfc_sli4_queue_unset(phba);
2805 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
2806 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
2807 if (iocbq_entry == NULL) {
2808 printk(KERN_ERR "%s: only allocated %d iocbs of "
2809 "expected %d count. Unloading driver.\n",
2810 __func__, i, LPFC_IOCB_LIST_CNT);
2811 error = -ENOMEM;
2812 goto out_free_iocbq;
2813 }
2814 6665
2815 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6666 /* Disable PCI subsystem interrupt */
2816 if (iotag == 0) { 6667 lpfc_sli4_disable_intr(phba);
2817 kfree (iocbq_entry);
2818 printk(KERN_ERR "%s: failed to allocate IOTAG. "
2819 "Unloading driver.\n",
2820 __func__);
2821 error = -ENOMEM;
2822 goto out_free_iocbq;
2823 }
2824 6668
2825 spin_lock_irq(&phba->hbalock); 6669 /* Stop kthread signal shall trigger work_done one more time */
2826 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6670 kthread_stop(phba->worker_thread);
2827 phba->total_iocbq_bufs++;
2828 spin_unlock_irq(&phba->hbalock);
2829 }
2830 6671
2831 /* Initialize HBA structure */ 6672 /* Stop the SLI4 device port */
2832 phba->fc_edtov = FF_DEF_EDTOV; 6673 phba->pport->work_port_events = 0;
2833 phba->fc_ratov = FF_DEF_RATOV; 6674}
2834 phba->fc_altov = FF_DEF_ALTOV;
2835 phba->fc_arbtov = FF_DEF_ARBTOV;
2836 6675
2837 INIT_LIST_HEAD(&phba->work_list); 6676/**
2838 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6677 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
2839 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6678 * @pdev: pointer to PCI device
6679 * @pid: pointer to PCI device identifier
6680 *
6681 * This routine is to be called to attach a device with SLI-3 interface spec
6682 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6683 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6684 * information of the device and driver to see if the driver state that it can
6685 * support this kind of device. If the match is successful, the driver core
6686 * invokes this routine. If this routine determines it can claim the HBA, it
6687 * does all the initialization that it needs to do to handle the HBA properly.
6688 *
6689 * Return code
6690 * 0 - driver can claim the device
6691 * negative value - driver can not claim the device
6692 **/
6693static int __devinit
6694lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6695{
6696 struct lpfc_hba *phba;
6697 struct lpfc_vport *vport = NULL;
6698 int error;
6699 uint32_t cfg_mode, intr_mode;
2840 6700
2841 /* Initialize the wait queue head for the kernel thread */ 6701 /* Allocate memory for HBA structure */
2842 init_waitqueue_head(&phba->work_waitq); 6702 phba = lpfc_hba_alloc(pdev);
6703 if (!phba)
6704 return -ENOMEM;
2843 6705
2844 /* Startup the kernel thread for this host adapter. */ 6706 /* Perform generic PCI device enabling operation */
2845 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6707 error = lpfc_enable_pci_dev(phba);
2846 "lpfc_worker_%d", phba->brd_no); 6708 if (error) {
2847 if (IS_ERR(phba->worker_thread)) { 6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2848 error = PTR_ERR(phba->worker_thread); 6710 "1401 Failed to enable pci device.\n");
2849 goto out_free_iocbq; 6711 goto out_free_phba;
2850 } 6712 }
2851 6713
2852 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 6714 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
2853 spin_lock_init(&phba->scsi_buf_list_lock); 6715 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
2854 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 6716 if (error)
6717 goto out_disable_pci_dev;
2855 6718
2856 /* Initialize list of fabric iocbs */ 6719 /* Set up SLI-3 specific device PCI memory space */
2857 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6720 error = lpfc_sli_pci_mem_setup(phba);
6721 if (error) {
6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723 "1402 Failed to set up pci memory space.\n");
6724 goto out_disable_pci_dev;
6725 }
2858 6726
2859 /* Initialize list to save ELS buffers */ 6727 /* Set up phase-1 common device driver resources */
2860 INIT_LIST_HEAD(&phba->elsbuf); 6728 error = lpfc_setup_driver_resource_phase1(phba);
6729 if (error) {
6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6731 "1403 Failed to set up driver resource.\n");
6732 goto out_unset_pci_mem_s3;
6733 }
2861 6734
2862 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6735 /* Set up SLI-3 specific device driver resources */
2863 if (!vport) 6736 error = lpfc_sli_driver_resource_setup(phba);
2864 goto out_kthread_stop; 6737 if (error) {
6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739 "1404 Failed to set up driver resource.\n");
6740 goto out_unset_pci_mem_s3;
6741 }
2865 6742
2866 shost = lpfc_shost_from_vport(vport); 6743 /* Initialize and populate the iocb list per host */
2867 phba->pport = vport; 6744 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
2868 lpfc_debugfs_initialize(vport); 6745 if (error) {
6746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6747 "1405 Failed to initialize iocb list.\n");
6748 goto out_unset_driver_resource_s3;
6749 }
2869 6750
2870 pci_set_drvdata(pdev, shost); 6751 /* Set up common device driver resources */
6752 error = lpfc_setup_driver_resource_phase2(phba);
6753 if (error) {
6754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6755 "1406 Failed to set up driver resource.\n");
6756 goto out_free_iocb_list;
6757 }
2871 6758
2872 phba->MBslimaddr = phba->slim_memmap_p; 6759 /* Create SCSI host to the physical port */
2873 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6760 error = lpfc_create_shost(phba);
2874 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6761 if (error) {
2875 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2876 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6763 "1407 Failed to create scsi host.\n");
6764 goto out_unset_driver_resource;
6765 }
2877 6766
2878 /* Configure sysfs attributes */ 6767 /* Configure sysfs attributes */
2879 if (lpfc_alloc_sysfs_attr(vport)) { 6768 vport = phba->pport;
6769 error = lpfc_alloc_sysfs_attr(vport);
6770 if (error) {
2880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2881 "1476 Failed to allocate sysfs attr\n"); 6772 "1476 Failed to allocate sysfs attr\n");
2882 error = -ENOMEM; 6773 goto out_destroy_shost;
2883 goto out_destroy_port;
2884 } 6774 }
2885 6775
6776 /* Now, trying to enable interrupt and bring up the device */
2886 cfg_mode = phba->cfg_use_msi; 6777 cfg_mode = phba->cfg_use_msi;
2887 while (true) { 6778 while (true) {
6779 /* Put device to a known state before enabling interrupt */
6780 lpfc_stop_port(phba);
2888 /* Configure and enable interrupt */ 6781 /* Configure and enable interrupt */
2889 intr_mode = lpfc_enable_intr(phba, cfg_mode); 6782 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
2890 if (intr_mode == LPFC_INTR_ERROR) { 6783 if (intr_mode == LPFC_INTR_ERROR) {
2891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2892 "0426 Failed to enable interrupt.\n"); 6785 "0431 Failed to enable interrupt.\n");
6786 error = -ENODEV;
2893 goto out_free_sysfs_attr; 6787 goto out_free_sysfs_attr;
2894 } 6788 }
2895 /* HBA SLI setup */ 6789 /* SLI-3 HBA setup */
2896 if (lpfc_sli_hba_setup(phba)) { 6790 if (lpfc_sli_hba_setup(phba)) {
2897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2898 "1477 Failed to set up hba\n"); 6792 "1477 Failed to set up hba\n");
@@ -2902,185 +6796,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2902 6796
2903 /* Wait 50ms for the interrupts of previous mailbox commands */ 6797 /* Wait 50ms for the interrupts of previous mailbox commands */
2904 msleep(50); 6798 msleep(50);
2905 /* Check active interrupts received */ 6799 /* Check active interrupts on message signaled interrupts */
2906 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 6800 if (intr_mode == 0 ||
6801 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2907 /* Log the current active interrupt mode */ 6802 /* Log the current active interrupt mode */
2908 phba->intr_mode = intr_mode; 6803 phba->intr_mode = intr_mode;
2909 lpfc_log_intr_mode(phba, intr_mode); 6804 lpfc_log_intr_mode(phba, intr_mode);
2910 break; 6805 break;
2911 } else { 6806 } else {
2912 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2913 "0451 Configure interrupt mode (%d) " 6808 "0447 Configure interrupt mode (%d) "
2914 "failed active interrupt test.\n", 6809 "failed active interrupt test.\n",
2915 intr_mode); 6810 intr_mode);
2916 if (intr_mode == 0) {
2917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2918 "0479 Failed to enable "
2919 "interrupt.\n");
2920 error = -ENODEV;
2921 goto out_remove_device;
2922 }
2923 /* Stop HBA SLI setups */
2924 lpfc_stop_port(phba);
2925 /* Disable the current interrupt mode */ 6811 /* Disable the current interrupt mode */
2926 lpfc_disable_intr(phba); 6812 lpfc_sli_disable_intr(phba);
2927 /* Try next level of interrupt mode */ 6813 /* Try next level of interrupt mode */
2928 cfg_mode = --intr_mode; 6814 cfg_mode = --intr_mode;
2929 } 6815 }
2930 } 6816 }
2931 6817
2932 /* 6818 /* Perform post initialization setup */
2933 * hba setup may have changed the hba_queue_depth so we need to adjust 6819 lpfc_post_init_setup(phba);
2934 * the value of can_queue.
2935 */
2936 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2937 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2938
2939 if (lpfc_prot_mask && lpfc_prot_guard) {
2940 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2941 "1478 Registering BlockGuard with the "
2942 "SCSI layer\n");
2943 6820
2944 scsi_host_set_prot(shost, lpfc_prot_mask); 6821 /* Check if there are static vports to be created. */
2945 scsi_host_set_guard(shost, lpfc_prot_guard); 6822 lpfc_create_static_vport(phba);
2946 }
2947 }
2948
2949 if (!_dump_buf_data) {
2950 int pagecnt = 10;
2951 while (pagecnt) {
2952 spin_lock_init(&_dump_buf_lock);
2953 _dump_buf_data =
2954 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2955 if (_dump_buf_data) {
2956 printk(KERN_ERR "BLKGRD allocated %d pages for "
2957 "_dump_buf_data at 0x%p\n",
2958 (1 << pagecnt), _dump_buf_data);
2959 _dump_buf_data_order = pagecnt;
2960 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2961 << pagecnt));
2962 break;
2963 } else {
2964 --pagecnt;
2965 }
2966
2967 }
2968
2969 if (!_dump_buf_data_order)
2970 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2971 "memory for hexdump\n");
2972
2973 } else {
2974 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2975 "\n", _dump_buf_data);
2976 }
2977
2978
2979 if (!_dump_buf_dif) {
2980 int pagecnt = 10;
2981 while (pagecnt) {
2982 _dump_buf_dif =
2983 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2984 if (_dump_buf_dif) {
2985 printk(KERN_ERR "BLKGRD allocated %d pages for "
2986 "_dump_buf_dif at 0x%p\n",
2987 (1 << pagecnt), _dump_buf_dif);
2988 _dump_buf_dif_order = pagecnt;
2989 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2990 << pagecnt));
2991 break;
2992 } else {
2993 --pagecnt;
2994 }
2995
2996 }
2997
2998 if (!_dump_buf_dif_order)
2999 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
3000 "memory for hexdump\n");
3001
3002 } else {
3003 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
3004 _dump_buf_dif);
3005 }
3006
3007 lpfc_host_attrib_init(shost);
3008
3009 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3010 spin_lock_irq(shost->host_lock);
3011 lpfc_poll_start_timer(phba);
3012 spin_unlock_irq(shost->host_lock);
3013 }
3014
3015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3016 "0428 Perform SCSI scan\n");
3017 /* Send board arrival event to upper layer */
3018 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
3019 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
3020 fc_host_post_vendor_event(shost, fc_get_event_number(),
3021 sizeof(adapter_event),
3022 (char *) &adapter_event,
3023 LPFC_NL_VENDOR_ID);
3024 6823
3025 return 0; 6824 return 0;
3026 6825
3027out_remove_device: 6826out_remove_device:
3028 spin_lock_irq(shost->host_lock); 6827 lpfc_unset_hba(phba);
3029 vport->load_flag |= FC_UNLOADING;
3030 spin_unlock_irq(shost->host_lock);
3031 lpfc_stop_phba_timers(phba);
3032 phba->pport->work_port_events = 0;
3033 lpfc_disable_intr(phba);
3034 lpfc_sli_hba_down(phba);
3035 lpfc_sli_brdrestart(phba);
3036out_free_sysfs_attr: 6828out_free_sysfs_attr:
3037 lpfc_free_sysfs_attr(vport); 6829 lpfc_free_sysfs_attr(vport);
3038out_destroy_port: 6830out_destroy_shost:
3039 destroy_port(vport); 6831 lpfc_destroy_shost(phba);
3040out_kthread_stop: 6832out_unset_driver_resource:
3041 kthread_stop(phba->worker_thread); 6833 lpfc_unset_driver_resource_phase2(phba);
3042out_free_iocbq: 6834out_free_iocb_list:
3043 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6835 lpfc_free_iocb_list(phba);
3044 &phba->lpfc_iocb_list, list) { 6836out_unset_driver_resource_s3:
3045 kfree(iocbq_entry); 6837 lpfc_sli_driver_resource_unset(phba);
3046 phba->total_iocbq_bufs--; 6838out_unset_pci_mem_s3:
3047 } 6839 lpfc_sli_pci_mem_unset(phba);
3048 lpfc_mem_free(phba); 6840out_disable_pci_dev:
3049out_free_hbqslimp: 6841 lpfc_disable_pci_dev(phba);
3050 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3051 phba->hbqslimp.virt, phba->hbqslimp.phys);
3052out_free_slim:
3053 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3054 phba->slim2p.virt, phba->slim2p.phys);
3055out_iounmap:
3056 iounmap(phba->ctrl_regs_memmap_p);
3057out_iounmap_slim:
3058 iounmap(phba->slim_memmap_p);
3059out_idr_remove:
3060 idr_remove(&lpfc_hba_index, phba->brd_no);
3061out_free_phba: 6842out_free_phba:
3062 kfree(phba); 6843 lpfc_hba_free(phba);
3063out_release_regions:
3064 pci_release_selected_regions(pdev, bars);
3065out_disable_device:
3066 pci_disable_device(pdev);
3067out:
3068 pci_set_drvdata(pdev, NULL);
3069 if (shost)
3070 scsi_host_put(shost);
3071 return error; 6844 return error;
3072} 6845}
3073 6846
3074/** 6847/**
3075 * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem 6848 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
3076 * @pdev: pointer to PCI device 6849 * @pdev: pointer to PCI device
3077 * 6850 *
3078 * This routine is to be registered to the kernel's PCI subsystem. When an 6851 * This routine is to be called to detach a device with SLI-3 interface
3079 * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup 6852 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
3080 * for the HBA device to be removed from the PCI subsystem properly. 6853 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6854 * device to be removed from the PCI subsystem properly.
3081 **/ 6855 **/
3082static void __devexit 6856static void __devexit
3083lpfc_pci_remove_one(struct pci_dev *pdev) 6857lpfc_pci_remove_one_s3(struct pci_dev *pdev)
3084{ 6858{
3085 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6859 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6860 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3098,7 +6872,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3098 /* Release all the vports against this physical port */ 6872 /* Release all the vports against this physical port */
3099 vports = lpfc_create_vport_work_array(phba); 6873 vports = lpfc_create_vport_work_array(phba);
3100 if (vports != NULL) 6874 if (vports != NULL)
3101 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) 6875 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
3102 fc_vport_terminate(vports[i]->fc_vport); 6876 fc_vport_terminate(vports[i]->fc_vport);
3103 lpfc_destroy_vport_work_array(phba, vports); 6877 lpfc_destroy_vport_work_array(phba, vports);
3104 6878
@@ -3120,7 +6894,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3120 /* Final cleanup of txcmplq and reset the HBA */ 6894 /* Final cleanup of txcmplq and reset the HBA */
3121 lpfc_sli_brdrestart(phba); 6895 lpfc_sli_brdrestart(phba);
3122 6896
3123 lpfc_stop_phba_timers(phba); 6897 lpfc_stop_hba_timers(phba);
3124 spin_lock_irq(&phba->hbalock); 6898 spin_lock_irq(&phba->hbalock);
3125 list_del_init(&vport->listentry); 6899 list_del_init(&vport->listentry);
3126 spin_unlock_irq(&phba->hbalock); 6900 spin_unlock_irq(&phba->hbalock);
@@ -3128,7 +6902,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3128 lpfc_debugfs_terminate(vport); 6902 lpfc_debugfs_terminate(vport);
3129 6903
3130 /* Disable interrupt */ 6904 /* Disable interrupt */
3131 lpfc_disable_intr(phba); 6905 lpfc_sli_disable_intr(phba);
3132 6906
3133 pci_set_drvdata(pdev, NULL); 6907 pci_set_drvdata(pdev, NULL);
3134 scsi_host_put(shost); 6908 scsi_host_put(shost);
@@ -3138,7 +6912,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3138 * corresponding pools here. 6912 * corresponding pools here.
3139 */ 6913 */
3140 lpfc_scsi_free(phba); 6914 lpfc_scsi_free(phba);
3141 lpfc_mem_free(phba); 6915 lpfc_mem_free_all(phba);
3142 6916
3143 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6917 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3144 phba->hbqslimp.virt, phba->hbqslimp.phys); 6918 phba->hbqslimp.virt, phba->hbqslimp.phys);
@@ -3151,36 +6925,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3151 iounmap(phba->ctrl_regs_memmap_p); 6925 iounmap(phba->ctrl_regs_memmap_p);
3152 iounmap(phba->slim_memmap_p); 6926 iounmap(phba->slim_memmap_p);
3153 6927
3154 idr_remove(&lpfc_hba_index, phba->brd_no); 6928 lpfc_hba_free(phba);
3155
3156 kfree(phba);
3157 6929
3158 pci_release_selected_regions(pdev, bars); 6930 pci_release_selected_regions(pdev, bars);
3159 pci_disable_device(pdev); 6931 pci_disable_device(pdev);
3160} 6932}
3161 6933
3162/** 6934/**
3163 * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management 6935 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
3164 * @pdev: pointer to PCI device 6936 * @pdev: pointer to PCI device
3165 * @msg: power management message 6937 * @msg: power management message
3166 * 6938 *
3167 * This routine is to be registered to the kernel's PCI subsystem to support 6939 * This routine is to be called from the kernel's PCI subsystem to support
3168 * system Power Management (PM). When PM invokes this method, it quiesces the 6940 * system Power Management (PM) to device with SLI-3 interface spec. When
3169 * device by stopping the driver's worker thread for the device, turning off 6941 * PM invokes this method, it quiesces the device by stopping the driver's
3170 * device's interrupt and DMA, and bringing the device offline. Note that as the 6942 * and bringing the device offline. Note that as the driver implements the
3171 * driver implements the minimum PM requirements to a power-aware driver's PM 6943 * and bring the device offline. Note that as the driver implements the
3172 * support for suspend/resume -- all the possible PM messages (SUSPEND, 6944 * minimum PM requirements to a power-aware driver's PM support for the
3173 * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND 6945 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
3174 * and the driver will fully reinitialize its device during resume() method 6946 * to the suspend() method call will be treated as SUSPEND and the driver will
3175 * call, the driver will set device to PCI_D3hot state in PCI config space 6947 * fully reinitialize its device during resume() method call, the driver will
3176 * instead of setting it according to the @msg provided by the PM. 6948 * set device to PCI_D3hot state in PCI config space instead of setting it
6949 * according to the @msg provided by the PM.
3177 * 6950 *
3178 * Return code 6951 * Return code
3179 * 0 - driver suspended the device 6952 * 0 - driver suspended the device
3180 * Error otherwise 6953 * Error otherwise
3181 **/ 6954 **/
3182static int 6955static int
3183lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 6956lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
3184{ 6957{
3185 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6958 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3186 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6959 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3194,7 +6967,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3194 kthread_stop(phba->worker_thread); 6967 kthread_stop(phba->worker_thread);
3195 6968
3196 /* Disable interrupt from device */ 6969 /* Disable interrupt from device */
3197 lpfc_disable_intr(phba); 6970 lpfc_sli_disable_intr(phba);
3198 6971
3199 /* Save device state to PCI config space */ 6972 /* Save device state to PCI config space */
3200 pci_save_state(pdev); 6973 pci_save_state(pdev);
@@ -3204,25 +6977,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3204} 6977}
3205 6978
3206/** 6979/**
3207 * lpfc_pci_resume_one - lpfc PCI func to resume device for power management 6980 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
3208 * @pdev: pointer to PCI device 6981 * @pdev: pointer to PCI device
3209 * 6982 *
3210 * This routine is to be registered to the kernel's PCI subsystem to support 6983 * This routine is to be called from the kernel's PCI subsystem to support
3211 * system Power Management (PM). When PM invokes this method, it restores 6984 * system Power Management (PM) to device with SLI-3 interface spec. When PM
3212 * the device's PCI config space state and fully reinitializes the device 6985 * invokes this method, it restores the device's PCI config space state and
3213 * and brings it online. Note that as the driver implements the minimum PM 6986 * fully reinitializes the device and brings it online. Note that as the
3214 * requirements to a power-aware driver's PM for suspend/resume -- all 6987 * driver implements the minimum PM requirements to a power-aware driver's
3215 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 6988 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
3216 * method call will be treated as SUSPEND and the driver will fully 6989 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
3217 * reinitialize its device during resume() method call, the device will be 6990 * driver will fully reinitialize its device during resume() method call,
3218 * set to PCI_D0 directly in PCI config space before restoring the state. 6991 * the device will be set to PCI_D0 directly in PCI config space before
6992 * restoring the state.
3219 * 6993 *
3220 * Return code 6994 * Return code
3221 * 0 - driver suspended the device 6995 * 0 - driver suspended the device
3222 * Error otherwise 6996 * Error otherwise
3223 **/ 6997 **/
3224static int 6998static int
3225lpfc_pci_resume_one(struct pci_dev *pdev) 6999lpfc_pci_resume_one_s3(struct pci_dev *pdev)
3226{ 7000{
3227 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7001 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3228 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7002 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3250,7 +7024,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3250 } 7024 }
3251 7025
3252 /* Configure and enable interrupt */ 7026 /* Configure and enable interrupt */
3253 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7027 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3254 if (intr_mode == LPFC_INTR_ERROR) { 7028 if (intr_mode == LPFC_INTR_ERROR) {
3255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3256 "0430 PM resume Failed to enable interrupt\n"); 7030 "0430 PM resume Failed to enable interrupt\n");
@@ -3269,23 +7043,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3269} 7043}
3270 7044
3271/** 7045/**
3272 * lpfc_io_error_detected - Driver method for handling PCI I/O error detected 7046 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
3273 * @pdev: pointer to PCI device. 7047 * @pdev: pointer to PCI device.
3274 * @state: the current PCI connection state. 7048 * @state: the current PCI connection state.
3275 * 7049 *
3276 * This routine is registered to the PCI subsystem for error handling. This 7050 * This routine is called from the PCI subsystem for I/O error handling to
3277 * function is called by the PCI subsystem after a PCI bus error affecting 7051 * device with SLI-3 interface spec. This function is called by the PCI
3278 * this device has been detected. When this function is invoked, it will 7052 * subsystem after a PCI bus error affecting this device has been detected.
3279 * need to stop all the I/Os and interrupt(s) to the device. Once that is 7053 * When this function is invoked, it will need to stop all the I/Os and
3280 * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to 7054 * interrupt(s) to the device. Once that is done, it will return
3281 * perform proper recovery as desired. 7055 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7056 * as desired.
3282 * 7057 *
3283 * Return codes 7058 * Return codes
3284 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7059 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
3285 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7060 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3286 **/ 7061 **/
3287static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 7062static pci_ers_result_t
3288 pci_channel_state_t state) 7063lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
3289{ 7064{
3290 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7065 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3291 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7066 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3312,30 +7087,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
3312 lpfc_sli_abort_iocb_ring(phba, pring); 7087 lpfc_sli_abort_iocb_ring(phba, pring);
3313 7088
3314 /* Disable interrupt */ 7089 /* Disable interrupt */
3315 lpfc_disable_intr(phba); 7090 lpfc_sli_disable_intr(phba);
3316 7091
3317 /* Request a slot reset. */ 7092 /* Request a slot reset. */
3318 return PCI_ERS_RESULT_NEED_RESET; 7093 return PCI_ERS_RESULT_NEED_RESET;
3319} 7094}
3320 7095
3321/** 7096/**
3322 * lpfc_io_slot_reset - Restart a PCI device from scratch 7097 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
3323 * @pdev: pointer to PCI device. 7098 * @pdev: pointer to PCI device.
3324 * 7099 *
3325 * This routine is registered to the PCI subsystem for error handling. This is 7100 * This routine is called from the PCI subsystem for error handling to
3326 * called after PCI bus has been reset to restart the PCI card from scratch, 7101 * device with SLI-3 interface spec. This is called after PCI bus has been
3327 * as if from a cold-boot. During the PCI subsystem error recovery, after the 7102 * reset to restart the PCI card from scratch, as if from a cold-boot.
3328 * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform 7103 * During the PCI subsystem error recovery, after driver returns
3329 * proper error recovery and then call this routine before calling the .resume 7104 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
3330 * method to recover the device. This function will initialize the HBA device, 7105 * recovery and then call this routine before calling the .resume method
3331 * enable the interrupt, but it will just put the HBA to offline state without 7106 * to recover the device. This function will initialize the HBA device,
3332 * passing any I/O traffic. 7107 * enable the interrupt, but it will just put the HBA to offline state
7108 * without passing any I/O traffic.
3333 * 7109 *
3334 * Return codes 7110 * Return codes
3335 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7111 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
3336 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7112 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3337 */ 7113 */
3338static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 7114static pci_ers_result_t
7115lpfc_io_slot_reset_s3(struct pci_dev *pdev)
3339{ 7116{
3340 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7117 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3341 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7118 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3354,11 +7131,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3354 pci_set_master(pdev); 7131 pci_set_master(pdev);
3355 7132
3356 spin_lock_irq(&phba->hbalock); 7133 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 7134 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock); 7135 spin_unlock_irq(&phba->hbalock);
3359 7136
3360 /* Configure and enable interrupt */ 7137 /* Configure and enable interrupt */
3361 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7138 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3362 if (intr_mode == LPFC_INTR_ERROR) { 7139 if (intr_mode == LPFC_INTR_ERROR) {
3363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3364 "0427 Cannot re-enable interrupt after " 7141 "0427 Cannot re-enable interrupt after "
@@ -3378,20 +7155,713 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3378} 7155}
3379 7156
3380/** 7157/**
3381 * lpfc_io_resume - Resume PCI I/O operation 7158 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
3382 * @pdev: pointer to PCI device 7159 * @pdev: pointer to PCI device
3383 * 7160 *
3384 * This routine is registered to the PCI subsystem for error handling. It is 7161 * This routine is called from the PCI subsystem for error handling to device
3385 * called when kernel error recovery tells the lpfc driver that it is ok to 7162 * with SLI-3 interface spec. It is called when kernel error recovery tells
3386 * resume normal PCI operation after PCI bus error recovery. After this call, 7163 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
3387 * traffic can start to flow from this device again. 7164 * error recovery. After this call, traffic can start to flow from this device
7165 * again.
3388 */ 7166 */
3389static void lpfc_io_resume(struct pci_dev *pdev) 7167static void
7168lpfc_io_resume_s3(struct pci_dev *pdev)
7169{
7170 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7171 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7172
7173 lpfc_online(phba);
7174}
7175
7176/**
7177 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7178 * @phba: pointer to lpfc hba data structure.
7179 *
7180 * returns the number of ELS/CT IOCBs to reserve
7181 **/
7182int
7183lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7184{
7185 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7186
7187 if (max_xri <= 100)
7188 return 4;
7189 else if (max_xri <= 256)
7190 return 8;
7191 else if (max_xri <= 512)
7192 return 16;
7193 else if (max_xri <= 1024)
7194 return 32;
7195 else
7196 return 48;
7197}
7198
7199/**
7200 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7201 * @pdev: pointer to PCI device
7202 * @pid: pointer to PCI device identifier
7203 *
 7204 * This routine is called from the kernel's PCI subsystem to attach a device with
7205 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7206 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7207 * information of the device and driver to see if the driver state that it
7208 * can support this kind of device. If the match is successful, the driver
7209 * core invokes this routine. If this routine determines it can claim the HBA,
7210 * it does all the initialization that it needs to do to handle the HBA
7211 * properly.
7212 *
7213 * Return code
7214 * 0 - driver can claim the device
7215 * negative value - driver can not claim the device
7216 **/
7217static int __devinit
7218lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7219{
7220 struct lpfc_hba *phba;
7221 struct lpfc_vport *vport = NULL;
7222 int error;
7223 uint32_t cfg_mode, intr_mode;
7224 int mcnt;
7225
7226 /* Allocate memory for HBA structure */
7227 phba = lpfc_hba_alloc(pdev);
7228 if (!phba)
7229 return -ENOMEM;
7230
7231 /* Perform generic PCI device enabling operation */
7232 error = lpfc_enable_pci_dev(phba);
7233 if (error) {
7234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7235 "1409 Failed to enable pci device.\n");
7236 goto out_free_phba;
7237 }
7238
7239 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
7240 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7241 if (error)
7242 goto out_disable_pci_dev;
7243
7244 /* Set up SLI-4 specific device PCI memory space */
7245 error = lpfc_sli4_pci_mem_setup(phba);
7246 if (error) {
7247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7248 "1410 Failed to set up pci memory space.\n");
7249 goto out_disable_pci_dev;
7250 }
7251
7252 /* Set up phase-1 common device driver resources */
7253 error = lpfc_setup_driver_resource_phase1(phba);
7254 if (error) {
7255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7256 "1411 Failed to set up driver resource.\n");
7257 goto out_unset_pci_mem_s4;
7258 }
7259
7260 /* Set up SLI-4 Specific device driver resources */
7261 error = lpfc_sli4_driver_resource_setup(phba);
7262 if (error) {
7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7264 "1412 Failed to set up driver resource.\n");
7265 goto out_unset_pci_mem_s4;
7266 }
7267
7268 /* Initialize and populate the iocb list per host */
7269 error = lpfc_init_iocb_list(phba,
7270 phba->sli4_hba.max_cfg_param.max_xri);
7271 if (error) {
7272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7273 "1413 Failed to initialize iocb list.\n");
7274 goto out_unset_driver_resource_s4;
7275 }
7276
7277 /* Set up common device driver resources */
7278 error = lpfc_setup_driver_resource_phase2(phba);
7279 if (error) {
7280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7281 "1414 Failed to set up driver resource.\n");
7282 goto out_free_iocb_list;
7283 }
7284
7285 /* Create SCSI host to the physical port */
7286 error = lpfc_create_shost(phba);
7287 if (error) {
7288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7289 "1415 Failed to create scsi host.\n");
7290 goto out_unset_driver_resource;
7291 }
7292
7293 /* Configure sysfs attributes */
7294 vport = phba->pport;
7295 error = lpfc_alloc_sysfs_attr(vport);
7296 if (error) {
7297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7298 "1416 Failed to allocate sysfs attr\n");
7299 goto out_destroy_shost;
7300 }
7301
7302 /* Now, trying to enable interrupt and bring up the device */
7303 cfg_mode = phba->cfg_use_msi;
7304 while (true) {
7305 /* Put device to a known state before enabling interrupt */
7306 lpfc_stop_port(phba);
7307 /* Configure and enable interrupt */
7308 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7309 if (intr_mode == LPFC_INTR_ERROR) {
7310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7311 "0426 Failed to enable interrupt.\n");
7312 error = -ENODEV;
7313 goto out_free_sysfs_attr;
7314 }
7315 /* Set up SLI-4 HBA */
7316 if (lpfc_sli4_hba_setup(phba)) {
7317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7318 "1421 Failed to set up hba\n");
7319 error = -ENODEV;
7320 goto out_disable_intr;
7321 }
7322
7323 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
7324 if (intr_mode != 0)
7325 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7326 LPFC_ACT_INTR_CNT);
7327
7328 /* Check active interrupts received only for MSI/MSI-X */
7329 if (intr_mode == 0 ||
7330 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7331 /* Log the current active interrupt mode */
7332 phba->intr_mode = intr_mode;
7333 lpfc_log_intr_mode(phba, intr_mode);
7334 break;
7335 }
7336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7337 "0451 Configure interrupt mode (%d) "
7338 "failed active interrupt test.\n",
7339 intr_mode);
 7340 /* Unset the previous SLI-4 HBA setup */
7341 lpfc_sli4_unset_hba(phba);
7342 /* Try next level of interrupt mode */
7343 cfg_mode = --intr_mode;
7344 }
7345
7346 /* Perform post initialization setup */
7347 lpfc_post_init_setup(phba);
7348
7349 return 0;
7350
7351out_disable_intr:
7352 lpfc_sli4_disable_intr(phba);
7353out_free_sysfs_attr:
7354 lpfc_free_sysfs_attr(vport);
7355out_destroy_shost:
7356 lpfc_destroy_shost(phba);
7357out_unset_driver_resource:
7358 lpfc_unset_driver_resource_phase2(phba);
7359out_free_iocb_list:
7360 lpfc_free_iocb_list(phba);
7361out_unset_driver_resource_s4:
7362 lpfc_sli4_driver_resource_unset(phba);
7363out_unset_pci_mem_s4:
7364 lpfc_sli4_pci_mem_unset(phba);
7365out_disable_pci_dev:
7366 lpfc_disable_pci_dev(phba);
7367out_free_phba:
7368 lpfc_hba_free(phba);
7369 return error;
7370}
7371
7372/**
7373 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7374 * @pdev: pointer to PCI device
7375 *
 7376 * This routine is called from the kernel's PCI subsystem for a device with
 7377 * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface spec
 7378 * is removed from the PCI bus, it performs all the necessary cleanup for the
 7379 * HBA device to be removed from the PCI subsystem properly.
7380 **/
7381static void __devexit
7382lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7383{
7384 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7385 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7386 struct lpfc_vport **vports;
7387 struct lpfc_hba *phba = vport->phba;
7388 int i;
7389
7390 /* Mark the device unloading flag */
7391 spin_lock_irq(&phba->hbalock);
7392 vport->load_flag |= FC_UNLOADING;
7393 spin_unlock_irq(&phba->hbalock);
7394
7395 /* Free the HBA sysfs attributes */
7396 lpfc_free_sysfs_attr(vport);
7397
7398 /* Release all the vports against this physical port */
7399 vports = lpfc_create_vport_work_array(phba);
7400 if (vports != NULL)
7401 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7402 fc_vport_terminate(vports[i]->fc_vport);
7403 lpfc_destroy_vport_work_array(phba, vports);
7404
7405 /* Remove FC host and then SCSI host with the physical port */
7406 fc_remove_host(shost);
7407 scsi_remove_host(shost);
7408
7409 /* Perform cleanup on the physical port */
7410 lpfc_cleanup(vport);
7411
7412 /*
7413 * Bring down the SLI Layer. This step disables all interrupts,
7414 * clears the rings, discards all mailbox commands, and resets
7415 * the HBA FCoE function.
7416 */
7417 lpfc_debugfs_terminate(vport);
7418 lpfc_sli4_hba_unset(phba);
7419
7420 spin_lock_irq(&phba->hbalock);
7421 list_del_init(&vport->listentry);
7422 spin_unlock_irq(&phba->hbalock);
7423
7424 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7425 * buffers are released to their corresponding pools here.
7426 */
7427 lpfc_scsi_free(phba);
7428 lpfc_sli4_driver_resource_unset(phba);
7429
7430 /* Unmap adapter Control and Doorbell registers */
7431 lpfc_sli4_pci_mem_unset(phba);
7432
7433 /* Release PCI resources and disable device's PCI function */
7434 scsi_host_put(shost);
7435 lpfc_disable_pci_dev(phba);
7436
7437 /* Finally, free the driver's device data structure */
7438 lpfc_hba_free(phba);
7439
7440 return;
7441}
7442
7443/**
7444 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7445 * @pdev: pointer to PCI device
7446 * @msg: power management message
7447 *
 7448 * This routine is called from the kernel's PCI subsystem to support system
 7449 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
 7450 * invokes this method, it quiesces the device by stopping the driver's worker
 7451 * thread for the device, turning off the device's interrupt and DMA, and
 7452 * bringing the device offline. Note that because the driver implements only
 7453 * the minimum PM requirements of a power-aware driver for suspend/resume --
 7454 * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the
 7455 * suspend() method are treated as SUSPEND, and the driver fully reinitializes
 7456 * its device during the resume() method call -- the driver sets the device
 7457 * to the PCI_D3hot state in PCI config space instead of setting it
 7458 * according to the @msg provided by the PM.
7459 *
7460 * Return code
7461 * 0 - driver suspended the device
7462 * Error otherwise
7463 **/
7464static int
7465lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3390{ 7466{
3391 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7467 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3392 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3393 7469
7470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7471 "0298 PCI device Power Management suspend.\n");
7472
7473 /* Bring down the device */
7474 lpfc_offline_prep(phba);
7475 lpfc_offline(phba);
7476 kthread_stop(phba->worker_thread);
7477
7478 /* Disable interrupt from device */
7479 lpfc_sli4_disable_intr(phba);
7480
7481 /* Save device state to PCI config space */
7482 pci_save_state(pdev);
7483 pci_set_power_state(pdev, PCI_D3hot);
7484
7485 return 0;
7486}
7487
7488/**
7489 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7490 * @pdev: pointer to PCI device
7491 *
 7492 * This routine is called from the kernel's PCI subsystem to support system
 7493 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
 7494 * invokes this method, it restores the device's PCI config space state and
 7495 * fully reinitializes the device and brings it online. Note that because the
 7496 * driver implements only the minimum PM requirements of a power-aware driver
 7497 * for suspend/resume -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 7498 * passed to the suspend() method are treated as SUSPEND, and the driver fully
 7499 * reinitializes its device during the resume() method call -- the device
 7500 * is set to PCI_D0 directly in PCI config space before restoring the
 7501 * state.
7502 *
7503 * Return code
 7504 * 0 - driver resumed the device
7505 * Error otherwise
7506 **/
7507static int
7508lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7509{
7510 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7511 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7512 uint32_t intr_mode;
7513 int error;
7514
7515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7516 "0292 PCI device Power Management resume.\n");
7517
7518 /* Restore device state from PCI config space */
7519 pci_set_power_state(pdev, PCI_D0);
7520 pci_restore_state(pdev);
7521 if (pdev->is_busmaster)
7522 pci_set_master(pdev);
7523
7524 /* Startup the kernel thread for this host adapter. */
7525 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7526 "lpfc_worker_%d", phba->brd_no);
7527 if (IS_ERR(phba->worker_thread)) {
7528 error = PTR_ERR(phba->worker_thread);
7529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7530 "0293 PM resume failed to start worker "
7531 "thread: error=x%x.\n", error);
7532 return error;
7533 }
7534
7535 /* Configure and enable interrupt */
7536 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7537 if (intr_mode == LPFC_INTR_ERROR) {
7538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7539 "0294 PM resume Failed to enable interrupt\n");
7540 return -EIO;
7541 } else
7542 phba->intr_mode = intr_mode;
7543
7544 /* Restart HBA and bring it online */
7545 lpfc_sli_brdrestart(phba);
3394 lpfc_online(phba); 7546 lpfc_online(phba);
7547
7548 /* Log the current active interrupt mode */
7549 lpfc_log_intr_mode(phba, phba->intr_mode);
7550
7551 return 0;
7552}
7553
7554/**
7555 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7556 * @pdev: pointer to PCI device.
7557 * @state: the current PCI connection state.
7558 *
 7559 * This routine is called from the PCI subsystem for error handling on a
 7560 * device with the SLI-4 interface spec. This function is called by the PCI subsystem
7561 * after a PCI bus error affecting this device has been detected. When this
7562 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7563 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7564 * for the PCI subsystem to perform proper recovery as desired.
7565 *
7566 * Return codes
7567 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7568 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7569 **/
7570static pci_ers_result_t
7571lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7572{
7573 return PCI_ERS_RESULT_NEED_RESET;
7574}
7575
7576/**
7577 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
7578 * @pdev: pointer to PCI device.
7579 *
 7580 * This routine is called from the PCI subsystem for error handling on a
 7581 * device with the SLI-4 interface spec. It is called after the PCI bus has been reset to
7582 * restart the PCI card from scratch, as if from a cold-boot. During the
7583 * PCI subsystem error recovery, after the driver returns
7584 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7585 * recovery and then call this routine before calling the .resume method to
7586 * recover the device. This function will initialize the HBA device, enable
7587 * the interrupt, but it will just put the HBA to offline state without
7588 * passing any I/O traffic.
7589 *
7590 * Return codes
7591 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7592 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7593 */
7594static pci_ers_result_t
7595lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7596{
7597 return PCI_ERS_RESULT_RECOVERED;
7598}
7599
7600/**
7601 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7602 * @pdev: pointer to PCI device
7603 *
 7604 * This routine is called from the PCI subsystem for error handling on a
 7605 * device with the SLI-4 interface spec. It is called when kernel error recovery tells
7606 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7607 * error recovery. After this call, traffic can start to flow from this device
7608 * again.
7609 **/
7610static void
7611lpfc_io_resume_s4(struct pci_dev *pdev)
7612{
7613 return;
7614}
7615
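The per-interface-type handlers above are reached through the driver's top-level lpfc_io_error_detected/lpfc_io_slot_reset/lpfc_io_resume dispatchers further down, which the PCI core finds via the .err_handler assignment in the pci_driver structure at the end of this file. A minimal sketch of how such a table is typically populated -- the actual lpfc_err_handler initializer is outside this hunk, so this layout is illustrative only and would be placed after the dispatchers' definitions:

/* Sketch only: field names follow struct pci_error_handlers in the
 * PCI core; the real lpfc_err_handler definition is not in this hunk.
 */
static struct pci_error_handlers example_err_handler = {
	.error_detected	= lpfc_io_error_detected,
	.slot_reset	= lpfc_io_slot_reset,
	.resume		= lpfc_io_resume,
};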
7616/**
7617 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7618 * @pdev: pointer to PCI device
7619 * @pid: pointer to PCI device identifier
7620 *
7621 * This routine is to be registered to the kernel's PCI subsystem. When an
7622 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 7623 * at PCI device-specific information of the device and driver to see whether
 7624 * the driver can support this kind of device. If the match is
7625 * successful, the driver core invokes this routine. This routine dispatches
7626 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7627 * do all the initialization that it needs to do to handle the HBA device
7628 * properly.
7629 *
7630 * Return code
7631 * 0 - driver can claim the device
7632 * negative value - driver can not claim the device
7633 **/
7634static int __devinit
7635lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7636{
7637 int rc;
7638 uint16_t dev_id;
7639
7640 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7641 return -ENODEV;
7642
7643 switch (dev_id) {
7644 case PCI_DEVICE_ID_TIGERSHARK:
7645 case PCI_DEVICE_ID_TIGERSHARK_S:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break;
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break;
7651 }
7652 return rc;
7653}
7654
7655/**
7656 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7657 * @pdev: pointer to PCI device
7658 *
7659 * This routine is to be registered to the kernel's PCI subsystem. When an
7660 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7661 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7662 * remove routine, which will perform all the necessary cleanup for the
7663 * device to be removed from the PCI subsystem properly.
7664 **/
7665static void __devexit
7666lpfc_pci_remove_one(struct pci_dev *pdev)
7667{
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7670
7671 switch (phba->pci_dev_grp) {
7672 case LPFC_PCI_DEV_LP:
7673 lpfc_pci_remove_one_s3(pdev);
7674 break;
7675 case LPFC_PCI_DEV_OC:
7676 lpfc_pci_remove_one_s4(pdev);
7677 break;
7678 default:
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "1424 Invalid PCI device group: 0x%x\n",
7681 phba->pci_dev_grp);
7682 break;
7683 }
7684 return;
7685}
7686
7687/**
7688 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7689 * @pdev: pointer to PCI device
7690 * @msg: power management message
7691 *
7692 * This routine is to be registered to the kernel's PCI subsystem to support
7693 * system Power Management (PM). When PM invokes this method, it dispatches
7694 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7695 * suspend the device.
7696 *
7697 * Return code
7698 * 0 - driver suspended the device
7699 * Error otherwise
7700 **/
7701static int
7702lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7703{
7704 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7705 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7706 int rc = -ENODEV;
7707
7708 switch (phba->pci_dev_grp) {
7709 case LPFC_PCI_DEV_LP:
7710 rc = lpfc_pci_suspend_one_s3(pdev, msg);
7711 break;
7712 case LPFC_PCI_DEV_OC:
7713 rc = lpfc_pci_suspend_one_s4(pdev, msg);
7714 break;
7715 default:
7716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7717 "1425 Invalid PCI device group: 0x%x\n",
7718 phba->pci_dev_grp);
7719 break;
7720 }
7721 return rc;
7722}
7723
7724/**
7725 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7726 * @pdev: pointer to PCI device
7727 *
7728 * This routine is to be registered to the kernel's PCI subsystem to support
7729 * system Power Management (PM). When PM invokes this method, it dispatches
7730 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7731 * resume the device.
7732 *
7733 * Return code
 7734 * 0 - driver resumed the device
7735 * Error otherwise
7736 **/
7737static int
7738lpfc_pci_resume_one(struct pci_dev *pdev)
7739{
7740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7742 int rc = -ENODEV;
7743
7744 switch (phba->pci_dev_grp) {
7745 case LPFC_PCI_DEV_LP:
7746 rc = lpfc_pci_resume_one_s3(pdev);
7747 break;
7748 case LPFC_PCI_DEV_OC:
7749 rc = lpfc_pci_resume_one_s4(pdev);
7750 break;
7751 default:
7752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7753 "1426 Invalid PCI device group: 0x%x\n",
7754 phba->pci_dev_grp);
7755 break;
7756 }
7757 return rc;
7758}
7759
7760/**
7761 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7762 * @pdev: pointer to PCI device.
7763 * @state: the current PCI connection state.
7764 *
7765 * This routine is registered to the PCI subsystem for error handling. This
7766 * function is called by the PCI subsystem after a PCI bus error affecting
7767 * this device has been detected. When this routine is invoked, it dispatches
7768 * the action to the proper SLI-3 or SLI-4 device error detected handling
7769 * routine, which will perform the proper error detected operation.
7770 *
7771 * Return codes
7772 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7773 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7774 **/
7775static pci_ers_result_t
7776lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7777{
7778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7779 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7780 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7781
7782 switch (phba->pci_dev_grp) {
7783 case LPFC_PCI_DEV_LP:
7784 rc = lpfc_io_error_detected_s3(pdev, state);
7785 break;
7786 case LPFC_PCI_DEV_OC:
7787 rc = lpfc_io_error_detected_s4(pdev, state);
7788 break;
7789 default:
7790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7791 "1427 Invalid PCI device group: 0x%x\n",
7792 phba->pci_dev_grp);
7793 break;
7794 }
7795 return rc;
7796}
7797
7798/**
7799 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
7800 * @pdev: pointer to PCI device.
7801 *
7802 * This routine is registered to the PCI subsystem for error handling. This
7803 * function is called after PCI bus has been reset to restart the PCI card
7804 * from scratch, as if from a cold-boot. When this routine is invoked, it
7805 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7806 * routine, which will perform the proper device reset.
7807 *
7808 * Return codes
7809 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7810 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7811 **/
7812static pci_ers_result_t
7813lpfc_io_slot_reset(struct pci_dev *pdev)
7814{
7815 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7816 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7817 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7818
7819 switch (phba->pci_dev_grp) {
7820 case LPFC_PCI_DEV_LP:
7821 rc = lpfc_io_slot_reset_s3(pdev);
7822 break;
7823 case LPFC_PCI_DEV_OC:
7824 rc = lpfc_io_slot_reset_s4(pdev);
7825 break;
7826 default:
7827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7828 "1428 Invalid PCI device group: 0x%x\n",
7829 phba->pci_dev_grp);
7830 break;
7831 }
7832 return rc;
7833}
7834
7835/**
7836 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7837 * @pdev: pointer to PCI device
7838 *
7839 * This routine is registered to the PCI subsystem for error handling. It
7840 * is called when kernel error recovery tells the lpfc driver that it is
7841 * OK to resume normal PCI operation after PCI bus error recovery. When
7842 * this routine is invoked, it dispatches the action to the proper SLI-3
7843 * or SLI-4 device io_resume routine, which will resume the device operation.
7844 **/
7845static void
7846lpfc_io_resume(struct pci_dev *pdev)
7847{
7848 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7850
7851 switch (phba->pci_dev_grp) {
7852 case LPFC_PCI_DEV_LP:
7853 lpfc_io_resume_s3(pdev);
7854 break;
7855 case LPFC_PCI_DEV_OC:
7856 lpfc_io_resume_s4(pdev);
7857 break;
7858 default:
7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7860 "1429 Invalid PCI device group: 0x%x\n",
7861 phba->pci_dev_grp);
7862 break;
7863 }
7864 return;
3395} 7865}
3396 7866
3397static struct pci_device_id lpfc_id_table[] = { 7867static struct pci_device_id lpfc_id_table[] = {
@@ -3469,6 +7939,10 @@ static struct pci_device_id lpfc_id_table[] = {
3469 PCI_ANY_ID, PCI_ANY_ID, }, 7939 PCI_ANY_ID, PCI_ANY_ID, },
3470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 7940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3471 PCI_ANY_ID, PCI_ANY_ID, }, 7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, },
7944 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7945 PCI_ANY_ID, PCI_ANY_ID, },
3472 { 0 } 7946 { 0 }
3473}; 7947};
3474 7948
@@ -3486,7 +7960,7 @@ static struct pci_driver lpfc_driver = {
3486 .probe = lpfc_pci_probe_one, 7960 .probe = lpfc_pci_probe_one,
3487 .remove = __devexit_p(lpfc_pci_remove_one), 7961 .remove = __devexit_p(lpfc_pci_remove_one),
3488 .suspend = lpfc_pci_suspend_one, 7962 .suspend = lpfc_pci_suspend_one,
3489 .resume = lpfc_pci_resume_one, 7963 .resume = lpfc_pci_resume_one,
3490 .err_handler = &lpfc_err_handler, 7964 .err_handler = &lpfc_err_handler,
3491}; 7965};
3492 7966
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 1aa85709b012..954ba57970a3 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,33 +18,39 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LOG_ELS 0x1 /* ELS events */ 21#define LOG_ELS 0x00000001 /* ELS events */
22#define LOG_DISCOVERY 0x2 /* Link discovery events */ 22#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
23#define LOG_MBOX 0x4 /* Mailbox events */ 23#define LOG_MBOX 0x00000004 /* Mailbox events */
24#define LOG_INIT 0x8 /* Initialization events */ 24#define LOG_INIT 0x00000008 /* Initialization events */
25#define LOG_LINK_EVENT 0x10 /* Link events */ 25#define LOG_LINK_EVENT 0x00000010 /* Link events */
26#define LOG_IP 0x20 /* IP traffic history */ 26#define LOG_IP 0x00000020 /* IP traffic history */
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x00000040 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x00000080 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x00000100 /* Temperature sensor events */
30#define LOG_BG 0x200 /* BlockGuard events */ 30#define LOG_BG 0x00000200 /* BlockGuard events */
31#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x00000400 /* Miscellaneous events */
32#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x00000800 /* SLI events */
33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
34#define LOG_LIBDFC 0x2000 /* Libdfc events */ 34#define LOG_LIBDFC 0x00002000 /* Libdfc events */
35#define LOG_VPORT 0x4000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOG_ALL_MSG 0xffff /* LOG all messages */ 36#define LOF_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
37 39
38#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
39 do { \ 41do { \
40 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 42 { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
41 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 43 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
42 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ 44 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
43 } while (0) 45} while (0)
44 46
45#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 47#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
46 do { \ 48do { \
47 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 49 { uint32_t log_verbose = (phba)->pport ? \
50 (phba)->pport->cfg_log_verbose : \
51 (phba)->cfg_log_verbose; \
52 if (((mask) & log_verbose) || (level[1] <= '3')) \
48 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 53 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
49 fmt, phba->brd_no, ##arg); } \ 54 fmt, phba->brd_no, ##arg); \
50 } while (0) 55 } \
56} while (0)
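With cfg_log_verbose widened to a full 32-bit mask, callers keep passing one or more LOG_* bits per message; the macro compares them against the (v)port's verbose setting unless the severity is KERN_ERR or more severe, in which case the message always prints. A typical call, mirroring the one added in lpfc_mbox.c below (shown here only for illustration):

/* Logged when LOG_MBOX or LOG_VPORT verbose logging is enabled, or
 * unconditionally because the level is KERN_ERR or more severe.
 */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
		"1814 Mbox x%x failed, no vport\n",
		mboxq->u.mb.mbxCommand);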
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 134fc7fc2127..b9b451c09010 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -39,6 +41,44 @@
39#include "lpfc_compat.h" 41#include "lpfc_compat.h"
40 42
41/** 43/**
44 * lpfc_dump_static_vport - Dump HBA's static vport information.
45 * @phba: pointer to lpfc hba data structure.
46 * @pmb: pointer to the driver internal queue element for mailbox command.
47 * @offset: offset for dumping vport info.
48 *
49 * The dump mailbox command provides a method for the device driver to obtain
50 * various types of information from the HBA device.
51 *
 52 * This routine prepares the mailbox command for dumping the list of static
53 * vports to be created.
54 **/
55void
56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
57 uint16_t offset)
58{
59 MAILBOX_t *mb;
60 void *ctx;
61
62 mb = &pmb->u.mb;
63 ctx = pmb->context2;
64
65 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.cv = 1;
69 mb->un.varDmp.type = DMP_NV_PARAMS;
70 mb->un.varDmp.entry_index = offset;
71 mb->un.varDmp.region_id = DMP_REGION_VPORT;
72 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
73 mb->un.varDmp.co = 0;
74 mb->un.varDmp.resp_offset = 0;
75 pmb->context2 = ctx;
76 mb->mbxOwner = OWN_HOST;
77
78 return;
79}
80
81/**
42 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory 82 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
43 * @phba: pointer to lpfc hba data structure. 83 * @phba: pointer to lpfc hba data structure.
44 * @pmb: pointer to the driver internal queue element for mailbox command. 84 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
58 MAILBOX_t *mb; 98 MAILBOX_t *mb;
59 void *ctx; 99 void *ctx;
60 100
61 mb = &pmb->mb; 101 mb = &pmb->u.mb;
62 ctx = pmb->context2; 102 ctx = pmb->context2;
63 103
64 /* Setup to dump VPD region */ 104 /* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
90 MAILBOX_t *mb; 130 MAILBOX_t *mb;
91 void *ctx; 131 void *ctx;
92 132
93 mb = &pmb->mb; 133 mb = &pmb->u.mb;
94 /* Save context so that we can restore after memset */ 134 /* Save context so that we can restore after memset */
95 ctx = pmb->context2; 135 ctx = pmb->context2;
96 136
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
125{ 165{
126 MAILBOX_t *mb; 166 MAILBOX_t *mb;
127 167
128 mb = &pmb->mb; 168 mb = &pmb->u.mb;
129 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 169 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
130 mb->mbxCommand = MBX_READ_NV; 170 mb->mbxCommand = MBX_READ_NV;
131 mb->mbxOwner = OWN_HOST; 171 mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
151{ 191{
152 MAILBOX_t *mb; 192 MAILBOX_t *mb;
153 193
154 mb = &pmb->mb; 194 mb = &pmb->u.mb;
155 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 195 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
156 mb->mbxCommand = MBX_ASYNCEVT_ENABLE; 196 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
157 mb->un.varCfgAsyncEvent.ring = ring; 197 mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
177{ 217{
178 MAILBOX_t *mb; 218 MAILBOX_t *mb;
179 219
180 mb = &pmb->mb; 220 mb = &pmb->u.mb;
181 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 221 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
182 mb->mbxCommand = MBX_HEARTBEAT; 222 mb->mbxCommand = MBX_HEARTBEAT;
183 mb->mbxOwner = OWN_HOST; 223 mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
211 struct lpfc_sli *psli; 251 struct lpfc_sli *psli;
212 252
213 psli = &phba->sli; 253 psli = &phba->sli;
214 mb = &pmb->mb; 254 mb = &pmb->u.mb;
215 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 255 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
216 256
217 INIT_LIST_HEAD(&mp->list); 257 INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
248{ 288{
249 MAILBOX_t *mb; 289 MAILBOX_t *mb;
250 290
251 mb = &pmb->mb; 291 mb = &pmb->u.mb;
252 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 292 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
253 293
254 mb->un.varClearLA.eventTag = phba->fc_eventTag; 294 mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
275lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 315lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
276{ 316{
277 struct lpfc_vport *vport = phba->pport; 317 struct lpfc_vport *vport = phba->pport;
278 MAILBOX_t *mb = &pmb->mb; 318 MAILBOX_t *mb = &pmb->u.mb;
279 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 319 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
280 320
281 /* NEW_FEATURE 321 /* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
321int 361int
322lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 362lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
323{ 363{
324 MAILBOX_t *mb = &pmb->mb; 364 MAILBOX_t *mb = &pmb->u.mb;
325 uint32_t attentionConditions[2]; 365 uint32_t attentionConditions[2];
326 366
327 /* Sanity check */ 367 /* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
405 struct lpfc_sli *psli; 445 struct lpfc_sli *psli;
406 MAILBOX_t *mb; 446 MAILBOX_t *mb;
407 447
408 mb = &pmb->mb; 448 mb = &pmb->u.mb;
409 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 449 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
410 450
411 psli = &phba->sli; 451 psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
492 struct lpfc_sli *psli; 532 struct lpfc_sli *psli;
493 533
494 psli = &phba->sli; 534 psli = &phba->sli;
495 mb = &pmb->mb; 535 mb = &pmb->u.mb;
496 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 536 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
497 537
498 mb->mbxOwner = OWN_HOST; 538 mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
515 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 555 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
516 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 556 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
517 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 557 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
518 mb->un.varRdSparm.vpi = vpi; 558 mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
519 559
520 /* save address for completion */ 560 /* save address for completion */
521 pmb->context1 = mp; 561 pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
544{ 584{
545 MAILBOX_t *mb; 585 MAILBOX_t *mb;
546 586
547 mb = &pmb->mb; 587 mb = &pmb->u.mb;
548 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 588 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
549 589
550 mb->un.varUnregDID.did = did; 590 mb->un.varUnregDID.did = did;
591 if (vpi != 0xffff)
592 vpi += phba->vpi_base;
551 mb->un.varUnregDID.vpi = vpi; 593 mb->un.varUnregDID.vpi = vpi;
552 594
553 mb->mbxCommand = MBX_UNREG_D_ID; 595 mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
573{ 615{
574 MAILBOX_t *mb; 616 MAILBOX_t *mb;
575 617
576 mb = &pmb->mb; 618 mb = &pmb->u.mb;
577 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 619 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
578 620
579 mb->mbxCommand = MBX_READ_CONFIG; 621 mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
598{ 640{
599 MAILBOX_t *mb; 641 MAILBOX_t *mb;
600 642
601 mb = &pmb->mb; 643 mb = &pmb->u.mb;
602 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
603 645
604 mb->mbxCommand = MBX_READ_LNK_STAT; 646 mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
607} 649}
608 650
609/** 651/**
610 * lpfc_reg_login - Prepare a mailbox command for registering remote login 652 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
611 * @phba: pointer to lpfc hba data structure. 653 * @phba: pointer to lpfc hba data structure.
612 * @vpi: virtual N_Port identifier. 654 * @vpi: virtual N_Port identifier.
613 * @did: remote port identifier. 655 * @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
631 * 1 - DMA memory allocation failed 673 * 1 - DMA memory allocation failed
632 **/ 674 **/
633int 675int
634lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 676lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
635 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) 677 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
636{ 678{
637 MAILBOX_t *mb = &pmb->mb; 679 MAILBOX_t *mb = &pmb->u.mb;
638 uint8_t *sparam; 680 uint8_t *sparam;
639 struct lpfc_dmabuf *mp; 681 struct lpfc_dmabuf *mp;
640 682
641 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 683 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
642 684
643 mb->un.varRegLogin.rpi = 0; 685 mb->un.varRegLogin.rpi = 0;
644 mb->un.varRegLogin.vpi = vpi; 686 if (phba->sli_rev == LPFC_SLI_REV4) {
687 mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
688 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
689 return 1;
690 }
691
692 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
645 mb->un.varRegLogin.did = did; 693 mb->un.varRegLogin.did = did;
646 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 694 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
647 695
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
697{ 745{
698 MAILBOX_t *mb; 746 MAILBOX_t *mb;
699 747
700 mb = &pmb->mb; 748 mb = &pmb->u.mb;
701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 749 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702 750
703 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 751 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
704 mb->un.varUnregLogin.rsvd1 = 0; 752 mb->un.varUnregLogin.rsvd1 = 0;
705 mb->un.varUnregLogin.vpi = vpi; 753 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
706 754
707 mb->mbxCommand = MBX_UNREG_LOGIN; 755 mb->mbxCommand = MBX_UNREG_LOGIN;
708 mb->mbxOwner = OWN_HOST; 756 mb->mbxOwner = OWN_HOST;
757
709 return; 758 return;
710} 759}
711 760
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
725 * This routine prepares the mailbox command for registering a virtual N_Port. 774 * This routine prepares the mailbox command for registering a virtual N_Port.
726 **/ 775 **/
727void 776void
728lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, 777lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
729 LPFC_MBOXQ_t *pmb)
730{ 778{
731 MAILBOX_t *mb = &pmb->mb; 779 MAILBOX_t *mb = &pmb->u.mb;
732 780
733 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 781 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
734 782
735 mb->un.varRegVpi.vpi = vpi; 783 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
736 mb->un.varRegVpi.sid = sid; 784 mb->un.varRegVpi.sid = vport->fc_myDID;
785 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
737 786
738 mb->mbxCommand = MBX_REG_VPI; 787 mb->mbxCommand = MBX_REG_VPI;
739 mb->mbxOwner = OWN_HOST; 788 mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
760void 809void
761lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) 810lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
762{ 811{
763 MAILBOX_t *mb = &pmb->mb; 812 MAILBOX_t *mb = &pmb->u.mb;
764 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 813 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
765 814
766 mb->un.varUnregVpi.vpi = vpi; 815 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
767 816
768 mb->mbxCommand = MBX_UNREG_VPI; 817 mb->mbxCommand = MBX_UNREG_VPI;
769 mb->mbxOwner = OWN_HOST; 818 mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
852void 901void
853lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 902lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
854{ 903{
855 MAILBOX_t *mb = &pmb->mb; 904 MAILBOX_t *mb = &pmb->u.mb;
856 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 905 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
857 mb->un.varRdRev.cv = 1; 906 mb->un.varRdRev.cv = 1;
858 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ 907 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
945 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) 994 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
946{ 995{
947 int i; 996 int i;
948 MAILBOX_t *mb = &pmb->mb; 997 MAILBOX_t *mb = &pmb->u.mb;
949 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; 998 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
950 999
951 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1000 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
1020lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 1069lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1021{ 1070{
1022 int i; 1071 int i;
1023 MAILBOX_t *mb = &pmb->mb; 1072 MAILBOX_t *mb = &pmb->u.mb;
1024 struct lpfc_sli *psli; 1073 struct lpfc_sli *psli;
1025 struct lpfc_sli_ring *pring; 1074 struct lpfc_sli_ring *pring;
1026 1075
@@ -1075,7 +1124,7 @@ void
1075lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1124lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1076{ 1125{
1077 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; 1126 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1078 MAILBOX_t *mb = &pmb->mb; 1127 MAILBOX_t *mb = &pmb->u.mb;
1079 dma_addr_t pdma_addr; 1128 dma_addr_t pdma_addr;
1080 uint32_t bar_low, bar_high; 1129 uint32_t bar_low, bar_high;
1081 size_t offset; 1130 size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1099 1148
1100 /* If HBA supports SLI=3 ask for it */ 1149 /* If HBA supports SLI=3 ask for it */
1101 1150
1102 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1151 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1103 if (phba->cfg_enable_bg) 1152 if (phba->cfg_enable_bg)
1104 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ 1153 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1154 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1105 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1155 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1106 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1156 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1107 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ 1157 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
1108 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1158 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1109 if (phba->max_vpi && phba->cfg_enable_npiv && 1159 if (phba->max_vpi && phba->cfg_enable_npiv &&
1110 phba->vpd.sli3Feat.cmv) { 1160 phba->vpd.sli3Feat.cmv) {
1111 mb->un.varCfgPort.max_vpi = phba->max_vpi; 1161 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1112 mb->un.varCfgPort.cmv = 1; 1162 mb->un.varCfgPort.cmv = 1;
1113 } else 1163 } else
1114 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; 1164 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1115 } else 1165 } else
1116 phba->sli_rev = 2; 1166 phba->sli_rev = LPFC_SLI_REV2;
1117 mb->un.varCfgPort.sli_mode = phba->sli_rev; 1167 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1118 1168
1119 /* Now setup pcb */ 1169 /* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1245void 1295void
1246lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1296lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1247{ 1297{
1248 MAILBOX_t *mb = &pmb->mb; 1298 MAILBOX_t *mb = &pmb->u.mb;
1249 1299
1250 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 1300 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1251 mb->mbxCommand = MBX_KILL_BOARD; 1301 mb->mbxCommand = MBX_KILL_BOARD;
@@ -1305,29 +1355,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
1305} 1355}
1306 1356
1307/** 1357/**
1358 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
1359 * @phba: pointer to lpfc hba data structure.
1360 * @mbq: pointer to the driver internal queue element for mailbox command.
1361 *
 1362 * This routine puts the completed mailbox command into the mailbox command
1363 * complete list. This is the unlocked version of the routine. The mailbox
1364 * complete list is used by the driver worker thread to process mailbox
1365 * complete callback functions outside the driver interrupt handler.
1366 **/
1367void
1368__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1369{
1370 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1371}
1372
1373/**
1308 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list 1374 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
1309 * @phba: pointer to lpfc hba data structure. 1375 * @phba: pointer to lpfc hba data structure.
1310 * @mbq: pointer to the driver internal queue element for mailbox command. 1376 * @mbq: pointer to the driver internal queue element for mailbox command.
1311 * 1377 *
1312 * This routine put the completed mailbox command into the mailbox command 1378 * This routine put the completed mailbox command into the mailbox command
1313 * complete list. This routine is called from driver interrupt handler 1379 * complete list. This is the locked version of the routine. The mailbox
1314 * context.The mailbox complete list is used by the driver worker thread 1380 * complete list is used by the driver worker thread to process mailbox
1315 * to process mailbox complete callback functions outside the driver interrupt 1381 * complete callback functions outside the driver interrupt handler.
1316 * handler.
1317 **/ 1382 **/
1318void 1383void
1319lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1384lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1320{ 1385{
1321 unsigned long iflag; 1386 unsigned long iflag;
1322 1387
1323 /* This function expects to be called from interrupt context */ 1388 /* This function expects to be called from interrupt context */
1324 spin_lock_irqsave(&phba->hbalock, iflag); 1389 spin_lock_irqsave(&phba->hbalock, iflag);
1325 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); 1390 __lpfc_mbox_cmpl_put(phba, mbq);
1326 spin_unlock_irqrestore(&phba->hbalock, iflag); 1391 spin_unlock_irqrestore(&phba->hbalock, iflag);
1327 return; 1392 return;
1328} 1393}
1329 1394
1330/** 1395/**
 1396 * lpfc_mbox_cmd_check - Check the validity of a mailbox command
1397 * @phba: pointer to lpfc hba data structure.
1398 * @mboxq: pointer to the driver internal queue element for mailbox command.
1399 *
1400 * This routine is to check whether a mailbox command is valid to be issued.
 1401 * This check will be performed by the mailbox issue API when a client
 1402 * is about to issue a mailbox command to the mailbox transport.
1403 *
1404 * Return 0 - pass the check, -ENODEV - fail the check
1405 **/
1406int
1407lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1408{
 1409 /* Mailbox commands that have a completion handler must also have a
1410 * vport specified.
1411 */
1412 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1413 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1414 if (!mboxq->vport) {
1415 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1416 "1814 Mbox x%x failed, no vport\n",
1417 mboxq->u.mb.mbxCommand);
1418 dump_stack();
1419 return -ENODEV;
1420 }
1421 }
1422 return 0;
1423}
1424
1425/**
1426 * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
1427 * @phba: pointer to lpfc hba data structure.
1428 *
1429 * This routine is to check whether the HBA device is ready for posting a
 1430 * mailbox command. It is used by the mailbox transport API at the time it
 1431 * is to post a mailbox command to the device.
1432 *
1433 * Return 0 - pass the check, -ENODEV - fail the check
1434 **/
1435int
1436lpfc_mbox_dev_check(struct lpfc_hba *phba)
1437{
1438 /* If the PCI channel is in offline state, do not issue mbox */
1439 if (unlikely(pci_channel_offline(phba->pcidev)))
1440 return -ENODEV;
1441
1442 /* If the HBA is in error state, do not issue mbox */
1443 if (phba->link_state == LPFC_HBA_ERROR)
1444 return -ENODEV;
1445
1446 return 0;
1447}
1448
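The two routines above are meant to be applied together on the mailbox issue path: lpfc_mbox_dev_check() gates on device state and lpfc_mbox_cmd_check() gates on the command itself. A hedged sketch of how a caller might chain them before handing the command to the transport -- the real issue path in lpfc_sli.c is not part of this hunk, and the helper name here is illustrative:

/* Illustrative only: the mailbox issue routines are expected to run
 * these checks themselves before posting the command to the port.
 */
static int example_mbox_pre_issue(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* Is the device in a state where a mailbox command may be posted? */
	rc = lpfc_mbox_dev_check(phba);
	if (rc)
		return rc;

	/* Is the command well formed (a cmpl handler implies a vport)? */
	return lpfc_mbox_cmd_check(phba, mboxq);
}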
1449/**
1331 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value 1450 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
1332 * @phba: pointer to lpfc hba data structure. 1451 * @phba: pointer to lpfc hba data structure.
1333 * @cmd: mailbox command code. 1452 * @cmd: mailbox command code.
@@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
1350 case MBX_WRITE_WWN: /* 0x98 */ 1469 case MBX_WRITE_WWN: /* 0x98 */
1351 case MBX_LOAD_EXP_ROM: /* 0x9C */ 1470 case MBX_LOAD_EXP_ROM: /* 0x9C */
1352 return LPFC_MBOX_TMO_FLASH_CMD; 1471 return LPFC_MBOX_TMO_FLASH_CMD;
1472 case MBX_SLI4_CONFIG: /* 0x9b */
1473 return LPFC_MBOX_SLI4_CONFIG_TMO;
1353 } 1474 }
1354 return LPFC_MBOX_TMO; 1475 return LPFC_MBOX_TMO;
1355} 1476}
1477
1478/**
1479 * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
1480 * @mbox: pointer to lpfc mbox command.
1481 * @sgentry: sge entry index.
1482 * @phyaddr: physical address for the sge
1483 * @length: Length of the sge.
1484 *
1485 * This routine sets up an entry in the non-embedded mailbox command at the sge
1486 * index location.
1487 **/
1488void
1489lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1490 dma_addr_t phyaddr, uint32_t length)
1491{
1492 struct lpfc_mbx_nembed_cmd *nembed_sge;
1493
1494 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1495 &mbox->u.mqe.un.nembed_cmd;
1496 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1497 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1498 nembed_sge->sge[sgentry].length = length;
1499}
1500
1501/**
1502 * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
1503 * @mbox: pointer to lpfc mbox command.
1504 * @sgentry: sge entry index.
1505 *
1506 * This routine gets an entry from the non-embedded mailbox command at the sge
1507 * index location.
1508 **/
1509void
1510lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1511 struct lpfc_mbx_sge *sge)
1512{
1513 struct lpfc_mbx_nembed_cmd *nembed_sge;
1514
1515 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1516 &mbox->u.mqe.un.nembed_cmd;
1517 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1518 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1519 sge->length = nembed_sge->sge[sgentry].length;
1520}
1521
1522/**
1523 * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
1524 * @phba: pointer to lpfc hba data structure.
1525 * @mbox: pointer to lpfc mbox command.
1526 *
 1527 * This routine frees an SLI4-specific mailbox command used for sending an IOCTL command.
1528 **/
1529void
1530lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1531{
1532 struct lpfc_mbx_sli4_config *sli4_cfg;
1533 struct lpfc_mbx_sge sge;
1534 dma_addr_t phyaddr;
1535 uint32_t sgecount, sgentry;
1536
1537 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1538
1539 /* For embedded mbox command, just free the mbox command */
1540 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1541 mempool_free(mbox, phba->mbox_mem_pool);
1542 return;
1543 }
1544
1545 /* For non-embedded mbox command, we need to free the pages first */
1546 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1547 /* There is nothing we can do if there is no sge address array */
1548 if (unlikely(!mbox->sge_array)) {
1549 mempool_free(mbox, phba->mbox_mem_pool);
1550 return;
1551 }
1552 /* Each non-embedded DMA memory was allocated in the length of a page */
1553 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1554 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1555 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1556 dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
1557 mbox->sge_array->addr[sgentry], phyaddr);
1558 }
1559 /* Free the sge address array memory */
1560 kfree(mbox->sge_array);
1561 /* Finally, free the mailbox command itself */
1562 mempool_free(mbox, phba->mbox_mem_pool);
1563}
1564
1565/**
1566 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
1567 * @phba: pointer to lpfc hba data structure.
1568 * @mbox: pointer to lpfc mbox command.
1569 * @subsystem: The sli4 config sub mailbox subsystem.
1570 * @opcode: The sli4 config sub mailbox command opcode.
1571 * @length: Length of the sli4 config mailbox command.
1572 *
 1573 * This routine sets up the header fields of an SLI4-specific mailbox command
 1574 * for sending an IOCTL command.
1575 *
1576 * Return: the actual length of the mbox command allocated (mostly useful
 1577 * for non-embedded mailbox commands).
1578 **/
1579int
1580lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1581 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1582{
1583 struct lpfc_mbx_sli4_config *sli4_config;
1584 union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1585 uint32_t alloc_len;
1586 uint32_t resid_len;
1587 uint32_t pagen, pcount;
1588 void *viraddr;
1589 dma_addr_t phyaddr;
1590
1591 /* Set up SLI4 mailbox command header fields */
1592 memset(mbox, 0, sizeof(*mbox));
1593 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1594
1595 /* Set up SLI4 ioctl command header fields */
1596 sli4_config = &mbox->u.mqe.un.sli4_config;
1597
1598 /* Setup for the embedded mbox command */
1599 if (emb) {
1600 /* Set up main header fields */
1601 bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1602 sli4_config->header.cfg_mhdr.payload_length =
1603 LPFC_MBX_CMD_HDR_LENGTH + length;
1604 /* Set up sub-header fields following main header */
1605 bf_set(lpfc_mbox_hdr_opcode,
1606 &sli4_config->header.cfg_shdr.request, opcode);
1607 bf_set(lpfc_mbox_hdr_subsystem,
1608 &sli4_config->header.cfg_shdr.request, subsystem);
1609 sli4_config->header.cfg_shdr.request.request_length = length;
1610 return length;
1611 }
1612
 1613 /* Setup for the non-embedded mbox command */
1614 pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
1615 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1616 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1617 /* Allocate record for keeping SGE virtual addresses */
1618 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1619 GFP_KERNEL);
1620 if (!mbox->sge_array)
1621 return 0;
1622
1623 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1624 /* The DMA memory is always allocated in the length of a
1625 * page even though the last SGE might not fill up to a
1626 * page, this is used as a priori size of PAGE_SIZE for
1627 * the later DMA memory free.
1628 */
1629 viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
1630 &phyaddr, GFP_KERNEL);
1631 /* In case of malloc fails, proceed with whatever we have */
1632 if (!viraddr)
1633 break;
1634 mbox->sge_array->addr[pagen] = viraddr;
1635 /* Keep the first page for later sub-header construction */
1636 if (pagen == 0)
1637 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1638 resid_len = length - alloc_len;
1639 if (resid_len > PAGE_SIZE) {
1640 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1641 PAGE_SIZE);
1642 alloc_len += PAGE_SIZE;
1643 } else {
1644 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1645 resid_len);
1646 alloc_len = length;
1647 }
1648 }
1649
1650 /* Set up main header fields in mailbox command */
1651 sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1652 bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1653
1654 /* Set up sub-header fields into the first page */
1655 if (pagen > 0) {
1656 bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1657 bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1658 cfg_shdr->request.request_length =
1659 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1660 }
 1661 /* The sub-header is in DMA memory, which needs endian conversion */
1662 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1663 sizeof(union lpfc_sli4_cfg_shdr));
1664
1665 return alloc_len;
1666}
1667
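A caller typically allocates the mailbox from the driver's mailbox mempool, sizes the payload with lpfc_sli4_config(), and releases both the DMA pages and the mailbox through lpfc_sli4_mbox_cmd_free(). A minimal usage sketch; the function name, the subsystem/opcode values, and the requested length are placeholders, not taken from this patch:

/* Sketch of a non-embedded SLI4 config command round trip. */
static int example_sli4_config_cmd(struct lpfc_hba *phba,
				   uint8_t subsystem, uint8_t opcode)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t alloc_len;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Ask for more than a page so the non-embedded (SGE) form is used. */
	alloc_len = lpfc_sli4_config(phba, mboxq, subsystem, opcode,
				     2 * PAGE_SIZE, false);
	if (alloc_len < 2 * PAGE_SIZE) {
		/* Frees any DMA pages already set up, then the mailbox. */
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/* ... fill in the command payload and post it to the port ... */

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return 0;
}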
1668/**
1669 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1670 * @phba: pointer to lpfc hba data structure.
1671 * @mbox: pointer to lpfc mbox command.
1672 *
1673 * This routine gets the opcode from a SLI4 specific mailbox command for
1674 * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
1675 * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
1676 * returned.
1677 **/
1678uint8_t
1679lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1680{
1681 struct lpfc_mbx_sli4_config *sli4_cfg;
1682 union lpfc_sli4_cfg_shdr *cfg_shdr;
1683
1684 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1685 return 0;
1686 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1687
1688 /* For embedded mbox command, get opcode from embedded sub-header*/
1689 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1690 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1691 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1692 }
1693
1694 /* For non-embedded mbox command, get opcode from first dma page */
1695 if (unlikely(!mbox->sge_array))
1696 return 0;
1697 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1698 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1699}
1700
1701/**
1702 * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
1703 * @mboxq: pointer to lpfc mbox command.
1704 *
1705 * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
1706 * mailbox command.
1707 **/
1708void
1709lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1710{
1711 /* Set up SLI4 mailbox command header fields */
1712 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
1713 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
1714
1715 /* Set up host requested features. */
1716 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1717
1718 /* Virtual fabrics and FIPs are not supported yet. */
1719 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1720
1721 /* Enable DIF (block guard) only if configured to do so. */
1722 if (phba->cfg_enable_bg)
1723 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
1724
1725 /* Enable NPIV only if configured to do so. */
1726 if (phba->max_vpi && phba->cfg_enable_npiv)
1727 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
1728
1729 return;
1730}
1731
1732/**
1733 * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
1734 * @mbox: pointer to lpfc mbox command to initialize.
1735 * @vport: Vport associated with the VF.
1736 *
1737 * This routine initializes @mbox to all zeros and then fills in the mailbox
1738 * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
1739 * in the context of an FCF. The driver issues this command to setup a VFI
1740 * before issuing a FLOGI to login to the VSAN. The driver should also issue a
1741 * REG_VFI after a successful VSAN login.
1742 **/
1743void
1744lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1745{
1746 struct lpfc_mbx_init_vfi *init_vfi;
1747
1748 memset(mbox, 0, sizeof(*mbox));
1749 init_vfi = &mbox->u.mqe.un.init_vfi;
1750 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
1751 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1752 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1753 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
1754 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
1755}
1756
1757/**
1758 * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
1759 * @mbox: pointer to lpfc mbox command to initialize.
1760 * @vport: vport associated with the VF.
1761 * @phys: BDE DMA bus address used to send the service parameters to the HBA.
1762 *
1763 * This routine initializes @mbox to all zeros and then fills in the mailbox
1764 * fields from @vport, and uses @buf as a DMAable buffer to send the vport's
1765 * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
1766 * fabrics identified by VFI in the context of an FCF.
1767 **/
1768void
1769lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1770{
1771 struct lpfc_mbx_reg_vfi *reg_vfi;
1772
1773 memset(mbox, 0, sizeof(*mbox));
1774 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1775 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1776 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1777 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1778 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1779 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1780 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1781 reg_vfi->bde.addrLow = putPaddrLow(phys);
1782 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
1783 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1784 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
1785}
1786
1787/**
1788 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
1789 * @mbox: pointer to lpfc mbox command to initialize.
1790 * @vpi: VPI to be initialized.
1791 *
1792 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
1793 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
1794 * with the virtual N_Port. The SLI Host issues this command before issuing a
1795 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
1796 * successful virtual N_Port login.
1797 **/
1798void
1799lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
1800{
1801 memset(mbox, 0, sizeof(*mbox));
1802 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1803 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
1804}
1805
1806/**
1807 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1808 * @mbox: pointer to lpfc mbox command to initialize.
1809 * @vfi: VFI to be unregistered.
1810 *
1811 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1812 * (logical NPort) into the inactive state. The SLI Host must have logged out
1813 * and unregistered all remote N_Ports to abort any activity on the virtual
1814 * fabric. The SLI Port posts the mailbox response after marking the virtual
1815 * fabric inactive.
1816 **/
1817void
1818lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
1819{
1820 memset(mbox, 0, sizeof(*mbox));
1821 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1822 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
1823}
1824
1825/**
1826 * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
1827 * @phba: pointer to the hba structure.
1828 * @mbox: pointer to lpfc mbox command to initialize.
1829 *
1830 * This function creates a SLI4 dump mailbox command to dump FCoE
1831 * parameters stored in region 23.
1832 **/
1833int
1834lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1835 struct lpfcMboxq *mbox)
1836{
1837 struct lpfc_dmabuf *mp = NULL;
1838 MAILBOX_t *mb;
1839
1840 memset(mbox, 0, sizeof(*mbox));
1841 mb = &mbox->u.mb;
1842
1843 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1844 if (mp)
1845 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1846
1847 if (!mp || !mp->virt) {
1848 kfree(mp);
1849 /* dump_fcoe_param failed to allocate memory */
1850 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1851 "2569 lpfc_dump_fcoe_param: memory"
1852 " allocation failed\n");
1853 return 1;
1854 }
1855
1856 memset(mp->virt, 0, LPFC_BPL_SIZE);
1857 INIT_LIST_HEAD(&mp->list);
1858
1859 /* save address for completion */
1860 mbox->context1 = (uint8_t *) mp;
1861
1862 mb->mbxCommand = MBX_DUMP_MEMORY;
1863 mb->un.varDmp.type = DMP_NV_PARAMS;
1864 mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
1865 mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
1866 mb->un.varWords[3] = putPaddrLow(mp->phys);
1867 mb->un.varWords[4] = putPaddrHigh(mp->phys);
1868 return 0;
1869}
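Illustrative sketch (not part of the patch): a caller could issue the DUMP mailbox built above in polled mode and then release the region-23 DMA buffer saved in context1. The polled issue path (lpfc_sli_issue_mbox with MBX_POLL and MBX_SUCCESS) is assumed here for illustration.

/* Sketch only -- polled issue path and return handling are assumed. */
static int example_read_fcoe_param(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	if (lpfc_dump_fcoe_param(phba, mboxq)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	/* on success, the region 23 FCoE parameters are now in mp->virt */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}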
1870
1871/**
1872 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
1873 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
1874 * @mbox: pointer to lpfc mbox command to initialize.
1875 *
1876 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
1877 * SLI Host uses the command to activate an FCF after it has acquired FCF
1878 * information via a READ_FCF mailbox command. This mailbox command is also used
1879 * to indicate where received unsolicited frames from this FCF will be sent. By
1880 * default this routine will set up the FCF to forward all unsolicited frames
1881 * to the RQ ID passed in via @phba. This can be overridden by the caller for
1882 * more complicated setups.
1883 **/
1884void
1885lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1886{
1887 struct lpfc_mbx_reg_fcfi *reg_fcfi;
1888
1889 memset(mbox, 0, sizeof(*mbox));
1890 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
1891 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
1892 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
1893 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1894 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1895 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1896 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
1897 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1898 bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
1899 (~phba->fcf.addr_mode) & 0x3);
1900 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1901 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1902 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
1903 }
1904}
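Illustrative sketch (not part of the patch) of the override mentioned in the kernel-doc above: after the default single-RQ setup, a caller could steer some unsolicited frames to a second receive queue. The extra_rq_id parameter is hypothetical, and a real setup would also have to program the matching frame type match/mask fields, which are omitted here.

/* Sketch only -- extra_rq_id is hypothetical; type match/mask setup omitted. */
static void example_reg_fcfi_override(struct lpfc_hba *phba,
				      struct lpfcMboxq *mboxq,
				      uint16_t extra_rq_id)
{
	struct lpfc_mbx_reg_fcfi *reg_fcfi;

	lpfc_reg_fcfi(phba, mboxq);	/* default: everything to the header RQ */
	reg_fcfi = &mboxq->u.mqe.un.reg_fcfi;
	/* route an additional class of unsolicited frames to another RQ */
	bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, extra_rq_id);
}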
1905
1906/**
1907 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
1908 * @mbox: pointer to lpfc mbox command to initialize.
1909 * @fcfi: FCFI to be unregistered.
1910 *
1911 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
1912 * The SLI Host uses the command to inactivate an FCFI.
1913 **/
1914void
1915lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
1916{
1917 memset(mbox, 0, sizeof(*mbox));
1918 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
1919 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
1920}
1921
1922/**
1923 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
1924 * @mbox: pointer to lpfc mbox command to initialize.
1925 * @ndlp: The nodelist structure that describes the RPI to resume.
1926 *
1927 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
1928 * link event.
1929 **/
1930void
1931lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1932{
1933 struct lpfc_mbx_resume_rpi *resume_rpi;
1934
1935 memset(mbox, 0, sizeof(*mbox));
1936 resume_rpi = &mbox->u.mqe.un.resume_rpi;
1937 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
1938 bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
1939 bf_set(lpfc_resume_rpi_vpi, resume_rpi,
1940 ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
1941 bf_set(lpfc_resume_rpi_vfi, resume_rpi,
1942 ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
1943}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 35a976733398..e198c917c13e 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
45 * @phba: HBA to allocate pools for 47 * @phba: HBA to allocate pools for
46 * 48 *
47 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, 49 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
48 * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools 50 * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
49 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. 51 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
50 * 52 *
51 * Notes: Not interrupt-safe. Must be called with no locks held. If any 53 * Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -56,19 +58,30 @@
56 * -ENOMEM on failure (if any memory allocations fail) 58 * -ENOMEM on failure (if any memory allocations fail)
57 **/ 59 **/
58int 60int
59lpfc_mem_alloc(struct lpfc_hba * phba) 61lpfc_mem_alloc(struct lpfc_hba *phba, int align)
60{ 62{
61 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 63 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
62 int longs; 64 int longs;
63 int i; 65 int i;
64 66
65 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 67 if (phba->sli_rev == LPFC_SLI_REV4)
66 phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); 68 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev,
71 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size,
73 0);
74 else
75 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0);
67 if (!phba->lpfc_scsi_dma_buf_pool) 79 if (!phba->lpfc_scsi_dma_buf_pool)
68 goto fail; 80 goto fail;
69 81
70 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, 82 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
71 LPFC_BPL_SIZE, 8,0); 83 LPFC_BPL_SIZE,
84 align, 0);
72 if (!phba->lpfc_mbuf_pool) 85 if (!phba->lpfc_mbuf_pool)
73 goto fail_free_dma_buf_pool; 86 goto fail_free_dma_buf_pool;
74 87
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
97 sizeof(struct lpfc_nodelist)); 110 sizeof(struct lpfc_nodelist));
98 if (!phba->nlp_mem_pool) 111 if (!phba->nlp_mem_pool)
99 goto fail_free_mbox_pool; 112 goto fail_free_mbox_pool;
100 113 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
101 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, 114 phba->pcidev,
102 LPFC_BPL_SIZE, 8, 0); 115 LPFC_HDR_BUF_SIZE, align, 0);
103 if (!phba->lpfc_hbq_pool) 116 if (!phba->lpfc_hrb_pool)
104 goto fail_free_nlp_mem_pool; 117 goto fail_free_nlp_mem_pool;
118 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
119 phba->pcidev,
120 LPFC_DATA_BUF_SIZE, align, 0);
121 if (!phba->lpfc_drb_pool)
122 goto fail_free_hbq_pool;
105 123
106 /* vpi zero is reserved for the physical port so add 1 to max */ 124 /* vpi zero is reserved for the physical port so add 1 to max */
107 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; 125 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
108 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); 126 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
109 if (!phba->vpi_bmask) 127 if (!phba->vpi_bmask)
110 goto fail_free_hbq_pool; 128 goto fail_free_dbq_pool;
111 129
112 return 0; 130 return 0;
113 131
132 fail_free_dbq_pool:
133 pci_pool_destroy(phba->lpfc_drb_pool);
134 phba->lpfc_drb_pool = NULL;
114 fail_free_hbq_pool: 135 fail_free_hbq_pool:
115 lpfc_sli_hbqbuf_free_all(phba); 136 pci_pool_destroy(phba->lpfc_hrb_pool);
116 pci_pool_destroy(phba->lpfc_hbq_pool); 137 phba->lpfc_hrb_pool = NULL;
117 fail_free_nlp_mem_pool: 138 fail_free_nlp_mem_pool:
118 mempool_destroy(phba->nlp_mem_pool); 139 mempool_destroy(phba->nlp_mem_pool);
119 phba->nlp_mem_pool = NULL; 140 phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
136} 157}
137 158
138/** 159/**
139 * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc 160 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
140 * @phba: HBA to free memory for 161 * @phba: HBA to free memory for
141 * 162 *
142 * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, 163 * Description: Free the memory allocated by the lpfc_mem_alloc routine. This
143 * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and 164 * routine is the counterpart of lpfc_mem_alloc.
144 * lpfc_nodelist. Also frees the VPI bitmask
145 * 165 *
146 * Returns: None 166 * Returns: None
147 **/ 167 **/
148void 168void
149lpfc_mem_free(struct lpfc_hba * phba) 169lpfc_mem_free(struct lpfc_hba *phba)
150{ 170{
151 struct lpfc_sli *psli = &phba->sli;
152 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
153 LPFC_MBOXQ_t *mbox, *next_mbox;
154 struct lpfc_dmabuf *mp;
155 int i; 171 int i;
172 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
156 173
174 /* Free VPI bitmask memory */
157 kfree(phba->vpi_bmask); 175 kfree(phba->vpi_bmask);
176
177 /* Free HBQ pools */
158 lpfc_sli_hbqbuf_free_all(phba); 178 lpfc_sli_hbqbuf_free_all(phba);
179 pci_pool_destroy(phba->lpfc_drb_pool);
180 phba->lpfc_drb_pool = NULL;
181 pci_pool_destroy(phba->lpfc_hrb_pool);
182 phba->lpfc_hrb_pool = NULL;
183
184 /* Free NLP memory pool */
185 mempool_destroy(phba->nlp_mem_pool);
186 phba->nlp_mem_pool = NULL;
187
188 /* Free mbox memory pool */
189 mempool_destroy(phba->mbox_mem_pool);
190 phba->mbox_mem_pool = NULL;
191
192 /* Free MBUF memory pool */
193 for (i = 0; i < pool->current_count; i++)
194 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
195 pool->elements[i].phys);
196 kfree(pool->elements);
197
198 pci_pool_destroy(phba->lpfc_mbuf_pool);
199 phba->lpfc_mbuf_pool = NULL;
159 200
201 /* Free DMA buffer memory pool */
202 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
203 phba->lpfc_scsi_dma_buf_pool = NULL;
204
205 return;
206}
207
208/**
209 * lpfc_mem_free_all - Frees all PCI and driver memory
210 * @phba: HBA to free memory for
211 *
212 * Description: Frees memory from the PCI and driver memory pools in use:
213 * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees the
214 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
215 * the VPI bitmask.
216 *
217 * Returns: None
218 **/
219void
220lpfc_mem_free_all(struct lpfc_hba *phba)
221{
222 struct lpfc_sli *psli = &phba->sli;
223 LPFC_MBOXQ_t *mbox, *next_mbox;
224 struct lpfc_dmabuf *mp;
225
226 /* Free memory used in mailbox queue back to mailbox memory pool */
160 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 227 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
161 mp = (struct lpfc_dmabuf *) (mbox->context1); 228 mp = (struct lpfc_dmabuf *) (mbox->context1);
162 if (mp) { 229 if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
166 list_del(&mbox->list); 233 list_del(&mbox->list);
167 mempool_free(mbox, phba->mbox_mem_pool); 234 mempool_free(mbox, phba->mbox_mem_pool);
168 } 235 }
236 /* Free memory used in mailbox cmpl list back to mailbox memory pool */
169 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { 237 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
170 mp = (struct lpfc_dmabuf *) (mbox->context1); 238 mp = (struct lpfc_dmabuf *) (mbox->context1);
171 if (mp) { 239 if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
175 list_del(&mbox->list); 243 list_del(&mbox->list);
176 mempool_free(mbox, phba->mbox_mem_pool); 244 mempool_free(mbox, phba->mbox_mem_pool);
177 } 245 }
178 246 /* Free the active mailbox command back to the mailbox memory pool */
247 spin_lock_irq(&phba->hbalock);
179 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 248 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
249 spin_unlock_irq(&phba->hbalock);
180 if (psli->mbox_active) { 250 if (psli->mbox_active) {
181 mbox = psli->mbox_active; 251 mbox = psli->mbox_active;
182 mp = (struct lpfc_dmabuf *) (mbox->context1); 252 mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
188 psli->mbox_active = NULL; 258 psli->mbox_active = NULL;
189 } 259 }
190 260
191 for (i = 0; i < pool->current_count; i++) 261 /* Free and destroy all the allocated memory pools */
192 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 262 lpfc_mem_free(phba);
193 pool->elements[i].phys);
194 kfree(pool->elements);
195
196 pci_pool_destroy(phba->lpfc_hbq_pool);
197 mempool_destroy(phba->nlp_mem_pool);
198 mempool_destroy(phba->mbox_mem_pool);
199
200 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
201 pci_pool_destroy(phba->lpfc_mbuf_pool);
202
203 phba->lpfc_hbq_pool = NULL;
204 phba->nlp_mem_pool = NULL;
205 phba->mbox_mem_pool = NULL;
206 phba->lpfc_scsi_dma_buf_pool = NULL;
207 phba->lpfc_mbuf_pool = NULL;
208 263
209 /* Free the iocb lookup array */ 264 /* Free the iocb lookup array */
210 kfree(psli->iocbq_lookup); 265 kfree(psli->iocbq_lookup);
211 psli->iocbq_lookup = NULL; 266 psli->iocbq_lookup = NULL;
267
268 return;
212} 269}
213 270
214/** 271/**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
305 * lpfc_els_hbq_alloc - Allocate an HBQ buffer 362 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
306 * @phba: HBA to allocate HBQ buffer for 363 * @phba: HBA to allocate HBQ buffer for
307 * 364 *
308 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI 365 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
309 * pool along a non-DMA-mapped container for it. 366 * pool along a non-DMA-mapped container for it.
310 * 367 *
311 * Notes: Not interrupt-safe. Must be called with no locks held. 368 * Notes: Not interrupt-safe. Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
323 if (!hbqbp) 380 if (!hbqbp)
324 return NULL; 381 return NULL;
325 382
326 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, 383 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
327 &hbqbp->dbuf.phys); 384 &hbqbp->dbuf.phys);
328 if (!hbqbp->dbuf.virt) { 385 if (!hbqbp->dbuf.virt) {
329 kfree(hbqbp); 386 kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
334} 391}
335 392
336/** 393/**
337 * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc 394 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
338 * @phba: HBA buffer was allocated for 395 * @phba: HBA buffer was allocated for
339 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc 396 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
340 * 397 *
@@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
348void 405void
349lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 406lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
350{ 407{
351 pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); 408 pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
352 kfree(hbqbp); 409 kfree(hbqbp);
353 return; 410 return;
354} 411}
355 412
356/** 413/**
414 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
415 * @phba: HBA to allocate a receive buffer for
416 *
417 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
418 * pool along with a non-DMA-mapped container for it.
419 *
420 * Notes: Not interrupt-safe. Must be called with no locks held.
421 *
422 * Returns:
423 * pointer to HBQ on success
424 * NULL on failure
425 **/
426struct hbq_dmabuf *
427lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
428{
429 struct hbq_dmabuf *dma_buf;
430
431 dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
432 if (!dma_buf)
433 return NULL;
434
435 dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
436 &dma_buf->hbuf.phys);
437 if (!dma_buf->hbuf.virt) {
438 kfree(dma_buf);
439 return NULL;
440 }
441 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
442 &dma_buf->dbuf.phys);
443 if (!dma_buf->dbuf.virt) {
444 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
445 dma_buf->hbuf.phys);
446 kfree(dma_buf);
447 return NULL;
448 }
449 dma_buf->size = LPFC_BPL_SIZE;
450 return dma_buf;
451}
452
453/**
454 * lpfc_sli4_rb_free - Frees a receive buffer
455 * @phba: HBA buffer was allocated for
456 * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
457 *
458 * Description: Frees both the container and the DMA-mapped buffers returned by
459 * lpfc_sli4_rb_alloc.
460 *
461 * Notes: Can be called with or without locks held.
462 *
463 * Returns: None
464 **/
465void
466lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
467{
468 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
469 pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
470 kfree(dmab);
471 return;
472}
473
474/**
357 * lpfc_in_buf_free - Free a DMA buffer 475 * lpfc_in_buf_free - Free a DMA buffer
358 * @phba: HBA buffer is associated with 476 * @phba: HBA buffer is associated with
359 * @mp: Buffer to free 477 * @mp: Buffer to free
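Illustrative sketch (not part of the patch) pairing the new SLI4 receive-buffer helpers added to lpfc_mem.c above: lpfc_sli4_rb_alloc() hands back a header/data buffer pair drawn from the new lpfc_hrb_pool/lpfc_drb_pool, and lpfc_sli4_rb_free() returns both pieces.

/* Sketch only -- posting the buffers to a receive queue is out of scope here. */
static int example_rb_roundtrip(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *rb = lpfc_sli4_rb_alloc(phba);

	if (!rb)
		return -ENOMEM;
	/* rb->hbuf holds the header buffer, rb->dbuf the data buffer */
	lpfc_sli4_rb_free(phba, rb);
	return 0;
}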
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 08cdc77af41c..09f659f77bb3 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
361 if (!mbox) 363 if (!mbox)
362 goto out; 364 goto out;
363 365
364 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, 366 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
365 (uint8_t *) sp, mbox, 0); 367 (uint8_t *) sp, mbox, 0);
366 if (rc) { 368 if (rc) {
367 mempool_free(mbox, phba->mbox_mem_pool); 369 mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
495 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
496 else 498 else
497 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
500 if ((ndlp->nlp_type & NLP_FABRIC) &&
501 vport->port_type == LPFC_NPIV_PORT) {
502 lpfc_linkdown_port(vport);
503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
504 spin_lock_irq(shost->host_lock);
505 ndlp->nlp_flag |= NLP_DELAY_TMO;
506 spin_unlock_irq(shost->host_lock);
498 507
499 if ((!(ndlp->nlp_type & NLP_FABRIC) && 508 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
500 ((ndlp->nlp_type & NLP_FCP_TARGET) || 509 } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
501 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 510 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
502 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 511 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
512 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
503 /* Only try to re-login if this is NOT a Fabric Node */ 513 /* Only try to re-login if this is NOT a Fabric Node */
504 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 514 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
505 spin_lock_irq(shost->host_lock); 515 spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
567{ 577{
568 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 578 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
569 579
570 if (!ndlp->nlp_rpi) { 580 if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
571 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 581 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
572 return 0; 582 return 0;
573 } 583 }
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
857 867
858 lpfc_unreg_rpi(vport, ndlp); 868 lpfc_unreg_rpi(vport, ndlp);
859 869
860 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, 870 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
861 (uint8_t *) sp, mbox, 0) == 0) { 871 (uint8_t *) sp, mbox, 0) == 0) {
862 switch (ndlp->nlp_DID) { 872 switch (ndlp->nlp_DID) {
863 case NameServer_DID: 873 case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1068 struct lpfc_iocbq *cmdiocb, *rspiocb; 1078 struct lpfc_iocbq *cmdiocb, *rspiocb;
1069 IOCB_t *irsp; 1079 IOCB_t *irsp;
1070 ADISC *ap; 1080 ADISC *ap;
1081 int rc;
1071 1082
1072 cmdiocb = (struct lpfc_iocbq *) arg; 1083 cmdiocb = (struct lpfc_iocbq *) arg;
1073 rspiocb = cmdiocb->context_un.rsp_iocb; 1084 rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1093 return ndlp->nlp_state; 1104 return ndlp->nlp_state;
1094 } 1105 }
1095 1106
1107 if (phba->sli_rev == LPFC_SLI_REV4) {
1108 rc = lpfc_sli4_resume_rpi(ndlp);
1109 if (rc) {
1110 /* Stay in state and retry. */
1111 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1112 return ndlp->nlp_state;
1113 }
1114 }
1115
1096 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1116 if (ndlp->nlp_type & NLP_FCP_TARGET) {
1097 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1117 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1098 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1118 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1100 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1120 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1102 } 1122 }
1123
1103 return ndlp->nlp_state; 1124 return ndlp->nlp_state;
1104} 1125}
1105 1126
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1190 1211
1191 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1212 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1192 if ((mb = phba->sli.mbox_active)) { 1213 if ((mb = phba->sli.mbox_active)) {
1193 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1214 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1194 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1215 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1195 lpfc_nlp_put(ndlp); 1216 lpfc_nlp_put(ndlp);
1196 mb->context2 = NULL; 1217 mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1200 1221
1201 spin_lock_irq(&phba->hbalock); 1222 spin_lock_irq(&phba->hbalock);
1202 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1203 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1204 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1205 mp = (struct lpfc_dmabuf *) (mb->context1); 1226 mp = (struct lpfc_dmabuf *) (mb->context1);
1206 if (mp) { 1227 if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1251{ 1272{
1252 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1273 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1253 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1274 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1254 MAILBOX_t *mb = &pmb->mb; 1275 MAILBOX_t *mb = &pmb->u.mb;
1255 uint32_t did = mb->un.varWords[1]; 1276 uint32_t did = mb->un.varWords[1];
1256 1277
1257 if (mb->mbxStatus) { 1278 if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1283 } 1304 }
1284 1305
1285 ndlp->nlp_rpi = mb->un.varWords[0]; 1306 ndlp->nlp_rpi = mb->un.varWords[0];
1307 ndlp->nlp_flag |= NLP_RPI_VALID;
1286 1308
1287 /* Only if we are not a fabric nport do we issue PRLI */ 1309 /* Only if we are not a fabric nport do we issue PRLI */
1288 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1310 if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1878 void *arg, uint32_t evt) 1900 void *arg, uint32_t evt)
1879{ 1901{
1880 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1902 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1881 MAILBOX_t *mb = &pmb->mb; 1903 MAILBOX_t *mb = &pmb->u.mb;
1882 1904
1883 if (!mb->mbxStatus) 1905 if (!mb->mbxStatus) {
1884 ndlp->nlp_rpi = mb->un.varWords[0]; 1906 ndlp->nlp_rpi = mb->un.varWords[0];
1885 else { 1907 ndlp->nlp_flag |= NLP_RPI_VALID;
1908 } else {
1886 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1909 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1887 lpfc_drop_node(vport, ndlp); 1910 lpfc_drop_node(vport, ndlp);
1888 return NLP_STE_FREED_NODE; 1911 return NLP_STE_FREED_NODE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 8032c5adb6a9..e9fa6762044a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -31,8 +31,10 @@
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_version.h" 33#include "lpfc_version.h"
34#include "lpfc_hw4.h"
34#include "lpfc_hw.h" 35#include "lpfc_hw.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_nl.h" 38#include "lpfc_nl.h"
37#include "lpfc_disc.h" 39#include "lpfc_disc.h"
38#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
57 "SCSI_PROT_READ_CONVERT", 59 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT" 60 "SCSI_PROT_WRITE_CONVERT"
59}; 61};
62static void
63lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
60 64
61static void 65static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd) 66lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -325,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
325 329
326 vports = lpfc_create_vport_work_array(phba); 330 vports = lpfc_create_vport_work_array(phba);
327 if (vports != NULL) 331 if (vports != NULL)
328 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 332 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
329 shost = lpfc_shost_from_vport(vports[i]); 333 shost = lpfc_shost_from_vport(vports[i]);
330 shost_for_each_device(sdev, shost) { 334 shost_for_each_device(sdev, shost) {
331 new_queue_depth = 335 new_queue_depth =
@@ -379,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
379 383
380 vports = lpfc_create_vport_work_array(phba); 384 vports = lpfc_create_vport_work_array(phba);
381 if (vports != NULL) 385 if (vports != NULL)
382 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 386 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
383 shost = lpfc_shost_from_vport(vports[i]); 387 shost = lpfc_shost_from_vport(vports[i]);
384 shost_for_each_device(sdev, shost) { 388 shost_for_each_device(sdev, shost) {
385 if (vports[i]->cfg_lun_queue_depth <= 389 if (vports[i]->cfg_lun_queue_depth <=
@@ -427,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
427 431
428 vports = lpfc_create_vport_work_array(phba); 432 vports = lpfc_create_vport_work_array(phba);
429 if (vports != NULL) 433 if (vports != NULL)
430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 434 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
431 shost = lpfc_shost_from_vport(vports[i]); 435 shost = lpfc_shost_from_vport(vports[i]);
432 shost_for_each_device(sdev, shost) { 436 shost_for_each_device(sdev, shost) {
433 rport = starget_to_rport(scsi_target(sdev)); 437 rport = starget_to_rport(scsi_target(sdev));
@@ -438,22 +442,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
438} 442}
439 443
440/** 444/**
441 * lpfc_new_scsi_buf - Scsi buffer allocator 445 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
442 * @vport: The virtual port for which this call being executed. 446 * @vport: The virtual port for which this call being executed.
447 * @num_to_alloc: The requested number of buffers to allocate.
443 * 448 *
444 * This routine allocates a scsi buffer, which contains all the necessary 449 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
445 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 450 * the scsi buffer contains all the necessary information needed to initiate
446 * contains information to build the IOCB. The DMAable region contains 451 * a SCSI I/O. The non-DMAable buffer region contains information to build
447 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to 452 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
448 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL 453 * and the initial BPL. In addition to allocating memory, the FCP CMND and
449 * and the BPL BDE is setup in the IOCB. 454 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
450 * 455 *
451 * Return codes: 456 * Return codes:
452 * NULL - Error 457 * int - number of scsi buffers that were allocated.
453 * Pointer to lpfc_scsi_buf data structure - Success 458 * 0 = failure, less than num_to_alloc is a partial failure.
454 **/ 459 **/
455static struct lpfc_scsi_buf * 460static int
456lpfc_new_scsi_buf(struct lpfc_vport *vport) 461lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
457{ 462{
458 struct lpfc_hba *phba = vport->phba; 463 struct lpfc_hba *phba = vport->phba;
459 struct lpfc_scsi_buf *psb; 464 struct lpfc_scsi_buf *psb;
@@ -463,107 +468,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
463 dma_addr_t pdma_phys_fcp_rsp; 468 dma_addr_t pdma_phys_fcp_rsp;
464 dma_addr_t pdma_phys_bpl; 469 dma_addr_t pdma_phys_bpl;
465 uint16_t iotag; 470 uint16_t iotag;
471 int bcnt;
466 472
467 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 473 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
468 if (!psb) 474 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
469 return NULL; 475 if (!psb)
476 break;
477
478 /*
479 * Get memory from the pci pool to map the virt space to pci
480 * bus space for an I/O. The DMA buffer includes space for the
481 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
482 * necessary to support the sg_tablesize.
483 */
484 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
485 GFP_KERNEL, &psb->dma_handle);
486 if (!psb->data) {
487 kfree(psb);
488 break;
489 }
490
491 /* Initialize virtual ptrs to dma_buf region. */
492 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
493
494 /* Allocate iotag for psb->cur_iocbq. */
495 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
496 if (iotag == 0) {
497 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
498 psb->data, psb->dma_handle);
499 kfree(psb);
500 break;
501 }
502 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
503
504 psb->fcp_cmnd = psb->data;
505 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
506 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
507 sizeof(struct fcp_rsp);
508
509 /* Initialize local short-hand pointers. */
510 bpl = psb->fcp_bpl;
511 pdma_phys_fcp_cmd = psb->dma_handle;
512 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
513 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
514 sizeof(struct fcp_rsp);
515
516 /*
517 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
518 * are sg list bdes. Initialize the first two and leave the
519 * rest for queuecommand.
520 */
521 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
522 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
523 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
524 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
526
527 /* Setup the physical region for the FCP RSP */
528 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
529 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
530 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
531 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
532 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
533
534 /*
535 * Since the IOCB for the FCP I/O is built into this
536 * lpfc_scsi_buf, initialize it with all known data now.
537 */
538 iocb = &psb->cur_iocbq.iocb;
539 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
540 if ((phba->sli_rev == 3) &&
541 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
542 /* fill in immediate fcp command BDE */
543 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
544 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
545 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
546 unsli3.fcp_ext.icd);
547 iocb->un.fcpi64.bdl.addrHigh = 0;
548 iocb->ulpBdeCount = 0;
549 iocb->ulpLe = 0;
550 /* fill in response BDE */
551 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
552 BUFF_TYPE_BDE_64;
553 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
554 sizeof(struct fcp_rsp);
555 iocb->unsli3.fcp_ext.rbde.addrLow =
556 putPaddrLow(pdma_phys_fcp_rsp);
557 iocb->unsli3.fcp_ext.rbde.addrHigh =
558 putPaddrHigh(pdma_phys_fcp_rsp);
559 } else {
560 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
561 iocb->un.fcpi64.bdl.bdeSize =
562 (2 * sizeof(struct ulp_bde64));
563 iocb->un.fcpi64.bdl.addrLow =
564 putPaddrLow(pdma_phys_bpl);
565 iocb->un.fcpi64.bdl.addrHigh =
566 putPaddrHigh(pdma_phys_bpl);
567 iocb->ulpBdeCount = 1;
568 iocb->ulpLe = 1;
569 }
570 iocb->ulpClass = CLASS3;
571 psb->status = IOSTAT_SUCCESS;
572 /* Put it back into the SCSI buffer list */
573 lpfc_release_scsi_buf_s4(phba, psb);
470 574
471 /*
472 * Get memory from the pci pool to map the virt space to pci bus space
473 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
474 * struct fcp_rsp and the number of bde's necessary to support the
475 * sg_tablesize.
476 */
477 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
478 &psb->dma_handle);
479 if (!psb->data) {
480 kfree(psb);
481 return NULL;
482 } 575 }
483 576
484 /* Initialize virtual ptrs to dma_buf region. */ 577 return bcnt;
485 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 578}
486 579
487 /* Allocate iotag for psb->cur_iocbq. */ 580/**
488 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 581 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
489 if (iotag == 0) { 582 * @phba: pointer to lpfc hba data structure.
490 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 583 * @axri: pointer to the fcp xri abort wcqe structure.
491 psb->data, psb->dma_handle); 584 *
492 kfree (psb); 585 * This routine is invoked by the worker thread to process a SLI4 fast-path
493 return NULL; 586 * FCP aborted xri.
587 **/
588void
589lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
590 struct sli4_wcqe_xri_aborted *axri)
591{
592 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
593 struct lpfc_scsi_buf *psb, *next_psb;
594 unsigned long iflag = 0;
595
596 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
597 list_for_each_entry_safe(psb, next_psb,
598 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
599 if (psb->cur_iocbq.sli4_xritag == xri) {
600 list_del(&psb->list);
601 psb->status = IOSTAT_SUCCESS;
602 spin_unlock_irqrestore(
603 &phba->sli4_hba.abts_scsi_buf_list_lock,
604 iflag);
605 lpfc_release_scsi_buf_s4(phba, psb);
606 return;
607 }
608 }
609 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
610 iflag);
611}
612
613/**
614 * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block
615 * @phba: pointer to lpfc hba data structure.
616 *
617 * This routine walks the list of scsi buffers that have been allocated and
618 * reposts them to the HBA by using SGL block post. This is needed after a
619 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
620 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
621 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
622 *
623 * Returns: 0 = success, non-zero failure.
624 **/
625int
626lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
627{
628 struct lpfc_scsi_buf *psb;
629 int index, status, bcnt = 0, rcnt = 0, rc = 0;
630 LIST_HEAD(sblist);
631
632 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
633 psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
634 if (psb) {
635 /* Remove from SCSI buffer list */
636 list_del(&psb->list);
637 /* Add it to a local SCSI buffer list */
638 list_add_tail(&psb->list, &sblist);
639 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
640 bcnt = rcnt;
641 rcnt = 0;
642 }
643 } else
644 /* A hole present in the XRI array, need to skip */
645 bcnt = rcnt;
646
647 if (index == phba->sli4_hba.scsi_xri_cnt - 1)
648 /* End of XRI array for SCSI buffer, complete */
649 bcnt = rcnt;
650
651 /* Continue until we collect up to a nembed page worth of sgls */
652 if (bcnt == 0)
653 continue;
654 /* Now, post the SCSI buffer list sgls as a block */
655 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
656 /* Reset SCSI buffer count for next round of posting */
657 bcnt = 0;
658 while (!list_empty(&sblist)) {
659 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
660 list);
661 if (status) {
662 /* Put this back on the abort scsi list */
663 psb->status = IOSTAT_LOCAL_REJECT;
664 psb->result = IOERR_ABORT_REQUESTED;
665 rc++;
666 } else
667 psb->status = IOSTAT_SUCCESS;
668 /* Put it back into the SCSI buffer list */
669 lpfc_release_scsi_buf_s4(phba, psb);
670 }
494 } 671 }
495 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 672 return rc;
673}
496 674
497 psb->fcp_cmnd = psb->data; 675/**
498 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 676 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
499 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 677 * @vport: The virtual port for which this call being executed.
500 sizeof(struct fcp_rsp); 678 * @num_to_alloc: The requested number of buffers to allocate.
679 *
680 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
681 * the scsi buffer contains all the necessary information needed to initiate
682 * a SCSI I/O.
683 *
684 * Return codes:
685 * int - number of scsi buffers that were allocated.
686 * 0 = failure, less than num_to_alloc is a partial failure.
687 **/
688static int
689lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
690{
691 struct lpfc_hba *phba = vport->phba;
692 struct lpfc_scsi_buf *psb;
693 struct sli4_sge *sgl;
694 IOCB_t *iocb;
695 dma_addr_t pdma_phys_fcp_cmd;
696 dma_addr_t pdma_phys_fcp_rsp;
697 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
698 uint16_t iotag, last_xritag = NO_XRI;
699 int status = 0, index;
700 int bcnt;
701 int non_sequential_xri = 0;
702 int rc = 0;
703 LIST_HEAD(sblist);
704
705 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
706 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
707 if (!psb)
708 break;
501 709
502 /* Initialize local short-hand pointers. */ 710 /*
503 bpl = psb->fcp_bpl; 711 * Get memory from the pci pool to map the virt space to pci bus
504 pdma_phys_fcp_cmd = psb->dma_handle; 712 * space for an I/O. The DMA buffer includes space for the
505 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 713 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
506 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + 714 * necessary to support the sg_tablesize.
507 sizeof(struct fcp_rsp); 715 */
716 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
717 GFP_KERNEL, &psb->dma_handle);
718 if (!psb->data) {
719 kfree(psb);
720 break;
721 }
508 722
509 /* 723 /* Initialize virtual ptrs to dma_buf region. */
510 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg 724 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
511 * list bdes. Initialize the first two and leave the rest for
512 * queuecommand.
513 */
514 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
515 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
516 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
517 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
518 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
519
520 /* Setup the physical region for the FCP RSP */
521 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
522 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
523 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
524 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
526 725
527 /* 726 /* Allocate iotag for psb->cur_iocbq. */
528 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 727 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
529 * initialize it with all known data now. 728 if (iotag == 0) {
530 */ 729 kfree(psb);
531 iocb = &psb->cur_iocbq.iocb; 730 break;
532 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 731 }
533 if ((phba->sli_rev == 3) && 732
534 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 733 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
535 /* fill in immediate fcp command BDE */ 734 if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
536 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 735 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
736 psb->data, psb->dma_handle);
737 kfree(psb);
738 break;
739 }
740 if (last_xritag != NO_XRI
741 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
742 non_sequential_xri = 1;
743 } else
744 list_add_tail(&psb->list, &sblist);
745 last_xritag = psb->cur_iocbq.sli4_xritag;
746
747 index = phba->sli4_hba.scsi_xri_cnt++;
748 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
749
750 psb->fcp_bpl = psb->data;
751 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
752 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
753 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
754 sizeof(struct fcp_cmnd));
755
756 /* Initialize local short-hand pointers. */
757 sgl = (struct sli4_sge *)psb->fcp_bpl;
758 pdma_phys_bpl = psb->dma_handle;
759 pdma_phys_fcp_cmd =
760 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
761 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
762 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
763
764 /*
765 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
766 * are sg list bdes. Initialize the first two and leave the
767 * rest for queuecommand.
768 */
769 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
770 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
771 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
772 bf_set(lpfc_sli4_sge_last, sgl, 0);
773 sgl->word2 = cpu_to_le32(sgl->word2);
774 sgl->word3 = cpu_to_le32(sgl->word3);
775 sgl++;
776
777 /* Setup the physical region for the FCP RSP */
778 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
779 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
780 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
781 bf_set(lpfc_sli4_sge_last, sgl, 1);
782 sgl->word2 = cpu_to_le32(sgl->word2);
783 sgl->word3 = cpu_to_le32(sgl->word3);
784
785 /*
786 * Since the IOCB for the FCP I/O is built into this
787 * lpfc_scsi_buf, initialize it with all known data now.
788 */
789 iocb = &psb->cur_iocbq.iocb;
790 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
791 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
792 /* setting the BLP size to 2 * sizeof BDE may not be correct.
793 * We are setting the bpl to point to out sgl. An sgl's
794 * entries are 16 bytes, a bpl entries are 12 bytes.
795 */
537 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 796 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 797 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
539 unsli3.fcp_ext.icd); 798 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
540 iocb->un.fcpi64.bdl.addrHigh = 0;
541 iocb->ulpBdeCount = 0;
542 iocb->ulpLe = 0;
543 /* fill in responce BDE */
544 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546 sizeof(struct fcp_rsp);
547 iocb->unsli3.fcp_ext.rbde.addrLow =
548 putPaddrLow(pdma_phys_fcp_rsp);
549 iocb->unsli3.fcp_ext.rbde.addrHigh =
550 putPaddrHigh(pdma_phys_fcp_rsp);
551 } else {
552 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556 iocb->ulpBdeCount = 1; 799 iocb->ulpBdeCount = 1;
557 iocb->ulpLe = 1; 800 iocb->ulpLe = 1;
801 iocb->ulpClass = CLASS3;
802 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
803 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
804 else
805 pdma_phys_bpl1 = 0;
806 psb->dma_phys_bpl = pdma_phys_bpl;
807 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
808 if (non_sequential_xri) {
809 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
810 pdma_phys_bpl1,
811 psb->cur_iocbq.sli4_xritag);
812 if (status) {
813 /* Put this back on the abort scsi list */
814 psb->status = IOSTAT_LOCAL_REJECT;
815 psb->result = IOERR_ABORT_REQUESTED;
816 rc++;
817 } else
818 psb->status = IOSTAT_SUCCESS;
819 /* Put it back into the SCSI buffer list */
820 lpfc_release_scsi_buf_s4(phba, psb);
821 break;
822 }
823 }
824 if (bcnt) {
825 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
826 /* Reset SCSI buffer count for next round of posting */
827 while (!list_empty(&sblist)) {
828 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
829 list);
830 if (status) {
831 /* Put this back on the abort scsi list */
832 psb->status = IOSTAT_LOCAL_REJECT;
833 psb->result = IOERR_ABORT_REQUESTED;
834 rc++;
835 } else
836 psb->status = IOSTAT_SUCCESS;
837 /* Put it back into the SCSI buffer list */
838 lpfc_release_scsi_buf_s4(phba, psb);
839 }
558 } 840 }
559 iocb->ulpClass = CLASS3;
560 841
561 return psb; 842 return bcnt + non_sequential_xri - rc;
562} 843}
563 844
564/** 845/**
565 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba 846 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
566 * @phba: The Hba for which this call is being executed. 847 * @vport: The virtual port for which this call is being executed.
848 * @num_to_alloc: The requested number of buffers to allocate.
849 *
850 * This routine wraps the actual SCSI buffer allocator function pointer from
851 * the lpfc_hba struct.
852 *
853 * Return codes:
854 * int - number of scsi buffers that were allocated.
855 * 0 = failure, less than num_to_alloc is a partial failure.
856 **/
857static inline int
858lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
859{
860 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
861}
862
863/**
864 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
865 * @phba: The HBA for which this call is being executed.
567 * 866 *
568 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 867 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
569 * and returns to caller. 868 * and returns to caller.
@@ -591,7 +890,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
591} 890}
592 891
593/** 892/**
594 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list 893 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
595 * @phba: The Hba for which this call is being executed. 894 * @phba: The Hba for which this call is being executed.
596 * @psb: The scsi buffer which is being released. 895 * @psb: The scsi buffer which is being released.
597 * 896 *
@@ -599,7 +898,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
599 * lpfc_scsi_buf_list list. 898 * lpfc_scsi_buf_list list.
600 **/ 899 **/
601static void 900static void
602lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 901lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
603{ 902{
604 unsigned long iflag = 0; 903 unsigned long iflag = 0;
605 904
@@ -610,21 +909,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
610} 909}
611 910
612/** 911/**
613 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer 912 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
913 * @phba: The Hba for which this call is being executed.
914 * @psb: The scsi buffer which is being released.
915 *
916 * This routine releases @psb scsi buffer by adding it to tail of @phba
917 * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
918 * and cannot be reused for at least RA_TOV amount of time if it was
919 * aborted.
920 **/
921static void
922lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
923{
924 unsigned long iflag = 0;
925
926 if (psb->status == IOSTAT_LOCAL_REJECT
927 && psb->result == IOERR_ABORT_REQUESTED) {
928 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
929 iflag);
930 psb->pCmd = NULL;
931 list_add_tail(&psb->list,
932 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
933 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
934 iflag);
935 } else {
936
937 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
938 psb->pCmd = NULL;
939 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
940 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
941 }
942}
943
944/**
945 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
946 * @phba: The Hba for which this call is being executed.
947 * @psb: The scsi buffer which is being released.
948 *
949 * This routine releases @psb scsi buffer by adding it to tail of @phba
950 * lpfc_scsi_buf_list list.
951 **/
952static void
953lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
954{
955
956 phba->lpfc_release_scsi_buf(phba, psb);
957}
958
959/**
960 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
614 * @phba: The Hba for which this call is being executed. 961 * @phba: The Hba for which this call is being executed.
615 * @lpfc_cmd: The scsi buffer which is going to be mapped. 962 * @lpfc_cmd: The scsi buffer which is going to be mapped.
616 * 963 *
617 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 964 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
618 * field of @lpfc_cmd. This routine scans through sg elements and format the 965 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
619 * bdea. This routine also initializes all IOCB fields which are dependent on 966 * through sg elements and format the bdea. This routine also initializes all
620 * scsi command request buffer. 967 * IOCB fields which are dependent on scsi command request buffer.
621 * 968 *
622 * Return codes: 969 * Return codes:
623 * 1 - Error 970 * 1 - Error
624 * 0 - Success 971 * 0 - Success
625 **/ 972 **/
626static int 973static int
627lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 974lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
628{ 975{
629 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 976 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630 struct scatterlist *sgel = NULL; 977 struct scatterlist *sgel = NULL;
@@ -1412,6 +1759,133 @@ out:
1412} 1759}
1413 1760
1414/** 1761/**
1762 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1763 * @phba: The Hba for which this call is being executed.
1764 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1765 *
1766 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1767 * field of @lpfc_cmd for device with SLI-4 interface spec.
1768 *
1769 * Return codes:
1770 * 1 - Error
1771 * 0 - Success
1772 **/
1773static int
1774lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1775{
1776 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1777 struct scatterlist *sgel = NULL;
1778 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1779 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1780 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1781 dma_addr_t physaddr;
1782 uint32_t num_bde = 0;
1783 uint32_t dma_len;
1784 uint32_t dma_offset = 0;
1785 int nseg;
1786
1787 /*
1788 * There are three possibilities here - use scatter-gather segment, use
1789 * the single mapping, or neither. Start the lpfc command prep by
 1790 * bumping the sgl beyond the fcp_cmnd and fcp_rsp regions to the first
 1791 * data sge entry.
1792 */
1793 if (scsi_sg_count(scsi_cmnd)) {
1794 /*
1795 * The driver stores the segment count returned from pci_map_sg
 1796 * because this is a count of dma-mappings used to map the use_sg
1797 * pages. They are not guaranteed to be the same for those
1798 * architectures that implement an IOMMU.
1799 */
1800
1801 nseg = scsi_dma_map(scsi_cmnd);
1802 if (unlikely(!nseg))
1803 return 1;
1804 sgl += 1;
1805 /* clear the last flag in the fcp_rsp map entry */
1806 sgl->word2 = le32_to_cpu(sgl->word2);
1807 bf_set(lpfc_sli4_sge_last, sgl, 0);
1808 sgl->word2 = cpu_to_le32(sgl->word2);
1809 sgl += 1;
1810
1811 lpfc_cmd->seg_cnt = nseg;
1812 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1813 printk(KERN_ERR "%s: Too many sg segments from "
1814 "dma_map_sg. Config %d, seg_cnt %d\n",
1815 __func__, phba->cfg_sg_seg_cnt,
1816 lpfc_cmd->seg_cnt);
1817 scsi_dma_unmap(scsi_cmnd);
1818 return 1;
1819 }
1820
1821 /*
1822 * The driver established a maximum scatter-gather segment count
1823 * during probe that limits the number of sg elements in any
1824 * single scsi command. Just run through the seg_cnt and format
 1825 * the SGEs.
 1826 * Unlike SLI-3, which first tries to fit the BDEs into the IOCB
 1827 * and falls back to a BPL, SLI-4 always describes the data with
 1828 * SGEs in the sgl.
1829 */
1830 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1831 physaddr = sg_dma_address(sgel);
1832 dma_len = sg_dma_len(sgel);
1833 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1834 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1835 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1836 if ((num_bde + 1) == nseg)
1837 bf_set(lpfc_sli4_sge_last, sgl, 1);
1838 else
1839 bf_set(lpfc_sli4_sge_last, sgl, 0);
1840 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1841 sgl->word2 = cpu_to_le32(sgl->word2);
1842 sgl->word3 = cpu_to_le32(sgl->word3);
1843 dma_offset += dma_len;
1844 sgl++;
1845 }
1846 } else {
1847 sgl += 1;
 1848 /* set the last flag in the fcp_rsp map entry */
1849 sgl->word2 = le32_to_cpu(sgl->word2);
1850 bf_set(lpfc_sli4_sge_last, sgl, 1);
1851 sgl->word2 = cpu_to_le32(sgl->word2);
1852 }
1853
1854 /*
1855 * Finish initializing those IOCB fields that are dependent on the
1856 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
 1857 * explicitly reinitialized because
 1858 * all iocb memory resources are reused.
1859 */
1860 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1861
1862 /*
1863 * Due to difference in data length between DIF/non-DIF paths,
1864 * we need to set word 4 of IOCB here
1865 */
1866 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1867 return 0;
1868}
1869
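The SGL that lpfc_scsi_prep_dma_buf_s4 builds follows a fixed convention: slot 0 carries the FCP_CMND, slot 1 the FCP_RSP, and the data segments follow with a running offset, with the last flag set only on the final entry (or on the FCP_RSP entry when there is no data). Below is a minimal userspace sketch of that layout, using simplified stand-in fields rather than the driver's lpfc_sli4_sge definition.

#include <stdio.h>

/* Hypothetical stand-in for the sge fields; not the driver's lpfc_sli4_sge. */
struct sge_model {
	unsigned long long addr;   /* models putPaddrLow/High combined */
	unsigned int len;
	unsigned int offset;       /* cumulative, like dma_offset */
	int last;                  /* models the lpfc_sli4_sge_last bit */
};

static int build_sgl(struct sge_model *sgl, const unsigned int *seg_len, int nseg)
{
	unsigned int offset = 0;
	int i;

	/* slot 0 = FCP_CMND, slot 1 = FCP_RSP; rsp is last only when no data */
	sgl[1].last = (nseg == 0);
	for (i = 0; i < nseg; i++) {
		sgl[2 + i].addr = 0x1000ULL * (i + 1);  /* fake DMA addresses */
		sgl[2 + i].len = seg_len[i];
		sgl[2 + i].offset = offset;
		sgl[2 + i].last = (i == nseg - 1);
		offset += seg_len[i];
	}
	return 2 + nseg;                                /* total entries used */
}

int main(void)
{
	struct sge_model sgl[8] = {{0}};
	unsigned int segs[] = { 4096, 4096, 512 };
	int n = build_sgl(sgl, segs, 3);
	int i;

	for (i = 0; i < n; i++)
		printf("sge %d: len %u off %u last %d\n",
		       i, sgl[i].len, sgl[i].offset, sgl[i].last);
	return 0;
}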
1870/**
1871 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1872 * @phba: The Hba for which this call is being executed.
1873 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1874 *
1875 * This routine wraps the actual DMA mapping function pointer from the
1876 * lpfc_hba struct.
1877 *
1878 * Return codes:
1879 * 1 - Error
1880 * 0 - Success
1881 **/
1882static inline int
1883lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1884{
1885 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1886}
1887
1888/**
1415 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 1889 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1416 * @phba: Pointer to hba context object. 1890 * @phba: Pointer to hba context object.
1417 * @vport: Pointer to vport object. 1891 * @vport: Pointer to vport object.
@@ -1504,15 +1978,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1504} 1978}
1505 1979
1506/** 1980/**
1507 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather 1981 * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
1508 * @phba: The Hba for which this call is being executed. 1982 * @phba: The HBA for which this call is being executed.
1509 * @psb: The scsi buffer which is going to be un-mapped. 1983 * @psb: The scsi buffer which is going to be un-mapped.
1510 * 1984 *
1511 * This routine does DMA un-mapping of scatter gather list of scsi command 1985 * This routine does DMA un-mapping of scatter gather list of scsi command
1512 * field of @lpfc_cmd. 1986 * field of @lpfc_cmd for device with SLI-3 interface spec.
1513 **/ 1987 **/
1514static void 1988static void
1515lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 1989lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1516{ 1990{
1517 /* 1991 /*
1518 * There are only two special cases to consider. (1) the scsi command 1992 * There are only two special cases to consider. (1) the scsi command
@@ -1529,6 +2003,36 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
1529} 2003}
1530 2004
1531/** 2005/**
2006 * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
2007 * @phba: The Hba for which this call is being executed.
2008 * @psb: The scsi buffer which is going to be un-mapped.
2009 *
2010 * This routine does DMA un-mapping of scatter gather list of scsi command
2011 * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
2012 * remove the sgl for this scsi buffer then we will do it here. For now
2013 * we should be able to just call the sli3 unprep routine.
2014 **/
2015static void
2016lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2017{
2018 lpfc_scsi_unprep_dma_buf_s3(phba, psb);
2019}
2020
2021/**
2022 * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
2023 * @phba: The Hba for which this call is being executed.
2024 * @psb: The scsi buffer which is going to be un-mapped.
2025 *
 2026 * This routine wraps the actual DMA un-mapping function pointer from
 2027 * the lpfc_hba struct.
2028 **/
2029static void
2030lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2031{
2032 phba->lpfc_scsi_unprep_dma_buf(phba, psb);
2033}
2034
2035/**
1532 * lpfc_handler_fcp_err - FCP response handler 2036 * lpfc_handler_fcp_err - FCP response handler
1533 * @vport: The virtual port for which this call is being executed. 2037 * @vport: The virtual port for which this call is being executed.
1534 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2038 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -1676,7 +2180,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 2180 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677 * @phba: The Hba for which this call is being executed. 2181 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd. 2182 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd . 2183 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680 * 2184 *
1681 * This routine assigns scsi command result by looking into response IOCB 2185 * This routine assigns scsi command result by looking into response IOCB
1682 * status field appropriately. This routine handles QUEUE FULL condition as 2186 * status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,16 +2461,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1957} 2461}
1958 2462
1959/** 2463/**
 1960 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit     2464 * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP info unit for SLI3 dev
1961 * @vport: The virtual port for which this call is being executed. 2465 * @vport: The virtual port for which this call is being executed.
1962 * @lpfc_cmd: The scsi command which needs to send. 2466 * @lpfc_cmd: The scsi command which needs to send.
1963 * @pnode: Pointer to lpfc_nodelist. 2467 * @pnode: Pointer to lpfc_nodelist.
1964 * 2468 *
1965 * This routine initializes fcp_cmnd and iocb data structure from scsi command 2469 * This routine initializes fcp_cmnd and iocb data structure from scsi command
1966 * to transfer. 2470 * to transfer for device with SLI3 interface spec.
1967 **/ 2471 **/
1968static void 2472static void
1969lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2473lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1970 struct lpfc_nodelist *pnode) 2474 struct lpfc_nodelist *pnode)
1971{ 2475{
1972 struct lpfc_hba *phba = vport->phba; 2476 struct lpfc_hba *phba = vport->phba;
@@ -2013,8 +2517,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2013 if (scsi_sg_count(scsi_cmnd)) { 2517 if (scsi_sg_count(scsi_cmnd)) {
2014 if (datadir == DMA_TO_DEVICE) { 2518 if (datadir == DMA_TO_DEVICE) {
2015 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 2519 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016 iocb_cmd->un.fcpi.fcpi_parm = 0; 2520 if (phba->sli_rev < LPFC_SLI_REV4) {
2017 iocb_cmd->ulpPU = 0; 2521 iocb_cmd->un.fcpi.fcpi_parm = 0;
2522 iocb_cmd->ulpPU = 0;
2523 } else
2524 iocb_cmd->ulpPU = PARM_READ_CHECK;
2018 fcp_cmnd->fcpCntl3 = WRITE_DATA; 2525 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019 phba->fc4OutputRequests++; 2526 phba->fc4OutputRequests++;
2020 } else { 2527 } else {
@@ -2051,20 +2558,60 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2051} 2558}
2052 2559
2053/** 2560/**
 2054 * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit     2561 * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev
2562 * @vport: The virtual port for which this call is being executed.
2563 * @lpfc_cmd: The scsi command which needs to send.
2564 * @pnode: Pointer to lpfc_nodelist.
2565 *
2566 * This routine initializes fcp_cmnd and iocb data structure from scsi command
2567 * to transfer for device with SLI4 interface spec.
2568 **/
2569static void
2570lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2571 struct lpfc_nodelist *pnode)
2572{
2573 /*
2574 * The prep cmnd routines do not touch the sgl or its
2575 * entries. We may not have to do anything different.
2576 * I will leave this function in place until we can
2577 * run some IO through the driver and determine if changes
2578 * are needed.
2579 */
2580 return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
2581}
2582
2583/**
 2584 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
2585 * @vport: The virtual port for which this call is being executed.
2586 * @lpfc_cmd: The scsi command which needs to send.
2587 * @pnode: Pointer to lpfc_nodelist.
2588 *
2589 * This routine wraps the actual convert SCSI cmnd function pointer from
2590 * the lpfc_hba struct.
2591 **/
2592static inline void
2593lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2594 struct lpfc_nodelist *pnode)
2595{
2596 vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
2597}
2598
2599/**
 2600 * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
2055 * @vport: The virtual port for which this call is being executed. 2601 * @vport: The virtual port for which this call is being executed.
2056 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2602 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057 * @lun: Logical unit number. 2603 * @lun: Logical unit number.
2058 * @task_mgmt_cmd: SCSI task management command. 2604 * @task_mgmt_cmd: SCSI task management command.
2059 * 2605 *
2060 * This routine creates FCP information unit corresponding to @task_mgmt_cmd. 2606 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2607 * for device with SLI-3 interface spec.
2061 * 2608 *
2062 * Return codes: 2609 * Return codes:
2063 * 0 - Error 2610 * 0 - Error
2064 * 1 - Success 2611 * 1 - Success
2065 **/ 2612 **/
2066static int 2613static int
2067lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 2614lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
2068 struct lpfc_scsi_buf *lpfc_cmd, 2615 struct lpfc_scsi_buf *lpfc_cmd,
2069 unsigned int lun, 2616 unsigned int lun,
2070 uint8_t task_mgmt_cmd) 2617 uint8_t task_mgmt_cmd)
@@ -2114,6 +2661,107 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2114} 2661}
2115 2662
2116/** 2663/**
 2664 * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
2665 * @vport: The virtual port for which this call is being executed.
2666 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2667 * @lun: Logical unit number.
2668 * @task_mgmt_cmd: SCSI task management command.
2669 *
2670 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2671 * for device with SLI-4 interface spec.
2672 *
2673 * Return codes:
2674 * 0 - Error
2675 * 1 - Success
2676 **/
2677static int
2678lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
2679 struct lpfc_scsi_buf *lpfc_cmd,
2680 unsigned int lun,
2681 uint8_t task_mgmt_cmd)
2682{
2683 /*
2684 * The prep cmnd routines do not touch the sgl or its
2685 * entries. We may not have to do anything different.
2686 * I will leave this function in place until we can
2687 * run some IO through the driver and determine if changes
2688 * are needed.
2689 */
2690 return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
2691 task_mgmt_cmd);
2692}
2693
2694/**
 2695 * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info
2696 * @vport: The virtual port for which this call is being executed.
2697 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2698 * @lun: Logical unit number.
2699 * @task_mgmt_cmd: SCSI task management command.
2700 *
2701 * This routine wraps the actual convert SCSI TM to FCP information unit
2702 * function pointer from the lpfc_hba struct.
2703 *
2704 * Return codes:
2705 * 0 - Error
2706 * 1 - Success
2707 **/
2708static inline int
2709lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2710 struct lpfc_scsi_buf *lpfc_cmd,
2711 unsigned int lun,
2712 uint8_t task_mgmt_cmd)
2713{
2714 struct lpfc_hba *phba = vport->phba;
2715
2716 return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2717 task_mgmt_cmd);
2718}
2719
2720/**
 2721 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2722 * @phba: The hba struct for which this call is being executed.
2723 * @dev_grp: The HBA PCI-Device group number.
2724 *
2725 * This routine sets up the SCSI interface API function jump table in @phba
2726 * struct.
2727 * Returns: 0 - success, -ENODEV - failure.
2728 **/
2729int
2730lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2731{
2732
2733 switch (dev_grp) {
2734 case LPFC_PCI_DEV_LP:
2735 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2736 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2737 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
2738 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
2739 phba->lpfc_scsi_prep_task_mgmt_cmd =
2740 lpfc_scsi_prep_task_mgmt_cmd_s3;
2741 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2742 break;
2743 case LPFC_PCI_DEV_OC:
2744 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2745 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2746 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
2747 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
2748 phba->lpfc_scsi_prep_task_mgmt_cmd =
2749 lpfc_scsi_prep_task_mgmt_cmd_s4;
2750 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2751 break;
2752 default:
2753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2754 "1418 Invalid HBA PCI-device group: 0x%x\n",
2755 dev_grp);
2756 return -ENODEV;
2757 break;
2758 }
2759 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2760 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2761 return 0;
2762}
2763
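lpfc_scsi_api_table_setup is what lets the rest of the file call a single name regardless of SLI revision: the wrapper functions above simply dispatch through whichever pointer was installed here. A minimal standalone sketch of the pattern, with invented names in place of the real struct lpfc_hba layout:

#include <stdio.h>

enum dev_grp { DEV_LP, DEV_OC };   /* stand-ins for LPFC_PCI_DEV_LP / _OC */

struct hba_model {
	int (*prep_dma_buf)(struct hba_model *hba);
};

static int prep_dma_buf_s3(struct hba_model *hba)
{
	(void)hba;
	printf("SLI-3 prep path\n");
	return 0;
}

static int prep_dma_buf_s4(struct hba_model *hba)
{
	(void)hba;
	printf("SLI-4 prep path\n");
	return 0;
}

static int api_table_setup(struct hba_model *hba, enum dev_grp grp)
{
	switch (grp) {
	case DEV_LP:
		hba->prep_dma_buf = prep_dma_buf_s3;
		break;
	case DEV_OC:
		hba->prep_dma_buf = prep_dma_buf_s4;
		break;
	default:
		return -1;              /* the driver returns -ENODEV here */
	}
	return 0;
}

int main(void)
{
	struct hba_model hba;

	if (api_table_setup(&hba, DEV_OC))
		return 1;
	/* callers dispatch through the pointer and never test sli_rev again */
	return hba.prep_dma_buf(&hba);
}

The setup routine also installs lpfc_get_scsi_buf and lpfc_rampdown_queue_depth unconditionally, since those paths are common to both device groups.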
2764/**
2117 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command 2765 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2118 * @phba: The Hba for which this call is being executed. 2766 * @phba: The Hba for which this call is being executed.
2119 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 2767 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
@@ -2178,9 +2826,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2826 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", 2827 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 2828 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2181 status = lpfc_sli_issue_iocb_wait(phba, 2829 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2182 &phba->sli.ring[phba->sli.fcp_ring], 2830 iocbq, iocbqrsp, lpfc_cmd->timeout);
2183 iocbq, iocbqrsp, lpfc_cmd->timeout);
2184 if (status != IOCB_SUCCESS) { 2831 if (status != IOCB_SUCCESS) {
2185 if (status == IOCB_TIMEDOUT) { 2832 if (status == IOCB_TIMEDOUT) {
2186 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 2833 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2305,7 +2952,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2305 struct Scsi_Host *shost = cmnd->device->host; 2952 struct Scsi_Host *shost = cmnd->device->host;
2306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2953 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307 struct lpfc_hba *phba = vport->phba; 2954 struct lpfc_hba *phba = vport->phba;
2308 struct lpfc_sli *psli = &phba->sli;
2309 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2955 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310 struct lpfc_nodelist *ndlp = rdata->pnode; 2956 struct lpfc_nodelist *ndlp = rdata->pnode;
2311 struct lpfc_scsi_buf *lpfc_cmd; 2957 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2427,7 +3073,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2427 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 3073 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2428 3074
2429 atomic_inc(&ndlp->cmd_pending); 3075 atomic_inc(&ndlp->cmd_pending);
2430 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 3076 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
2431 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 3077 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2432 if (err) { 3078 if (err) {
2433 atomic_dec(&ndlp->cmd_pending); 3079 atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +3136,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2490 struct Scsi_Host *shost = cmnd->device->host; 3136 struct Scsi_Host *shost = cmnd->device->host;
2491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3137 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 struct lpfc_hba *phba = vport->phba; 3138 struct lpfc_hba *phba = vport->phba;
2493 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
2494 struct lpfc_iocbq *iocb; 3139 struct lpfc_iocbq *iocb;
2495 struct lpfc_iocbq *abtsiocb; 3140 struct lpfc_iocbq *abtsiocb;
2496 struct lpfc_scsi_buf *lpfc_cmd; 3141 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +3176,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2531 icmd = &abtsiocb->iocb; 3176 icmd = &abtsiocb->iocb;
2532 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 3177 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2533 icmd->un.acxri.abortContextTag = cmd->ulpContext; 3178 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2534 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 3179 if (phba->sli_rev == LPFC_SLI_REV4)
3180 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3181 else
3182 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2535 3183
2536 icmd->ulpLe = 1; 3184 icmd->ulpLe = 1;
2537 icmd->ulpClass = cmd->ulpClass; 3185 icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +3190,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2542 3190
2543 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 3191 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2544 abtsiocb->vport = vport; 3192 abtsiocb->vport = vport;
2545 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 3193 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3194 IOCB_ERROR) {
2546 lpfc_sli_release_iocbq(phba, abtsiocb); 3195 lpfc_sli_release_iocbq(phba, abtsiocb);
2547 ret = FAILED; 3196 ret = FAILED;
2548 goto out; 3197 goto out;
@@ -2668,8 +3317,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
2668 "0703 Issue target reset to TGT %d LUN %d " 3317 "0703 Issue target reset to TGT %d LUN %d "
2669 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 3318 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
2670 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 3319 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
2671 status = lpfc_sli_issue_iocb_wait(phba, 3320 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2672 &phba->sli.ring[phba->sli.fcp_ring],
2673 iocbq, iocbqrsp, lpfc_cmd->timeout); 3321 iocbq, iocbqrsp, lpfc_cmd->timeout);
2674 if (status == IOCB_TIMEDOUT) { 3322 if (status == IOCB_TIMEDOUT) {
2675 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3323 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2825,11 +3473,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2825{ 3473{
2826 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3474 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2827 struct lpfc_hba *phba = vport->phba; 3475 struct lpfc_hba *phba = vport->phba;
2828 struct lpfc_scsi_buf *scsi_buf = NULL;
2829 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 3476 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2830 uint32_t total = 0, i; 3477 uint32_t total = 0;
2831 uint32_t num_to_alloc = 0; 3478 uint32_t num_to_alloc = 0;
2832 unsigned long flags; 3479 int num_allocated = 0;
2833 3480
2834 if (!rport || fc_remote_port_chkready(rport)) 3481 if (!rport || fc_remote_port_chkready(rport))
2835 return -ENXIO; 3482 return -ENXIO;
@@ -2863,20 +3510,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2863 (phba->cfg_hba_queue_depth - total)); 3510 (phba->cfg_hba_queue_depth - total));
2864 num_to_alloc = phba->cfg_hba_queue_depth - total; 3511 num_to_alloc = phba->cfg_hba_queue_depth - total;
2865 } 3512 }
2866 3513 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
2867 for (i = 0; i < num_to_alloc; i++) { 3514 if (num_to_alloc != num_allocated) {
2868 scsi_buf = lpfc_new_scsi_buf(vport); 3515 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2869 if (!scsi_buf) { 3516 "0708 Allocation request of %d "
2870 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3517 "command buffers did not succeed. "
2871 "0706 Failed to allocate " 3518 "Allocated %d buffers.\n",
2872 "command buffer\n"); 3519 num_to_alloc, num_allocated);
2873 break;
2874 }
2875
2876 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
2877 phba->total_scsi_bufs++;
2878 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
2879 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
2880 } 3520 }
2881 return 0; 3521 return 0;
2882} 3522}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c7c440d5fa29..65dfc8bd5b49 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
140 struct fcp_rsp *fcp_rsp; 140 struct fcp_rsp *fcp_rsp;
141 struct ulp_bde64 *fcp_bpl; 141 struct ulp_bde64 *fcp_bpl;
142 142
143 dma_addr_t dma_phys_bpl;
144
143 /* cur_iocbq has phys of the dma-able buffer. 145 /* cur_iocbq has phys of the dma-able buffer.
144 * Iotag is in here 146 * Iotag is in here
145 */ 147 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba4..ff04daf18f48 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,9 +29,12 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h>
32 33
34#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 35#include "lpfc_hw.h"
34#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 38#include "lpfc_nl.h"
36#include "lpfc_disc.h" 39#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -40,24 +43,7 @@
40#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
41#include "lpfc_compat.h" 44#include "lpfc_compat.h"
42#include "lpfc_debugfs.h" 45#include "lpfc_debugfs.h"
43 46#include "lpfc_vport.h"
44/*
45 * Define macro to log: Mailbox command x%x cannot issue Data
46 * This allows multiple uses of lpfc_msgBlk0311
47 * w/o perturbing log msg utility.
48 */
49#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
50 lpfc_printf_log(phba, \
51 KERN_INFO, \
52 LOG_MBOX | LOG_SLI, \
53 "(%d):0311 Mailbox command x%x cannot " \
54 "issue Data: x%x x%x x%x\n", \
55 pmbox->vport ? pmbox->vport->vpi : 0, \
56 pmbox->mb.mbxCommand, \
57 phba->pport->port_state, \
58 psli->sli_flag, \
59 flag)
60
61 47
62/* There are only four IOCB completion types. */ 48/* There are only four IOCB completion types. */
63typedef enum _lpfc_iocb_type { 49typedef enum _lpfc_iocb_type {
@@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type {
67 LPFC_ABORT_IOCB 53 LPFC_ABORT_IOCB
68} lpfc_iocb_type; 54} lpfc_iocb_type;
69 55
56
57/* Provide function prototypes local to this module. */
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *);
62
63static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{
66 return &iocbq->iocb;
67}
68
69/**
 70 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
71 * @q: The Work Queue to operate on.
72 * @wqe: The work Queue Entry to put on the Work queue.
73 *
74 * This routine will copy the contents of @wqe to the next available entry on
75 * the @q. This function will then ring the Work Queue Doorbell to signal the
76 * HBA to start processing the Work Queue Entry. This function returns 0 if
77 * successful. If no entries are available on @q then this function will return
78 * -ENOMEM.
79 * The caller is expected to hold the hbalock when calling this routine.
80 **/
81static uint32_t
82lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
83{
84 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
85 struct lpfc_register doorbell;
86 uint32_t host_index;
87
88 /* If the host has not yet processed the next entry then we are done */
89 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
90 return -ENOMEM;
91 /* set consumption flag every once in a while */
92 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
93 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
94
95 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
96
97 /* Update the host index before invoking device */
98 host_index = q->host_index;
99 q->host_index = ((q->host_index + 1) % q->entry_count);
100
101 /* Ring Doorbell */
102 doorbell.word0 = 0;
103 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
104 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
105 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
106 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
107 readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
108
109 return 0;
110}
111
112/**
113 * lpfc_sli4_wq_release - Updates internal hba index for WQ
114 * @q: The Work Queue to operate on.
115 * @index: The index to advance the hba index to.
116 *
117 * This routine will update the HBA index of a queue to reflect consumption of
118 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
119 * an entry the host calls this function to update the queue's internal
120 * pointers. This routine returns the number of entries that were consumed by
121 * the HBA.
122 **/
123static uint32_t
124lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
125{
126 uint32_t released = 0;
127
128 if (q->hba_index == index)
129 return 0;
130 do {
131 q->hba_index = ((q->hba_index + 1) % q->entry_count);
132 released++;
133 } while (q->hba_index != index);
134 return released;
135}
136
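lpfc_sli4_wq_put and lpfc_sli4_wq_release both rest on the same modulo ring arithmetic: the host advances host_index as it posts entries, the HBA's reported position advances hba_index, and the queue is declared full one slot early so the two indices never collide. A tiny self-contained model of that arithmetic, with an invented entry count:

#include <stdio.h>

#define ENTRY_COUNT 8

struct ring {
	unsigned int host_index;   /* next slot the host will fill */
	unsigned int hba_index;    /* next slot the HBA will consume */
};

static int ring_put(struct ring *q)
{
	/* one slot is kept empty so "full" and "empty" stay distinguishable */
	if (((q->host_index + 1) % ENTRY_COUNT) == q->hba_index)
		return -1;         /* queue full; the driver returns -ENOMEM */
	q->host_index = (q->host_index + 1) % ENTRY_COUNT;
	return 0;
}

static unsigned int ring_release(struct ring *q, unsigned int index)
{
	unsigned int released = 0;

	while (q->hba_index != index) {   /* advance to the reported index */
		q->hba_index = (q->hba_index + 1) % ENTRY_COUNT;
		released++;
	}
	return released;
}

int main(void)
{
	struct ring q = { 0, 0 };
	int posted = 0, i;

	for (i = 0; i < ENTRY_COUNT; i++)  /* only ENTRY_COUNT - 1 will fit */
		if (ring_put(&q) == 0)
			posted++;
	printf("posted %d, reclaimed %u\n", posted,
	       ring_release(&q, q.host_index));
	return 0;
}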
137/**
 138 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 139 * @q: The Mailbox Queue to operate on.
 140 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 141 *
 142 * This routine will copy the contents of @mqe to the next available entry on
 143 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 144 * the HBA to start processing the Mailbox Queue Entry. It returns 0 if
145 * successful. If no entries are available on @q then this function will return
146 * -ENOMEM.
147 * The caller is expected to hold the hbalock when calling this routine.
148 **/
149static uint32_t
150lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
151{
152 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
153 struct lpfc_register doorbell;
154 uint32_t host_index;
155
156 /* If the host has not yet processed the next entry then we are done */
157 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
158 return -ENOMEM;
159 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
160 /* Save off the mailbox pointer for completion */
161 q->phba->mbox = (MAILBOX_t *)temp_mqe;
162
163 /* Update the host index before invoking device */
164 host_index = q->host_index;
165 q->host_index = ((q->host_index + 1) % q->entry_count);
166
167 /* Ring Doorbell */
168 doorbell.word0 = 0;
169 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
170 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
171 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
172 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
173 return 0;
174}
175
176/**
177 * lpfc_sli4_mq_release - Updates internal hba index for MQ
178 * @q: The Mailbox Queue to operate on.
179 *
180 * This routine will update the HBA index of a queue to reflect consumption of
181 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
182 * an entry the host calls this function to update the queue's internal
183 * pointers. This routine returns the number of entries that were consumed by
184 * the HBA.
185 **/
186static uint32_t
187lpfc_sli4_mq_release(struct lpfc_queue *q)
188{
189 /* Clear the mailbox pointer for completion */
190 q->phba->mbox = NULL;
191 q->hba_index = ((q->hba_index + 1) % q->entry_count);
192 return 1;
193}
194
195/**
 196 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
197 * @q: The Event Queue to get the first valid EQE from
198 *
199 * This routine will get the first valid Event Queue Entry from @q, update
200 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
201 * the Queue (no more work to do), or the Queue is full of EQEs that have been
202 * processed, but not popped back to the HBA then this routine will return NULL.
203 **/
204static struct lpfc_eqe *
205lpfc_sli4_eq_get(struct lpfc_queue *q)
206{
207 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
208
209 /* If the next EQE is not valid then we are done */
210 if (!bf_get(lpfc_eqe_valid, eqe))
211 return NULL;
212 /* If the host has not yet processed the next entry then we are done */
213 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
214 return NULL;
215
216 q->hba_index = ((q->hba_index + 1) % q->entry_count);
217 return eqe;
218}
219
220/**
221 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
222 * @q: The Event Queue that the host has completed processing for.
 223 * @arm: Indicates whether the host wants to arm this EQ.
224 *
225 * This routine will mark all Event Queue Entries on @q, from the last
226 * known completed entry to the last entry that was processed, as completed
 227 * by clearing the valid bit for each event queue entry. Then it will
228 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
229 * The internal host index in the @q will be updated by this routine to indicate
230 * that the host has finished processing the entries. The @arm parameter
231 * indicates that the queue should be rearmed when ringing the doorbell.
232 *
233 * This function will return the number of EQEs that were popped.
234 **/
235uint32_t
236lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
237{
238 uint32_t released = 0;
239 struct lpfc_eqe *temp_eqe;
240 struct lpfc_register doorbell;
241
242 /* while there are valid entries */
243 while (q->hba_index != q->host_index) {
244 temp_eqe = q->qe[q->host_index].eqe;
245 bf_set(lpfc_eqe_valid, temp_eqe, 0);
246 released++;
247 q->host_index = ((q->host_index + 1) % q->entry_count);
248 }
249 if (unlikely(released == 0 && !arm))
250 return 0;
251
252 /* ring doorbell for number popped */
253 doorbell.word0 = 0;
254 if (arm) {
255 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
256 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
257 }
258 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
262 return released;
263}
264
265/**
266 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
267 * @q: The Completion Queue to get the first valid CQE from
268 *
269 * This routine will get the first valid Completion Queue Entry from @q, update
270 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
271 * the Queue (no more work to do), or the Queue is full of CQEs that have been
272 * processed, but not popped back to the HBA then this routine will return NULL.
273 **/
274static struct lpfc_cqe *
275lpfc_sli4_cq_get(struct lpfc_queue *q)
276{
277 struct lpfc_cqe *cqe;
278
279 /* If the next CQE is not valid then we are done */
280 if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
281 return NULL;
282 /* If the host has not yet processed the next entry then we are done */
283 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
284 return NULL;
285
286 cqe = q->qe[q->hba_index].cqe;
287 q->hba_index = ((q->hba_index + 1) % q->entry_count);
288 return cqe;
289}
290
291/**
292 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
293 * @q: The Completion Queue that the host has completed processing for.
 294 * @arm: Indicates whether the host wants to arm this CQ.
295 *
296 * This routine will mark all Completion queue entries on @q, from the last
297 * known completed entry to the last entry that was processed, as completed
298 * by clearing the valid bit for each completion queue entry. Then it will
299 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
300 * The internal host index in the @q will be updated by this routine to indicate
301 * that the host has finished processing the entries. The @arm parameter
302 * indicates that the queue should be rearmed when ringing the doorbell.
303 *
304 * This function will return the number of CQEs that were released.
305 **/
306uint32_t
307lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
308{
309 uint32_t released = 0;
310 struct lpfc_cqe *temp_qe;
311 struct lpfc_register doorbell;
312
313 /* while there are valid entries */
314 while (q->hba_index != q->host_index) {
315 temp_qe = q->qe[q->host_index].cqe;
316 bf_set(lpfc_cqe_valid, temp_qe, 0);
317 released++;
318 q->host_index = ((q->host_index + 1) % q->entry_count);
319 }
320 if (unlikely(released == 0 && !arm))
321 return 0;
322
323 /* ring doorbell for number popped */
324 doorbell.word0 = 0;
325 if (arm)
326 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
327 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
328 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
329 bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
330 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
331 return released;
332}
333
334/**
335 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 336 * @hq/@dq: The Header/Data Receive Queues to operate on.
 337 * @hrqe/@drqe: The header/data Receive Queue Entries to put on the queues.
338 *
 339 * This routine will copy the contents of @hrqe and @drqe to the next
 340 * available entries on @hq and @dq. This function will then ring the Receive
 341 * Queue Doorbell to signal the HBA to start processing. It returns the
342 * index that the rqe was copied to if successful. If no entries are available
343 * on @q then this function will return -ENOMEM.
344 * The caller is expected to hold the hbalock when calling this routine.
345 **/
346static int
347lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
348 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
349{
350 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
351 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
352 struct lpfc_register doorbell;
353 int put_index = hq->host_index;
354
355 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
356 return -EINVAL;
357 if (hq->host_index != dq->host_index)
358 return -EINVAL;
359 /* If the host has not yet processed the next entry then we are done */
360 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
361 return -EBUSY;
362 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
363 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
364
365 /* Update the host index to point to the next slot */
366 hq->host_index = ((hq->host_index + 1) % hq->entry_count);
367 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
368
369 /* Ring The Header Receive Queue Doorbell */
370 if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
371 doorbell.word0 = 0;
372 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
373 LPFC_RQ_POST_BATCH);
374 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
375 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
376 }
377 return put_index;
378}
379
380/**
381 * lpfc_sli4_rq_release - Updates internal hba index for RQ
382 * @q: The Header Receive Queue to operate on.
383 *
384 * This routine will update the HBA index of a queue to reflect consumption of
385 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
386 * consumed an entry the host calls this function to update the queue's
387 * internal pointers. This routine returns the number of entries that were
388 * consumed by the HBA.
389 **/
390static uint32_t
391lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
392{
393 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
394 return 0;
395 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
396 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
397 return 1;
398}
399
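Note the batching in lpfc_sli4_rq_put above: an RQE pair is copied on every call, but the header-queue doorbell is only rung once every LPFC_RQ_POST_BATCH posts, reporting the whole batch at once. A small sketch of that batching, with made-up names and a stand-in batch size:

#include <stdio.h>

#define POST_BATCH 8    /* plays the role of LPFC_RQ_POST_BATCH */

static unsigned int host_index;

static void ring_doorbell(unsigned int posted)
{
	printf("doorbell: %u new RQEs posted\n", posted);
}

static void rq_put(void)
{
	host_index++;                     /* this post consumed one slot */
	if (!(host_index % POST_BATCH))   /* notify the HBA once per batch */
		ring_doorbell(POST_BATCH);
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)
		rq_put();                 /* rings after posts 8 and 16 */
	return 0;
}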
70/** 400/**
71 * lpfc_cmd_iocb - Get next command iocb entry in the ring 401 * lpfc_cmd_iocb - Get next command iocb entry in the ring
72 * @phba: Pointer to HBA context object. 402 * @phba: Pointer to HBA context object.
@@ -121,6 +451,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
121} 451}
122 452
123/** 453/**
454 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
455 * @phba: Pointer to HBA context object.
456 * @xritag: XRI value.
457 *
 458 * This function clears the sglq pointer from the array of active
459 * sglq's. The xritag that is passed in is used to index into the
460 * array. Before the xritag can be used it needs to be adjusted
461 * by subtracting the xribase.
462 *
 463 * Returns sglq pointer = success, NULL = Failure.
464 **/
465static struct lpfc_sglq *
466__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
467{
468 uint16_t adj_xri;
469 struct lpfc_sglq *sglq;
470 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
471 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
472 return NULL;
473 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
474 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
475 return sglq;
476}
477
478/**
479 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
480 * @phba: Pointer to HBA context object.
481 * @xritag: XRI value.
482 *
 483 * This function returns the sglq pointer from the array of active
484 * sglq's. The xritag that is passed in is used to index into the
485 * array. Before the xritag can be used it needs to be adjusted
486 * by subtracting the xribase.
487 *
 488 * Returns sglq pointer = success, NULL = Failure.
489 **/
490static struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{
493 uint16_t adj_xri;
494 struct lpfc_sglq *sglq;
495 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
496 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
497 return NULL;
498 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
499 return sglq;
500}
501
502/**
503 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
504 * @phba: Pointer to HBA context object.
505 *
 506 * This function is called with the hbalock held. It gets a new
 507 * driver sglq object from the sglq list. If the
 508 * list is not empty it returns a pointer to the newly
 509 * allocated sglq object, else it returns NULL.
510 **/
511static struct lpfc_sglq *
512__lpfc_sli_get_sglq(struct lpfc_hba *phba)
513{
514 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
515 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
520 return sglq;
521}
522
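All three sglq helpers translate a firmware XRI into an array slot the same way: subtract the configured xri_base, then index the active-sglq array; a tag below the base or beyond max_xri simply yields NULL. A short illustrative model of that translation, with invented base and bounds:

#include <stdio.h>

#define XRI_BASE 0x100   /* invented values; the driver takes these from max_cfg_param */
#define MAX_XRI  64

static void *active_sglq[MAX_XRI + 1];

static void *clear_active_sglq(unsigned int xritag)
{
	unsigned int adj_xri = xritag - XRI_BASE;
	void *sglq;

	if (adj_xri > MAX_XRI)
		return NULL;                  /* out of range, nothing tracked */
	sglq = active_sglq[adj_xri];
	active_sglq[adj_xri] = NULL;          /* the slot is free again */
	return sglq;
}

int main(void)
{
	static int dummy;

	active_sglq[5] = &dummy;              /* pretend XRI 0x105 is in flight */
	printf("cleared %p\n", clear_active_sglq(0x105));
	return 0;
}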
523/**
124 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 524 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
125 * @phba: Pointer to HBA context object. 525 * @phba: Pointer to HBA context object.
126 * 526 *
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
142} 542}
143 543
144/** 544/**
145 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 545 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
146 * @phba: Pointer to HBA context object. 546 * @phba: Pointer to HBA context object.
147 * @iocbq: Pointer to driver iocb object. 547 * @iocbq: Pointer to driver iocb object.
148 * 548 *
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
150 * iocb object to the iocb pool. The iotag in the iocb object 550 * iocb object to the iocb pool. The iotag in the iocb object
151 * does not change for each use of the iocb object. This function 551 * does not change for each use of the iocb object. This function
152 * clears all other fields of the iocb object when it is freed. 552 * clears all other fields of the iocb object when it is freed.
 553 * The sglq structure that holds the xritag and phys and virtual
554 * mappings for the scatter gather list is retrieved from the
555 * active array of sglq. The get of the sglq pointer also clears
 556 * the entry in the array. If the status of the IO indicates that
 557 * this IO was aborted then the sglq entry is put on the
558 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
559 * IO has good status or fails for any other reason then the sglq
560 * entry is added to the free list (lpfc_sgl_list).
153 **/ 561 **/
154static void 562static void
155__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 563__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
564{
565 struct lpfc_sglq *sglq;
566 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
567 unsigned long iflag;
568
569 if (iocbq->sli4_xritag == NO_XRI)
570 sglq = NULL;
571 else
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
576 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag);
580 list_add(&sglq->list,
581 &phba->sli4_hba.lpfc_abts_els_sgl_list);
582 spin_unlock_irqrestore(
583 &phba->sli4_hba.abts_sgl_list_lock, iflag);
584 } else
585 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
586 }
587
588
589 /*
590 * Clean all volatile data fields, preserve iotag and node struct.
591 */
592 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
593 iocbq->sli4_xritag = NO_XRI;
594 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
595}
596
597/**
598 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
599 * @phba: Pointer to HBA context object.
600 * @iocbq: Pointer to driver iocb object.
601 *
602 * This function is called with hbalock held to release driver
603 * iocb object to the iocb pool. The iotag in the iocb object
604 * does not change for each use of the iocb object. This function
605 * clears all other fields of the iocb object when it is freed.
606 **/
607static void
608__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
156{ 609{
157 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 610 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
158 611
@@ -160,10 +613,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
160 * Clean all volatile data fields, preserve iotag and node struct. 613 * Clean all volatile data fields, preserve iotag and node struct.
161 */ 614 */
162 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 615 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
616 iocbq->sli4_xritag = NO_XRI;
163 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 617 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
164} 618}
165 619
166/** 620/**
621 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
622 * @phba: Pointer to HBA context object.
623 * @iocbq: Pointer to driver iocb object.
624 *
625 * This function is called with hbalock held to release driver
626 * iocb object to the iocb pool. The iotag in the iocb object
627 * does not change for each use of the iocb object. This function
628 * clears all other fields of the iocb object when it is freed.
629 **/
630static void
631__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
632{
633 phba->__lpfc_sli_release_iocbq(phba, iocbq);
634}
635
636/**
167 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 637 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
168 * @phba: Pointer to HBA context object. 638 * @phba: Pointer to HBA context object.
169 * @iocbq: Pointer to driver iocb object. 639 * @iocbq: Pointer to driver iocb object.
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
281 case CMD_GEN_REQUEST64_CR: 751 case CMD_GEN_REQUEST64_CR:
282 case CMD_GEN_REQUEST64_CX: 752 case CMD_GEN_REQUEST64_CX:
283 case CMD_XMIT_ELS_RSP64_CX: 753 case CMD_XMIT_ELS_RSP64_CX:
754 case DSSCMD_IWRITE64_CR:
755 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
284 type = LPFC_SOL_IOCB; 762 type = LPFC_SOL_IOCB;
285 break; 763 break;
286 case CMD_ABORT_XRI_CN: 764 case CMD_ABORT_XRI_CN:
@@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
348 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 826 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
349 if (!pmb) 827 if (!pmb)
350 return -ENOMEM; 828 return -ENOMEM;
351 pmbox = &pmb->mb; 829 pmbox = &pmb->u.mb;
352 phba->link_state = LPFC_INIT_MBX_CMDS; 830 phba->link_state = LPFC_INIT_MBX_CMDS;
353 for (i = 0; i < psli->num_rings; i++) { 831 for (i = 0; i < psli->num_rings; i++) {
354 lpfc_config_ring(phba, i, pmb); 832 lpfc_config_ring(phba, i, pmb);
@@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
779 phba->hbqs[i].buffer_count = 0; 1257 phba->hbqs[i].buffer_count = 0;
780 } 1258 }
 781 /* Return all HBQ buffers that are in flight */     1259 /* Return all HBQ buffers that are in flight */
782 list_for_each_entry_safe(dmabuf, next_dmabuf, 1260 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
783 &phba->hbqbuf_in_list, list) { 1261 list) {
784 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1262 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
785 list_del(&hbq_buf->dbuf.list); 1263 list_del(&hbq_buf->dbuf.list);
786 if (hbq_buf->tag == -1) { 1264 if (hbq_buf->tag == -1) {
@@ -814,10 +1292,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
814 * pointer to the hbq entry if it successfully post the buffer 1292 * pointer to the hbq entry if it successfully post the buffer
815 * else it will return NULL. 1293 * else it will return NULL.
816 **/ 1294 **/
817static struct lpfc_hbq_entry * 1295static int
818lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1296lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
819 struct hbq_dmabuf *hbq_buf) 1297 struct hbq_dmabuf *hbq_buf)
820{ 1298{
1299 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1300}
1301
1302/**
1303 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1304 * @phba: Pointer to HBA context object.
1305 * @hbqno: HBQ number.
1306 * @hbq_buf: Pointer to HBQ buffer.
1307 *
1308 * This function is called with the hbalock held to post a hbq buffer to the
1309 * firmware. If the function finds an empty slot in the HBQ, it will post the
1310 * buffer and place it on the hbq_buffer_list. The function will return zero if
 1311 * it successfully posts the buffer else it will return an error.
1312 **/
1313static int
1314lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1315 struct hbq_dmabuf *hbq_buf)
1316{
821 struct lpfc_hbq_entry *hbqe; 1317 struct lpfc_hbq_entry *hbqe;
822 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1318 dma_addr_t physaddr = hbq_buf->dbuf.phys;
823 1319
@@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
838 /* flush */ 1334 /* flush */
839 readl(phba->hbq_put + hbqno); 1335 readl(phba->hbq_put + hbqno);
840 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1336 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
841 } 1337 return 0;
842 return hbqe; 1338 } else
1339 return -ENOMEM;
1340}
1341
1342/**
1343 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1344 * @phba: Pointer to HBA context object.
1345 * @hbqno: HBQ number.
1346 * @hbq_buf: Pointer to HBQ buffer.
1347 *
1348 * This function is called with the hbalock held to post an RQE to the SLI4
1349 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1350 * the hbq_buffer_list and return zero, otherwise it will return an error.
1351 **/
1352static int
1353lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1354 struct hbq_dmabuf *hbq_buf)
1355{
1356 int rc;
1357 struct lpfc_rqe hrqe;
1358 struct lpfc_rqe drqe;
1359
1360 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1361 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1362 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1363 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1364 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1365 &hrqe, &drqe);
1366 if (rc < 0)
1367 return rc;
1368 hbq_buf->tag = rc;
1369 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1370 return 0;
843} 1371}
844 1372
845/* HBQ for ELS and CT traffic. */ 1373/* HBQ for ELS and CT traffic. */
@@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
914 dbuf.list); 1442 dbuf.list);
915 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1443 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
916 (hbqno << 16)); 1444 (hbqno << 16));
917 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1445 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
918 phba->hbqs[hbqno].buffer_count++; 1446 phba->hbqs[hbqno].buffer_count++;
919 posted++; 1447 posted++;
920 } else 1448 } else
@@ -965,6 +1493,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
965} 1493}
966 1494
967/** 1495/**
1496 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 1497 * @rb_list: Pointer to the hbq buffer list to take the first buffer from.
 1498 *
1499 *
1500 * This function removes the first hbq buffer on an hbq list and returns a
1501 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1502 **/
1503static struct hbq_dmabuf *
1504lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1505{
1506 struct lpfc_dmabuf *d_buf;
1507
1508 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1509 if (!d_buf)
1510 return NULL;
1511 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1512}
1513
1514/**
968 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1515 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
969 * @phba: Pointer to HBA context object. 1516 * @phba: Pointer to HBA context object.
970 * @tag: Tag of the hbq buffer. 1517 * @tag: Tag of the hbq buffer.
@@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
985 if (hbqno >= LPFC_MAX_HBQS) 1532 if (hbqno >= LPFC_MAX_HBQS)
986 return NULL; 1533 return NULL;
987 1534
1535 spin_lock_irq(&phba->hbalock);
988 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1536 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
989 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1537 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
990 if (hbq_buf->tag == tag) { 1538 if (hbq_buf->tag == tag) {
1539 spin_unlock_irq(&phba->hbalock);
991 return hbq_buf; 1540 return hbq_buf;
992 } 1541 }
993 } 1542 }
1543 spin_unlock_irq(&phba->hbalock);
994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
995 "1803 Bad hbq tag. Data: x%x x%x\n", 1545 "1803 Bad hbq tag. Data: x%x x%x\n",
996 tag, phba->hbqs[tag >> 16].buffer_count); 1546 tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1013 1563
1014 if (hbq_buffer) { 1564 if (hbq_buffer) {
1015 hbqno = hbq_buffer->tag >> 16; 1565 hbqno = hbq_buffer->tag >> 16;
1016 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1566 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1017 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1567 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1018 }
1019 } 1568 }
1020} 1569}
1021 1570
@@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1086 case MBX_HEARTBEAT: 1635 case MBX_HEARTBEAT:
1087 case MBX_PORT_CAPABILITIES: 1636 case MBX_PORT_CAPABILITIES:
1088 case MBX_PORT_IOV_CONTROL: 1637 case MBX_PORT_IOV_CONTROL:
1638 case MBX_SLI4_CONFIG:
1639 case MBX_SLI4_REQ_FTRS:
1640 case MBX_REG_FCFI:
1641 case MBX_UNREG_FCFI:
1642 case MBX_REG_VFI:
1643 case MBX_UNREG_VFI:
1644 case MBX_INIT_VPI:
1645 case MBX_INIT_VFI:
1646 case MBX_RESUME_RPI:
1089 ret = mbxCommand; 1647 ret = mbxCommand;
1090 break; 1648 break;
1091 default: 1649 default:
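The hunk above only extends the switch that whitelists recognised mailbox opcodes with the new SLI4 commands; anything not listed falls through to the default case and is reported as unknown. A trimmed standalone sketch of the pattern (the opcode values are placeholders, not the real MBX_* numbers):

	#include <stdint.h>
	#include <stdio.h>

	#define CMD_UNKNOWN   0xff
	/* Placeholder opcode values for illustration only. */
	#define CMD_HEARTBEAT 0x31
	#define CMD_REG_FCFI  0xa0
	#define CMD_INIT_VPI  0xa3

	/* Return the opcode itself if it is recognised, CMD_UNKNOWN otherwise. */
	static uint8_t check_cmd(uint8_t cmd)
	{
		switch (cmd) {
		case CMD_HEARTBEAT:
		case CMD_REG_FCFI:
		case CMD_INIT_VPI:
			return cmd;
		default:
			return CMD_UNKNOWN;
		}
	}

	int main(void)
	{
		printf("0x%02x -> 0x%02x\n", 0xa0, check_cmd(0xa0));
		printf("0x%02x -> 0x%02x\n", 0x55, check_cmd(0x55));
		return 0;
	}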
@@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1106 * will wake up thread waiting on the wait queue pointed by context1 1664 * will wake up thread waiting on the wait queue pointed by context1
1107 * of the mailbox. 1665 * of the mailbox.
1108 **/ 1666 **/
1109static void 1667void
1110lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1668lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1111{ 1669{
1112 wait_queue_head_t *pdone_q; 1670 wait_queue_head_t *pdone_q;
@@ -1140,7 +1698,7 @@ void
1140lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1698lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1141{ 1699{
1142 struct lpfc_dmabuf *mp; 1700 struct lpfc_dmabuf *mp;
1143 uint16_t rpi; 1701 uint16_t rpi, vpi;
1144 int rc; 1702 int rc;
1145 1703
1146 mp = (struct lpfc_dmabuf *) (pmb->context1); 1704 mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1150 kfree(mp); 1708 kfree(mp);
1151 } 1709 }
1152 1710
1711 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1712 (phba->sli_rev == LPFC_SLI_REV4))
1713 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1714
1153 /* 1715 /*
1154 * If a REG_LOGIN succeeded after node is destroyed or node 1716 * If a REG_LOGIN succeeded after node is destroyed or node
1155 * is in re-discovery driver need to cleanup the RPI. 1717 * is in re-discovery driver need to cleanup the RPI.
1156 */ 1718 */
1157 if (!(phba->pport->load_flag & FC_UNLOADING) && 1719 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1158 pmb->mb.mbxCommand == MBX_REG_LOGIN64 && 1720 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1159 !pmb->mb.mbxStatus) { 1721 !pmb->u.mb.mbxStatus) {
1160 1722 rpi = pmb->u.mb.un.varWords[0];
1161 rpi = pmb->mb.un.varWords[0]; 1723 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1162 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); 1724 lpfc_unreg_login(phba, vpi, rpi, pmb);
1163 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1725 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1164 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1726 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1165 if (rc != MBX_NOT_FINISHED) 1727 if (rc != MBX_NOT_FINISHED)
1166 return; 1728 return;
1167 } 1729 }
1168 1730
1169 mempool_free(pmb, phba->mbox_mem_pool); 1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1170 return; 1732 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else
1734 mempool_free(pmb, phba->mbox_mem_pool);
1171} 1735}
1172 1736
1173/** 1737/**
@@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1204 if (pmb == NULL) 1768 if (pmb == NULL)
1205 break; 1769 break;
1206 1770
1207 pmbox = &pmb->mb; 1771 pmbox = &pmb->u.mb;
1208 1772
1209 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 1773 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1210 if (pmb->vport) { 1774 if (pmb->vport) {
@@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1233 /* Unknow mailbox command compl */ 1797 /* Unknow mailbox command compl */
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1235 "(%d):0323 Unknown Mailbox command " 1799 "(%d):0323 Unknown Mailbox command "
1236 "%x Cmpl\n", 1800 "x%x (x%x) Cmpl\n",
1237 pmb->vport ? pmb->vport->vpi : 0, 1801 pmb->vport ? pmb->vport->vpi : 0,
1238 pmbox->mbxCommand); 1802 pmbox->mbxCommand,
1803 lpfc_sli4_mbox_opcode_get(phba, pmb));
1239 phba->link_state = LPFC_HBA_ERROR; 1804 phba->link_state = LPFC_HBA_ERROR;
1240 phba->work_hs = HS_FFER3; 1805 phba->work_hs = HS_FFER3;
1241 lpfc_handle_eratt(phba); 1806 lpfc_handle_eratt(phba);
@@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1250 LOG_MBOX | LOG_SLI, 1815 LOG_MBOX | LOG_SLI,
1251 "(%d):0305 Mbox cmd cmpl " 1816 "(%d):0305 Mbox cmd cmpl "
1252 "error - RETRYing Data: x%x " 1817 "error - RETRYing Data: x%x "
1253 "x%x x%x x%x\n", 1818 "(x%x) x%x x%x x%x\n",
1254 pmb->vport ? pmb->vport->vpi :0, 1819 pmb->vport ? pmb->vport->vpi :0,
1255 pmbox->mbxCommand, 1820 pmbox->mbxCommand,
1821 lpfc_sli4_mbox_opcode_get(phba,
1822 pmb),
1256 pmbox->mbxStatus, 1823 pmbox->mbxStatus,
1257 pmbox->un.varWords[0], 1824 pmbox->un.varWords[0],
1258 pmb->vport->port_state); 1825 pmb->vport->port_state);
1259 pmbox->mbxStatus = 0; 1826 pmbox->mbxStatus = 0;
1260 pmbox->mbxOwner = OWN_HOST; 1827 pmbox->mbxOwner = OWN_HOST;
1261 spin_lock_irq(&phba->hbalock);
1262 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1263 spin_unlock_irq(&phba->hbalock);
1264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1828 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1265 if (rc == MBX_SUCCESS) 1829 if (rc != MBX_NOT_FINISHED)
1266 continue; 1830 continue;
1267 } 1831 }
1268 } 1832 }
1269 1833
1270 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 1834 /* Mailbox cmd <cmd> Cmpl <cmpl> */
1271 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 1835 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1272 "(%d):0307 Mailbox cmd x%x Cmpl x%p " 1836 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1273 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 1837 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1274 pmb->vport ? pmb->vport->vpi : 0, 1838 pmb->vport ? pmb->vport->vpi : 0,
1275 pmbox->mbxCommand, 1839 pmbox->mbxCommand,
1840 lpfc_sli4_mbox_opcode_get(phba, pmb),
1276 pmb->mbox_cmpl, 1841 pmb->mbox_cmpl,
1277 *((uint32_t *) pmbox), 1842 *((uint32_t *) pmbox),
1278 pmbox->un.varWords[0], 1843 pmbox->un.varWords[0],
@@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
1317 return &hbq_entry->dbuf; 1882 return &hbq_entry->dbuf;
1318} 1883}
1319 1884
1885/**
1886 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1887 * @phba: Pointer to HBA context object.
1888 * @pring: Pointer to driver SLI ring object.
1889 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1890 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1891 * @fch_type: the type for the first frame of the sequence.
1892 *
1893 * This function is called with no lock held. This function uses the r_ctl and
1894 * type of the received sequence to find the correct callback function to call
1895 * to process the sequence.
1896 **/
1897static int
1898lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1899 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1900 uint32_t fch_type)
1901{
1902 int i;
1903
1904 /* unSolicited Responses */
1905 if (pring->prt[0].profile) {
1906 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1907 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1908 saveq);
1909 return 1;
1910 }
1911 /* We must search, based on rctl / type
1912 for the right routine */
1913 for (i = 0; i < pring->num_mask; i++) {
1914 if ((pring->prt[i].rctl == fch_r_ctl) &&
1915 (pring->prt[i].type == fch_type)) {
1916 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1917 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1918 (phba, pring, saveq);
1919 return 1;
1920 }
1921 }
1922 return 0;
1923}
1320 1924
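The new helper above dispatches an unsolicited sequence by walking a per-ring table of (rctl, type) masks and calling the registered handler for the first match, returning 0 when nothing matches. The following standalone sketch shows the same table-driven dispatch with made-up frame fields, handler names, and rctl/type values:

	#include <stddef.h>
	#include <stdio.h>

	/* Minimal stand-in for the frame header fields being matched. */
	struct frame {
		unsigned int rctl;
		unsigned int type;
	};

	typedef void (*unsol_handler)(const struct frame *f);

	static void els_handler(const struct frame *f) { (void)f; printf("ELS handler\n"); }
	static void ct_handler(const struct frame *f)  { (void)f; printf("CT handler\n"); }

	/* Per-ring mask table, in the spirit of pring->prt[]; values are placeholders. */
	static const struct ring_mask {
		unsigned int rctl;
		unsigned int type;
		unsol_handler handler;
	} masks[] = {
		{ 0x22, 0x01, els_handler },
		{ 0x02, 0x20, ct_handler },
	};

	/* Return 1 if a handler matched the frame's rctl/type, 0 otherwise. */
	static int complete_unsol(const struct frame *f)
	{
		size_t i;

		for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
			if (masks[i].rctl == f->rctl && masks[i].type == f->type) {
				masks[i].handler(f);
				return 1;
			}
		}
		return 0;
	}

	int main(void)
	{
		struct frame f = { 0x22, 0x01 };

		if (!complete_unsol(&f))
			printf("unexpected rctl/type\n");
		return 0;
	}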
1321/** 1925/**
1322 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1926 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 IOCB_t * irsp; 1943 IOCB_t * irsp;
1340 WORD5 * w5p; 1944 WORD5 * w5p;
1341 uint32_t Rctl, Type; 1945 uint32_t Rctl, Type;
1342 uint32_t match, i; 1946 uint32_t match;
1343 struct lpfc_iocbq *iocbq; 1947 struct lpfc_iocbq *iocbq;
1344 struct lpfc_dmabuf *dmzbuf; 1948 struct lpfc_dmabuf *dmzbuf;
1345 1949
@@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1482 } 2086 }
1483 } 2087 }
1484 2088
1485 /* unSolicited Responses */ 2089 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
1486 if (pring->prt[0].profile) {
1487 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1488 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1489 saveq);
1490 match = 1;
1491 } else {
1492 /* We must search, based on rctl / type
1493 for the right routine */
1494 for (i = 0; i < pring->num_mask; i++) {
1495 if ((pring->prt[i].rctl == Rctl)
1496 && (pring->prt[i].type == Type)) {
1497 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1498 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1499 (phba, pring, saveq);
1500 match = 1;
1501 break;
1502 }
1503 }
1504 }
1505 if (match == 0) {
1506 /* Unexpected Rctl / Type received */
1507 /* Ring <ringno> handler: unexpected
1508 Rctl <Rctl> Type <Type> received */
1509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1510 "0313 Ring %d handler: unexpected Rctl x%x " 2091 "0313 Ring %d handler: unexpected Rctl x%x "
1511 "Type x%x received\n", 2092 "Type x%x received\n",
1512 pring->ringno, Rctl, Type); 2093 pring->ringno, Rctl, Type);
1513 } 2094
1514 return 1; 2095 return 1;
1515} 2096}
1516 2097
@@ -1552,6 +2133,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1552} 2133}
1553 2134
1554/** 2135/**
2136 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2137 * @phba: Pointer to HBA context object.
2138 * @pring: Pointer to driver SLI ring object.
2139 * @iotag: IOCB tag.
2140 *
2141 * This function looks up the iocb_lookup table to get the command iocb
2142 * corresponding to the given iotag. This function is called with the
2143 * hbalock held.
2144 * This function returns the command iocb object if it finds the command
2145 * iocb else returns NULL.
2146 **/
2147static struct lpfc_iocbq *
2148lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2149 struct lpfc_sli_ring *pring, uint16_t iotag)
2150{
2151 struct lpfc_iocbq *cmd_iocb;
2152
2153 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2154 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2155 list_del_init(&cmd_iocb->list);
2156 pring->txcmplq_cnt--;
2157 return cmd_iocb;
2158 }
2159
2160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 2161 "0372 iotag x%x is out of range: max iotag (x%x)\n",
2162 iotag, phba->sli.last_iotag);
2163 return NULL;
2164}
2165
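The lookup-by-tag helper is a plain array index guarded by a range check against the highest tag handed out. A standalone sketch of the same O(1) table lookup, with hypothetical type names standing in for the driver structures:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical command descriptor, standing in for the real iocb. */
	struct cmd {
		uint16_t iotag;
		const char *name;
	};

	struct cmd_table {
		struct cmd **slots;	/* indexed directly by iotag */
		uint16_t last_iotag;	/* highest tag handed out so far */
	};

	/* O(1) lookup by tag; NULL when the tag is zero or out of range. */
	static struct cmd *lookup_by_tag(const struct cmd_table *t, uint16_t iotag)
	{
		if (iotag != 0 && iotag <= t->last_iotag)
			return t->slots[iotag];

		fprintf(stderr, "iotag %u is out of range (max %u)\n",
			(unsigned)iotag, (unsigned)t->last_iotag);
		return NULL;
	}

	int main(void)
	{
		struct cmd c = { 5, "read" };
		struct cmd *slots[8] = { 0 };
		struct cmd_table t = { slots, 7 };

		slots[5] = &c;
		printf("tag 5: %s\n", lookup_by_tag(&t, 5) ? "found" : "missing");
		printf("tag 9: %s\n", lookup_by_tag(&t, 9) ? "found" : "missing");
		return 0;
	}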
2166/**
1555 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2167 * lpfc_sli_process_sol_iocb - process solicited iocb completion
1556 * @phba: Pointer to HBA context object. 2168 * @phba: Pointer to HBA context object.
1557 * @pring: Pointer to driver SLI ring object. 2169 * @pring: Pointer to driver SLI ring object.
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1954 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2566 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1955 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2567 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1956 spin_unlock_irqrestore(&phba->hbalock, iflag); 2568 spin_unlock_irqrestore(&phba->hbalock, iflag);
1957 lpfc_rampdown_queue_depth(phba); 2569 phba->lpfc_rampdown_queue_depth(phba);
1958 spin_lock_irqsave(&phba->hbalock, iflag); 2570 spin_lock_irqsave(&phba->hbalock, iflag);
1959 } 2571 }
1960 2572
@@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2068} 2680}
2069 2681
2070/** 2682/**
2071 * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings 2683 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2684 * @phba: Pointer to HBA context object.
2685 * @pring: Pointer to driver SLI ring object.
2686 * @rspiocbp: Pointer to driver response IOCB object.
2687 *
2688 * This function is called from the worker thread when there is a slow-path
2689 * response IOCB to process. This function chains all the response iocbs until
2690 * seeing the iocb with the LE bit set. The function will call
2691 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2692 * completion of a command iocb. The function will call the
2693 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2694 * The function frees the resources or calls the completion handler if this
2695 * iocb is an abort completion. The function returns NULL when the response
2696 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2697 * this function shall chain the iocb on to the iocb_continueq and return the
2698 * response iocb passed in.
2699 **/
2700static struct lpfc_iocbq *
2701lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2702 struct lpfc_iocbq *rspiocbp)
2703{
2704 struct lpfc_iocbq *saveq;
2705 struct lpfc_iocbq *cmdiocbp;
2706 struct lpfc_iocbq *next_iocb;
2707 IOCB_t *irsp = NULL;
2708 uint32_t free_saveq;
2709 uint8_t iocb_cmd_type;
2710 lpfc_iocb_type type;
2711 unsigned long iflag;
2712 int rc;
2713
2714 spin_lock_irqsave(&phba->hbalock, iflag);
 2715 /* First add the response iocb to the continueq list */
2716 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2717 pring->iocb_continueq_cnt++;
2718
 2719 /* Now, determine whether the list is completed for processing */
2720 irsp = &rspiocbp->iocb;
2721 if (irsp->ulpLe) {
2722 /*
2723 * By default, the driver expects to free all resources
2724 * associated with this iocb completion.
2725 */
2726 free_saveq = 1;
2727 saveq = list_get_first(&pring->iocb_continueq,
2728 struct lpfc_iocbq, list);
2729 irsp = &(saveq->iocb);
2730 list_del_init(&pring->iocb_continueq);
2731 pring->iocb_continueq_cnt = 0;
2732
2733 pring->stats.iocb_rsp++;
2734
2735 /*
2736 * If resource errors reported from HBA, reduce
2737 * queuedepths of the SCSI device.
2738 */
2739 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2740 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2741 spin_unlock_irqrestore(&phba->hbalock, iflag);
2742 phba->lpfc_rampdown_queue_depth(phba);
2743 spin_lock_irqsave(&phba->hbalock, iflag);
2744 }
2745
2746 if (irsp->ulpStatus) {
2747 /* Rsp ring <ringno> error: IOCB */
2748 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2749 "0328 Rsp Ring %d error: "
2750 "IOCB Data: "
2751 "x%x x%x x%x x%x "
2752 "x%x x%x x%x x%x "
2753 "x%x x%x x%x x%x "
2754 "x%x x%x x%x x%x\n",
2755 pring->ringno,
2756 irsp->un.ulpWord[0],
2757 irsp->un.ulpWord[1],
2758 irsp->un.ulpWord[2],
2759 irsp->un.ulpWord[3],
2760 irsp->un.ulpWord[4],
2761 irsp->un.ulpWord[5],
2762 *(((uint32_t *) irsp) + 6),
2763 *(((uint32_t *) irsp) + 7),
2764 *(((uint32_t *) irsp) + 8),
2765 *(((uint32_t *) irsp) + 9),
2766 *(((uint32_t *) irsp) + 10),
2767 *(((uint32_t *) irsp) + 11),
2768 *(((uint32_t *) irsp) + 12),
2769 *(((uint32_t *) irsp) + 13),
2770 *(((uint32_t *) irsp) + 14),
2771 *(((uint32_t *) irsp) + 15));
2772 }
2773
2774 /*
2775 * Fetch the IOCB command type and call the correct completion
2776 * routine. Solicited and Unsolicited IOCBs on the ELS ring
2777 * get freed back to the lpfc_iocb_list by the discovery
2778 * kernel thread.
2779 */
2780 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2781 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2782 switch (type) {
2783 case LPFC_SOL_IOCB:
2784 spin_unlock_irqrestore(&phba->hbalock, iflag);
2785 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2786 spin_lock_irqsave(&phba->hbalock, iflag);
2787 break;
2788
2789 case LPFC_UNSOL_IOCB:
2790 spin_unlock_irqrestore(&phba->hbalock, iflag);
2791 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2792 spin_lock_irqsave(&phba->hbalock, iflag);
2793 if (!rc)
2794 free_saveq = 0;
2795 break;
2796
2797 case LPFC_ABORT_IOCB:
2798 cmdiocbp = NULL;
2799 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2800 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2801 saveq);
2802 if (cmdiocbp) {
2803 /* Call the specified completion routine */
2804 if (cmdiocbp->iocb_cmpl) {
2805 spin_unlock_irqrestore(&phba->hbalock,
2806 iflag);
2807 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2808 saveq);
2809 spin_lock_irqsave(&phba->hbalock,
2810 iflag);
2811 } else
2812 __lpfc_sli_release_iocbq(phba,
2813 cmdiocbp);
2814 }
2815 break;
2816
2817 case LPFC_UNKNOWN_IOCB:
2818 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2819 char adaptermsg[LPFC_MAX_ADPTMSG];
2820 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2821 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2822 MAX_MSG_DATA);
2823 dev_warn(&((phba->pcidev)->dev),
2824 "lpfc%d: %s\n",
2825 phba->brd_no, adaptermsg);
2826 } else {
2827 /* Unknown IOCB command */
2828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2829 "0335 Unknown IOCB "
2830 "command Data: x%x "
2831 "x%x x%x x%x\n",
2832 irsp->ulpCommand,
2833 irsp->ulpStatus,
2834 irsp->ulpIoTag,
2835 irsp->ulpContext);
2836 }
2837 break;
2838 }
2839
2840 if (free_saveq) {
2841 list_for_each_entry_safe(rspiocbp, next_iocb,
2842 &saveq->list, list) {
2843 list_del(&rspiocbp->list);
2844 __lpfc_sli_release_iocbq(phba, rspiocbp);
2845 }
2846 __lpfc_sli_release_iocbq(phba, saveq);
2847 }
2848 rspiocbp = NULL;
2849 }
2850 spin_unlock_irqrestore(&phba->hbalock, iflag);
2851 return rspiocbp;
2852}
2853
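The factored-out slow-path handler above keeps chaining response iocbs onto iocb_continueq until an entry arrives with the LE (last entry) bit set, and only then hands the whole chain to the type-specific processing. Stripped of locking and driver types, the accumulate-until-last-flag shape looks roughly like this standalone sketch (names and data are invented):

	#include <stddef.h>
	#include <stdio.h>

	/* One response entry; 'last' mirrors the last-entry bit on the real IOCB. */
	struct rsp_entry {
		const char *data;
		int last;
	};

	#define MAX_CHAIN 16

	/* Chain entries until one arrives with 'last' set, then process the
	 * whole sequence at once and reset the continuation state. */
	static void handle_rsp(struct rsp_entry *chain, int *count,
			       const struct rsp_entry *e)
	{
		int i;

		if (*count < MAX_CHAIN)
			chain[(*count)++] = *e;
		if (!e->last)
			return;			/* sequence not complete yet */

		printf("processing %d chained entries:", *count);
		for (i = 0; i < *count; i++)
			printf(" %s", chain[i].data);
		printf("\n");
		*count = 0;
	}

	int main(void)
	{
		struct rsp_entry chain[MAX_CHAIN];
		int count = 0;
		const struct rsp_entry in[] = {
			{ "hdr", 0 }, { "payload", 0 }, { "tail", 1 },
		};
		size_t i;

		for (i = 0; i < sizeof(in) / sizeof(in[0]); i++)
			handle_rsp(chain, &count, &in[i]);
		return 0;
	}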
2854/**
2855 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2072 * @phba: Pointer to HBA context object. 2856 * @phba: Pointer to HBA context object.
2073 * @pring: Pointer to driver SLI ring object. 2857 * @pring: Pointer to driver SLI ring object.
2074 * @mask: Host attention register mask for this ring. 2858 * @mask: Host attention register mask for this ring.
2075 * 2859 *
2076 * This function is called from the worker thread when there is a ring 2860 * This routine wraps the actual slow_ring event process routine from the
2077 * event for non-fcp rings. The caller does not hold any lock . 2861 * API jump table function pointer from the lpfc_hba struct.
2078 * The function processes each response iocb in the response ring until it
2079 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2080 * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
2081 * response iocb indicates a completion of a command iocb. The function
2082 * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
2083 * iocb. The function frees the resources or calls the completion handler if
2084 * this iocb is an abort completion. The function returns 0 when the allocated
2085 * iocbs are not freed, otherwise returns 1.
2086 **/ 2862 **/
2087int 2863void
2088lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2864lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2089 struct lpfc_sli_ring *pring, uint32_t mask) 2865 struct lpfc_sli_ring *pring, uint32_t mask)
2090{ 2866{
2867 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2868}
2869
2870/**
2871 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2872 * @phba: Pointer to HBA context object.
2873 * @pring: Pointer to driver SLI ring object.
2874 * @mask: Host attention register mask for this ring.
2875 *
2876 * This function is called from the worker thread when there is a ring event
 2877 * for non-fcp rings. The caller does not hold any lock. The function
 2878 * removes each response iocb in the response ring and calls the handle
2879 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2880 **/
2881static void
2882lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2883 struct lpfc_sli_ring *pring, uint32_t mask)
2884{
2091 struct lpfc_pgp *pgp; 2885 struct lpfc_pgp *pgp;
2092 IOCB_t *entry; 2886 IOCB_t *entry;
2093 IOCB_t *irsp = NULL; 2887 IOCB_t *irsp = NULL;
2094 struct lpfc_iocbq *rspiocbp = NULL; 2888 struct lpfc_iocbq *rspiocbp = NULL;
2095 struct lpfc_iocbq *next_iocb;
2096 struct lpfc_iocbq *cmdiocbp;
2097 struct lpfc_iocbq *saveq;
2098 uint8_t iocb_cmd_type;
2099 lpfc_iocb_type type;
2100 uint32_t status, free_saveq;
2101 uint32_t portRspPut, portRspMax; 2889 uint32_t portRspPut, portRspMax;
2102 int rc = 1;
2103 unsigned long iflag; 2890 unsigned long iflag;
2891 uint32_t status;
2104 2892
2105 pgp = &phba->port_gp[pring->ringno]; 2893 pgp = &phba->port_gp[pring->ringno];
2106 spin_lock_irqsave(&phba->hbalock, iflag); 2894 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2128 phba->work_hs = HS_FFER3; 2916 phba->work_hs = HS_FFER3;
2129 lpfc_handle_eratt(phba); 2917 lpfc_handle_eratt(phba);
2130 2918
2131 return 1; 2919 return;
2132 } 2920 }
2133 2921
2134 rmb(); 2922 rmb();
@@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2173 2961
2174 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2962 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2175 2963
2176 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2964 spin_unlock_irqrestore(&phba->hbalock, iflag);
2177 2965 /* Handle the response IOCB */
2178 pring->iocb_continueq_cnt++; 2966 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2179 if (irsp->ulpLe) { 2967 spin_lock_irqsave(&phba->hbalock, iflag);
2180 /*
2181 * By default, the driver expects to free all resources
2182 * associated with this iocb completion.
2183 */
2184 free_saveq = 1;
2185 saveq = list_get_first(&pring->iocb_continueq,
2186 struct lpfc_iocbq, list);
2187 irsp = &(saveq->iocb);
2188 list_del_init(&pring->iocb_continueq);
2189 pring->iocb_continueq_cnt = 0;
2190
2191 pring->stats.iocb_rsp++;
2192
2193 /*
2194 * If resource errors reported from HBA, reduce
2195 * queuedepths of the SCSI device.
2196 */
2197 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2198 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2199 spin_unlock_irqrestore(&phba->hbalock, iflag);
2200 lpfc_rampdown_queue_depth(phba);
2201 spin_lock_irqsave(&phba->hbalock, iflag);
2202 }
2203
2204 if (irsp->ulpStatus) {
2205 /* Rsp ring <ringno> error: IOCB */
2206 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2207 "0328 Rsp Ring %d error: "
2208 "IOCB Data: "
2209 "x%x x%x x%x x%x "
2210 "x%x x%x x%x x%x "
2211 "x%x x%x x%x x%x "
2212 "x%x x%x x%x x%x\n",
2213 pring->ringno,
2214 irsp->un.ulpWord[0],
2215 irsp->un.ulpWord[1],
2216 irsp->un.ulpWord[2],
2217 irsp->un.ulpWord[3],
2218 irsp->un.ulpWord[4],
2219 irsp->un.ulpWord[5],
2220 *(((uint32_t *) irsp) + 6),
2221 *(((uint32_t *) irsp) + 7),
2222 *(((uint32_t *) irsp) + 8),
2223 *(((uint32_t *) irsp) + 9),
2224 *(((uint32_t *) irsp) + 10),
2225 *(((uint32_t *) irsp) + 11),
2226 *(((uint32_t *) irsp) + 12),
2227 *(((uint32_t *) irsp) + 13),
2228 *(((uint32_t *) irsp) + 14),
2229 *(((uint32_t *) irsp) + 15));
2230 }
2231
2232 /*
2233 * Fetch the IOCB command type and call the correct
2234 * completion routine. Solicited and Unsolicited
2235 * IOCBs on the ELS ring get freed back to the
2236 * lpfc_iocb_list by the discovery kernel thread.
2237 */
2238 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2239 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2240 if (type == LPFC_SOL_IOCB) {
2241 spin_unlock_irqrestore(&phba->hbalock, iflag);
2242 rc = lpfc_sli_process_sol_iocb(phba, pring,
2243 saveq);
2244 spin_lock_irqsave(&phba->hbalock, iflag);
2245 } else if (type == LPFC_UNSOL_IOCB) {
2246 spin_unlock_irqrestore(&phba->hbalock, iflag);
2247 rc = lpfc_sli_process_unsol_iocb(phba, pring,
2248 saveq);
2249 spin_lock_irqsave(&phba->hbalock, iflag);
2250 if (!rc)
2251 free_saveq = 0;
2252 } else if (type == LPFC_ABORT_IOCB) {
2253 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
2254 ((cmdiocbp =
2255 lpfc_sli_iocbq_lookup(phba, pring,
2256 saveq)))) {
2257 /* Call the specified completion
2258 routine */
2259 if (cmdiocbp->iocb_cmpl) {
2260 spin_unlock_irqrestore(
2261 &phba->hbalock,
2262 iflag);
2263 (cmdiocbp->iocb_cmpl) (phba,
2264 cmdiocbp, saveq);
2265 spin_lock_irqsave(
2266 &phba->hbalock,
2267 iflag);
2268 } else
2269 __lpfc_sli_release_iocbq(phba,
2270 cmdiocbp);
2271 }
2272 } else if (type == LPFC_UNKNOWN_IOCB) {
2273 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2274
2275 char adaptermsg[LPFC_MAX_ADPTMSG];
2276
2277 memset(adaptermsg, 0,
2278 LPFC_MAX_ADPTMSG);
2279 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2280 MAX_MSG_DATA);
2281 dev_warn(&((phba->pcidev)->dev),
2282 "lpfc%d: %s\n",
2283 phba->brd_no, adaptermsg);
2284 } else {
2285 /* Unknown IOCB command */
2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2287 "0335 Unknown IOCB "
2288 "command Data: x%x "
2289 "x%x x%x x%x\n",
2290 irsp->ulpCommand,
2291 irsp->ulpStatus,
2292 irsp->ulpIoTag,
2293 irsp->ulpContext);
2294 }
2295 }
2296
2297 if (free_saveq) {
2298 list_for_each_entry_safe(rspiocbp, next_iocb,
2299 &saveq->list, list) {
2300 list_del(&rspiocbp->list);
2301 __lpfc_sli_release_iocbq(phba,
2302 rspiocbp);
2303 }
2304 __lpfc_sli_release_iocbq(phba, saveq);
2305 }
2306 rspiocbp = NULL;
2307 }
2308 2968
2309 /* 2969 /*
2310 * If the port response put pointer has not been updated, sync 2970 * If the port response put pointer has not been updated, sync
@@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2338 } 2998 }
2339 2999
2340 spin_unlock_irqrestore(&phba->hbalock, iflag); 3000 spin_unlock_irqrestore(&phba->hbalock, iflag);
2341 return rc; 3001 return;
3002}
3003
3004/**
3005 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3006 * @phba: Pointer to HBA context object.
3007 * @pring: Pointer to driver SLI ring object.
3008 * @mask: Host attention register mask for this ring.
3009 *
3010 * This function is called from the worker thread when there is a pending
3011 * ELS response iocb on the driver internal slow-path response iocb worker
 3012 * queue. The caller does not hold any lock. The function removes each
 3013 * response iocb from the response worker queue and calls the handle
3014 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3015 **/
3016static void
3017lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask)
3019{
3020 struct lpfc_iocbq *irspiocbq;
3021 unsigned long iflag;
3022
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
3024 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
3027 irspiocbq, struct lpfc_iocbq, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
3031 }
2342} 3032}
2343 3033
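The SLI-4 variant drains a driver-internal work queue, taking the lock only long enough to unlink one element and processing it with the lock dropped. A standalone sketch of that dequeue-under-lock, process-outside-lock loop, using a pthread mutex in place of the hba spinlock (names invented):

	#include <pthread.h>
	#include <stdio.h>

	struct work {
		struct work *next;
		int id;
	};

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct work *queue_head;

	/* Hold the lock only long enough to unlink one item, then process it
	 * with the lock dropped. */
	static void drain_queue(void)
	{
		for (;;) {
			struct work *w;

			pthread_mutex_lock(&lock);
			w = queue_head;
			if (w)
				queue_head = w->next;
			pthread_mutex_unlock(&lock);

			if (!w)
				break;
			printf("processing work item %d\n", w->id);
		}
	}

	int main(void)
	{
		struct work w1 = { NULL, 1 }, w2 = { &w1, 2 };

		queue_head = &w2;
		drain_queue();
		return 0;
	}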
2344/** 3034/**
@@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2420} 3110}
2421 3111
2422/** 3112/**
2423 * lpfc_sli_brdready - Check for host status bits 3113 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
2424 * @phba: Pointer to HBA context object. 3114 * @phba: Pointer to HBA context object.
2425 * @mask: Bit mask to be checked. 3115 * @mask: Bit mask to be checked.
2426 * 3116 *
@@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2432 * function returns 1 when HBA fail to restart otherwise returns 3122 * function returns 1 when HBA fail to restart otherwise returns
2433 * zero. 3123 * zero.
2434 **/ 3124 **/
2435int 3125static int
2436lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3126lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2437{ 3127{
2438 uint32_t status; 3128 uint32_t status;
2439 int i = 0; 3129 int i = 0;
@@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
2477 return retval; 3167 return retval;
2478} 3168}
2479 3169
3170/**
3171 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3172 * @phba: Pointer to HBA context object.
3173 * @mask: Bit mask to be checked.
3174 *
 3175 * This function checks the host status register to check if the HBA is
 3176 * ready. This function will wait in a loop for the HBA to be ready.
 3177 * If the HBA is not ready, the function will reset the HBA PCI
 3178 * function again. The function returns 1 when the HBA fails to be ready,
 3179 * otherwise it returns zero.
3180 **/
3181static int
3182lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3183{
3184 uint32_t status;
3185 int retval = 0;
3186
3187 /* Read the HBA Host Status Register */
3188 status = lpfc_sli4_post_status_check(phba);
3189
3190 if (status) {
3191 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3192 lpfc_sli_brdrestart(phba);
3193 status = lpfc_sli4_post_status_check(phba);
3194 }
3195
3196 /* Check to see if any errors occurred during init */
3197 if (status) {
3198 phba->link_state = LPFC_HBA_ERROR;
3199 retval = 1;
3200 } else
3201 phba->sli4_hba.intr_enable = 0;
3202
3203 return retval;
3204}
3205
3206/**
 3207 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3208 * @phba: Pointer to HBA context object.
3209 * @mask: Bit mask to be checked.
3210 *
 3211 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3212 * from the API jump table function pointer from the lpfc_hba struct.
3213 **/
3214int
3215lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3216{
3217 return phba->lpfc_sli_brdready(phba, mask);
3218}
3219
2480#define BARRIER_TEST_PATTERN (0xdeadbeef) 3220#define BARRIER_TEST_PATTERN (0xdeadbeef)
2481 3221
2482/** 3222/**
@@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
2532 mdelay(1); 3272 mdelay(1);
2533 3273
2534 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3274 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
2535 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 3275 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2536 phba->pport->stopped) 3276 phba->pport->stopped)
2537 goto restore_hc; 3277 goto restore_hc;
2538 else 3278 else
@@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2613 return 1; 3353 return 1;
2614 } 3354 }
2615 3355
2616 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 3356 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock);
2617 3359
2618 mempool_free(pmb, phba->mbox_mem_pool); 3360 mempool_free(pmb, phba->mbox_mem_pool);
2619 3361
@@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2636 } 3378 }
2637 spin_lock_irq(&phba->hbalock); 3379 spin_lock_irq(&phba->hbalock);
2638 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3380 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3381 psli->mbox_active = NULL;
2639 phba->link_flag &= ~LS_IGNORE_ERATT; 3382 phba->link_flag &= ~LS_IGNORE_ERATT;
2640 spin_unlock_irq(&phba->hbalock); 3383 spin_unlock_irq(&phba->hbalock);
2641 3384
2642 psli->mbox_active = NULL;
2643 lpfc_hba_down_post(phba); 3385 lpfc_hba_down_post(phba);
2644 phba->link_state = LPFC_HBA_ERROR; 3386 phba->link_state = LPFC_HBA_ERROR;
2645 3387
@@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2647} 3389}
2648 3390
2649/** 3391/**
2650 * lpfc_sli_brdreset - Reset the HBA 3392 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
2651 * @phba: Pointer to HBA context object. 3393 * @phba: Pointer to HBA context object.
2652 * 3394 *
2653 * This function resets the HBA by writing HC_INITFF to the control 3395 * This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2683 (cfg_value & 3425 (cfg_value &
2684 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3426 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2685 3427
2686 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 3428 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3429
2687 /* Now toggle INITFF bit in the Host Control Register */ 3430 /* Now toggle INITFF bit in the Host Control Register */
2688 writel(HC_INITFF, phba->HCregaddr); 3431 writel(HC_INITFF, phba->HCregaddr);
2689 mdelay(1); 3432 mdelay(1);
@@ -2710,7 +3453,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2710} 3453}
2711 3454
2712/** 3455/**
2713 * lpfc_sli_brdrestart - Restart the HBA 3456 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3457 * @phba: Pointer to HBA context object.
3458 *
3459 * This function resets a SLI4 HBA. This function disables PCI layer parity
 3460 * checking while resetting the device. The caller is not required to hold
3461 * any locks.
3462 *
3463 * This function returns 0 always.
3464 **/
3465int
3466lpfc_sli4_brdreset(struct lpfc_hba *phba)
3467{
3468 struct lpfc_sli *psli = &phba->sli;
3469 uint16_t cfg_value;
3470 uint8_t qindx;
3471
3472 /* Reset HBA */
3473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3474 "0295 Reset HBA Data: x%x x%x\n",
3475 phba->pport->port_state, psli->sli_flag);
3476
3477 /* perform board reset */
3478 phba->fc_eventTag = 0;
3479 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0;
3481
3482 /* Turn off parity checking and serr during the physical reset */
3483 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3484 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3485 (cfg_value &
3486 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3487
3488 spin_lock_irq(&phba->hbalock);
3489 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3490 phba->fcf.fcf_flag = 0;
3491 /* Clean up the child queue list for the CQs */
3492 list_del_init(&phba->sli4_hba.mbx_wq->list);
3493 list_del_init(&phba->sli4_hba.els_wq->list);
3494 list_del_init(&phba->sli4_hba.hdr_rq->list);
3495 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3502 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3503 spin_unlock_irq(&phba->hbalock);
3504
3505 /* Now physically reset the device */
3506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3507 "0389 Performing PCI function reset!\n");
3508 /* Perform FCoE PCI function reset */
3509 lpfc_pci_function_reset(phba);
3510
3511 return 0;
3512}
3513
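One detail worth calling out in the reset path above is that the PCI command register is read, the parity and SERR reporting bits are masked off for the duration of the physical reset, and the saved value is available to be written back once the device is stable again. A standalone sketch of that save/mask/restore pattern on a command-register value (the bit positions and the restore point here are purely illustrative, not taken from the driver):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative bit positions for parity and SERR reporting. */
	#define CMD_PARITY (1u << 6)
	#define CMD_SERR   (1u << 8)

	static uint16_t pci_command = CMD_PARITY | CMD_SERR | 0x7;

	int main(void)
	{
		uint16_t saved = pci_command;

		/* Turn off parity/SERR reporting while the reset is in flight. */
		pci_command = saved & (uint16_t)~(CMD_PARITY | CMD_SERR);
		printf("during reset: 0x%04x\n", pci_command);

		/* ... physical reset would happen here ... */

		/* Write the saved value back once the device is usable again. */
		pci_command = saved;
		printf("restored:     0x%04x\n", pci_command);
		return 0;
	}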
3514/**
3515 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
2714 * @phba: Pointer to HBA context object. 3516 * @phba: Pointer to HBA context object.
2715 * 3517 *
2716 * This function is called in the SLI initialization code path to 3518 * This function is called in the SLI initialization code path to
@@ -2722,8 +3524,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2722 * The function does not guarantee completion of MBX_RESTART mailbox 3524 * The function does not guarantee completion of MBX_RESTART mailbox
2723 * command before the return of this function. 3525 * command before the return of this function.
2724 **/ 3526 **/
2725int 3527static int
2726lpfc_sli_brdrestart(struct lpfc_hba *phba) 3528lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
2727{ 3529{
2728 MAILBOX_t *mb; 3530 MAILBOX_t *mb;
2729 struct lpfc_sli *psli; 3531 struct lpfc_sli *psli;
@@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2762 lpfc_sli_brdreset(phba); 3564 lpfc_sli_brdreset(phba);
2763 phba->pport->stopped = 0; 3565 phba->pport->stopped = 0;
2764 phba->link_state = LPFC_INIT_START; 3566 phba->link_state = LPFC_INIT_START;
2765 3567 phba->hba_flag = 0;
2766 spin_unlock_irq(&phba->hbalock); 3568 spin_unlock_irq(&phba->hbalock);
2767 3569
2768 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3570 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2777,6 +3579,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2777} 3579}
2778 3580
2779/** 3581/**
3582 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3583 * @phba: Pointer to HBA context object.
3584 *
3585 * This function is called in the SLI initialization code path to restart
3586 * a SLI4 HBA. The caller is not required to hold any lock.
3587 * At the end of the function, it calls lpfc_hba_down_post function to
3588 * free any pending commands.
3589 **/
3590static int
3591lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3592{
3593 struct lpfc_sli *psli = &phba->sli;
3594
3595
3596 /* Restart HBA */
3597 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3598 "0296 Restart HBA Data: x%x x%x\n",
3599 phba->pport->port_state, psli->sli_flag);
3600
3601 lpfc_sli4_brdreset(phba);
3602
3603 spin_lock_irq(&phba->hbalock);
3604 phba->pport->stopped = 0;
3605 phba->link_state = LPFC_INIT_START;
3606 phba->hba_flag = 0;
3607 spin_unlock_irq(&phba->hbalock);
3608
3609 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3610 psli->stats_start = get_seconds();
3611
3612 lpfc_hba_down_post(phba);
3613
3614 return 0;
3615}
3616
3617/**
3618 * lpfc_sli_brdrestart - Wrapper func for restarting hba
3619 * @phba: Pointer to HBA context object.
3620 *
3621 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
3622 * API jump table function pointer from the lpfc_hba struct.
3623**/
3624int
3625lpfc_sli_brdrestart(struct lpfc_hba *phba)
3626{
3627 return phba->lpfc_sli_brdrestart(phba);
3628}
3629
3630/**
2780 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 3631 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
2781 * @phba: Pointer to HBA context object. 3632 * @phba: Pointer to HBA context object.
2782 * 3633 *
@@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2940 if (!pmb) 3791 if (!pmb)
2941 return -ENOMEM; 3792 return -ENOMEM;
2942 3793
2943 pmbox = &pmb->mb; 3794 pmbox = &pmb->u.mb;
2944 3795
2945 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 3796 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2946 phba->link_state = LPFC_INIT_MBX_CMDS; 3797 phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -2984,6 +3835,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2984} 3835}
2985 3836
2986/** 3837/**
3838 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3839 * @phba: Pointer to HBA context object.
3840 *
3841 * This function is called during the SLI initialization to configure
3842 * all the HBQs and post buffers to the HBQ. The caller is not
3843 * required to hold any locks. This function will return zero if successful
3844 * else it will return negative error code.
3845 **/
3846static int
3847lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3848{
3849 phba->hbq_in_use = 1;
3850 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3851 phba->hbq_count = 1;
3852 /* Initially populate or replenish the HBQs */
3853 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3854 return 0;
3855}
3856
3857/**
2987 * lpfc_sli_config_port - Issue config port mailbox command 3858 * lpfc_sli_config_port - Issue config port mailbox command
2988 * @phba: Pointer to HBA context object. 3859 * @phba: Pointer to HBA context object.
2989 * @sli_mode: sli mode - 2/3 3860 * @sli_mode: sli mode - 2/3
@@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3048 "0442 Adapter failed to init, mbxCmd x%x " 3919 "0442 Adapter failed to init, mbxCmd x%x "
3049 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 3920 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3050 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); 3921 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
3051 spin_lock_irq(&phba->hbalock); 3922 spin_lock_irq(&phba->hbalock);
3052 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 3923 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
3053 spin_unlock_irq(&phba->hbalock); 3924 spin_unlock_irq(&phba->hbalock);
3054 rc = -ENXIO; 3925 rc = -ENXIO;
3055 } else 3926 } else {
3927 /* Allow asynchronous mailbox command to go through */
3928 spin_lock_irq(&phba->hbalock);
3929 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3930 spin_unlock_irq(&phba->hbalock);
3056 done = 1; 3931 done = 1;
3932 }
3057 } 3933 }
3058 if (!done) { 3934 if (!done) {
3059 rc = -EINVAL; 3935 rc = -EINVAL;
3060 goto do_prep_failed; 3936 goto do_prep_failed;
3061 } 3937 }
3062 if (pmb->mb.un.varCfgPort.sli_mode == 3) { 3938 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3063 if (!pmb->mb.un.varCfgPort.cMA) { 3939 if (!pmb->u.mb.un.varCfgPort.cMA) {
3064 rc = -ENXIO; 3940 rc = -ENXIO;
3065 goto do_prep_failed; 3941 goto do_prep_failed;
3066 } 3942 }
3067 if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { 3943 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
3068 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3944 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3069 phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; 3945 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3946 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3947 phba->max_vpi : phba->max_vports;
3948
3070 } else 3949 } else
3071 phba->max_vpi = 0; 3950 phba->max_vpi = 0;
3072 if (pmb->mb.un.varCfgPort.gerbm) 3951 if (pmb->u.mb.un.varCfgPort.gdss)
3952 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3953 if (pmb->u.mb.un.varCfgPort.gerbm)
3073 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3954 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3074 if (pmb->mb.un.varCfgPort.gcrp) 3955 if (pmb->u.mb.un.varCfgPort.gcrp)
3075 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3956 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3076 if (pmb->mb.un.varCfgPort.ginb) { 3957 if (pmb->u.mb.un.varCfgPort.ginb) {
3077 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3958 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3078 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; 3959 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3079 phba->port_gp = phba->mbox->us.s3_inb_pgp.port; 3960 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3089 } 3970 }
3090 3971
3091 if (phba->cfg_enable_bg) { 3972 if (phba->cfg_enable_bg) {
3092 if (pmb->mb.un.varCfgPort.gbg) 3973 if (pmb->u.mb.un.varCfgPort.gbg)
3093 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 3974 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3094 else 3975 else
3095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
3184 if (rc) 4065 if (rc)
3185 goto lpfc_sli_hba_setup_error; 4066 goto lpfc_sli_hba_setup_error;
3186 } 4067 }
3187 4068 spin_lock_irq(&phba->hbalock);
3188 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4069 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4070 spin_unlock_irq(&phba->hbalock);
3189 4071
3190 rc = lpfc_config_port_post(phba); 4072 rc = lpfc_config_port_post(phba);
3191 if (rc) 4073 if (rc)
@@ -3200,6 +4082,488 @@ lpfc_sli_hba_setup_error:
3200 return rc; 4082 return rc;
3201} 4083}
3202 4084
4085/**
4086 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4087 * @phba: Pointer to HBA context object.
4088 * @mboxq: mailbox pointer.
 4089 * This function issues a dump mailbox command to read config region
4090 * 23 and parse the records in the region and populate driver
4091 * data structure.
4092 **/
4093static int
4094lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4095 LPFC_MBOXQ_t *mboxq)
4096{
4097 struct lpfc_dmabuf *mp;
4098 struct lpfc_mqe *mqe;
4099 uint32_t data_length;
4100 int rc;
4101
4102 /* Program the default value of vlan_id and fc_map */
4103 phba->valid_vlan = 0;
4104 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4105 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4106 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4107
4108 mqe = &mboxq->u.mqe;
4109 if (lpfc_dump_fcoe_param(phba, mboxq))
4110 return -ENOMEM;
4111
4112 mp = (struct lpfc_dmabuf *) mboxq->context1;
4113 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4114
4115 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4116 "(%d):2571 Mailbox cmd x%x Status x%x "
4117 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4118 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4119 "CQ: x%x x%x x%x x%x\n",
4120 mboxq->vport ? mboxq->vport->vpi : 0,
4121 bf_get(lpfc_mqe_command, mqe),
4122 bf_get(lpfc_mqe_status, mqe),
4123 mqe->un.mb_words[0], mqe->un.mb_words[1],
4124 mqe->un.mb_words[2], mqe->un.mb_words[3],
4125 mqe->un.mb_words[4], mqe->un.mb_words[5],
4126 mqe->un.mb_words[6], mqe->un.mb_words[7],
4127 mqe->un.mb_words[8], mqe->un.mb_words[9],
4128 mqe->un.mb_words[10], mqe->un.mb_words[11],
4129 mqe->un.mb_words[12], mqe->un.mb_words[13],
4130 mqe->un.mb_words[14], mqe->un.mb_words[15],
4131 mqe->un.mb_words[16], mqe->un.mb_words[50],
4132 mboxq->mcqe.word0,
4133 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4134 mboxq->mcqe.trailer);
4135
4136 if (rc) {
4137 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4138 kfree(mp);
4139 return -EIO;
4140 }
4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE)
4143 return -EIO;
4144
4145 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4147 kfree(mp);
4148 return 0;
4149}
4150
4151/**
4152 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4153 * @phba: pointer to lpfc hba data structure.
4154 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4155 * @vpd: pointer to the memory to hold resulting port vpd data.
4156 * @vpd_size: On input, the number of bytes allocated to @vpd.
4157 * On output, the number of data bytes in @vpd.
4158 *
4159 * This routine executes a READ_REV SLI4 mailbox command. In
4160 * addition, this routine gets the port vpd data.
4161 *
4162 * Return codes
 4163 * 0 - successful
 4164 * ENOMEM - could not allocate memory.
4165 **/
4166static int
4167lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4168 uint8_t *vpd, uint32_t *vpd_size)
4169{
4170 int rc = 0;
4171 uint32_t dma_size;
4172 struct lpfc_dmabuf *dmabuf;
4173 struct lpfc_mqe *mqe;
4174
4175 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4176 if (!dmabuf)
4177 return -ENOMEM;
4178
4179 /*
4180 * Get a DMA buffer for the vpd data resulting from the READ_REV
4181 * mailbox command.
4182 */
4183 dma_size = *vpd_size;
4184 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4185 dma_size,
4186 &dmabuf->phys,
4187 GFP_KERNEL);
4188 if (!dmabuf->virt) {
4189 kfree(dmabuf);
4190 return -ENOMEM;
4191 }
4192 memset(dmabuf->virt, 0, dma_size);
4193
4194 /*
4195 * The SLI4 implementation of READ_REV conflicts at word1,
4196 * bits 31:16 and SLI4 adds vpd functionality not present
4197 * in SLI3. This code corrects the conflicts.
4198 */
4199 lpfc_read_rev(phba, mboxq);
4200 mqe = &mboxq->u.mqe;
4201 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4202 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4203 mqe->un.read_rev.word1 &= 0x0000FFFF;
4204 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4205 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4206
4207 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4208 if (rc) {
4209 dma_free_coherent(&phba->pcidev->dev, dma_size,
4210 dmabuf->virt, dmabuf->phys);
4211 return -EIO;
4212 }
4213
4214 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4215 "(%d):0380 Mailbox cmd x%x Status x%x "
4216 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4217 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4218 "CQ: x%x x%x x%x x%x\n",
4219 mboxq->vport ? mboxq->vport->vpi : 0,
4220 bf_get(lpfc_mqe_command, mqe),
4221 bf_get(lpfc_mqe_status, mqe),
4222 mqe->un.mb_words[0], mqe->un.mb_words[1],
4223 mqe->un.mb_words[2], mqe->un.mb_words[3],
4224 mqe->un.mb_words[4], mqe->un.mb_words[5],
4225 mqe->un.mb_words[6], mqe->un.mb_words[7],
4226 mqe->un.mb_words[8], mqe->un.mb_words[9],
4227 mqe->un.mb_words[10], mqe->un.mb_words[11],
4228 mqe->un.mb_words[12], mqe->un.mb_words[13],
4229 mqe->un.mb_words[14], mqe->un.mb_words[15],
4230 mqe->un.mb_words[16], mqe->un.mb_words[50],
4231 mboxq->mcqe.word0,
4232 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4233 mboxq->mcqe.trailer);
4234
4235 /*
4236 * The available vpd length cannot be bigger than the
4237 * DMA buffer passed to the port. Catch the less than
4238 * case and update the caller's size.
4239 */
4240 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4241 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4242
4243 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4244 dma_free_coherent(&phba->pcidev->dev, dma_size,
4245 dmabuf->virt, dmabuf->phys);
4246 kfree(dmabuf);
4247 return 0;
4248}
4249
4250/**
4251 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4252 * @phba: pointer to lpfc hba data structure.
4253 *
4254 * This routine is called to explicitly arm the SLI4 device's completion and
 4255 * event queues.
4256 **/
4257static void
4258lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4259{
4260 uint8_t fcp_eqidx;
4261
4262 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4263 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4264 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4265 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4266 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4267 LPFC_QUEUE_REARM);
4268 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4269 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4270 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4271 LPFC_QUEUE_REARM);
4272}
4273
4274/**
 4275 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4276 * @phba: Pointer to HBA context object.
4277 *
 4278 * This function is the main SLI4 device initialization PCI function. This
 4279 * function is called by the HBA initialization code, HBA reset code and
4280 * HBA error attention handler code. Caller is not required to hold any
4281 * locks.
4282 **/
4283int
4284lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4285{
4286 int rc;
4287 LPFC_MBOXQ_t *mboxq;
4288 struct lpfc_mqe *mqe;
4289 uint8_t *vpd;
4290 uint32_t vpd_size;
4291 uint32_t ftr_rsp = 0;
4292 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4293 struct lpfc_vport *vport = phba->pport;
4294 struct lpfc_dmabuf *mp;
4295
4296 /* Perform a PCI function reset to start from clean */
4297 rc = lpfc_pci_function_reset(phba);
4298 if (unlikely(rc))
4299 return -ENODEV;
4300
 4301 /* Check the HBA Host Status Register for readiness */
4302 rc = lpfc_sli4_post_status_check(phba);
4303 if (unlikely(rc))
4304 return -ENODEV;
4305 else {
4306 spin_lock_irq(&phba->hbalock);
4307 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4308 spin_unlock_irq(&phba->hbalock);
4309 }
4310
4311 /*
4312 * Allocate a single mailbox container for initializing the
4313 * port.
4314 */
4315 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4316 if (!mboxq)
4317 return -ENOMEM;
4318
4319 /*
4320 * Continue initialization with default values even if driver failed
4321 * to read FCoE param config regions
4322 */
4323 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4324 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4325 "2570 Failed to read FCoE parameters \n");
4326
4327 /* Issue READ_REV to collect vpd and FW information. */
4328 vpd_size = PAGE_SIZE;
4329 vpd = kzalloc(vpd_size, GFP_KERNEL);
4330 if (!vpd) {
4331 rc = -ENOMEM;
4332 goto out_free_mbox;
4333 }
4334
4335 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4336 if (unlikely(rc))
4337 goto out_free_vpd;
4338
4339 mqe = &mboxq->u.mqe;
4340 if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
4341 &mqe->un.read_rev) != LPFC_SLI_REV4) ||
4342 (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
4343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4344 "0376 READ_REV Error. SLI Level %d "
4345 "FCoE enabled %d\n",
4346 bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
4347 bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
4348 rc = -EIO;
4349 goto out_free_vpd;
4350 }
4351 /* Single threaded at this point, no need for lock */
4352 spin_lock_irq(&phba->hbalock);
4353 phba->hba_flag |= HBA_FCOE_SUPPORT;
4354 spin_unlock_irq(&phba->hbalock);
4355 /*
4356 * Evaluate the read rev and vpd data. Populate the driver
4357 * state with the results. If this routine fails, the failure
4358 * is not fatal as the driver will use generic values.
4359 */
4360 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4361 if (unlikely(!rc)) {
4362 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4363 "0377 Error %d parsing vpd. "
4364 "Using defaults.\n", rc);
4365 rc = 0;
4366 }
4367
4368 /* By now, we should determine the SLI revision, hard code for now */
4369 phba->sli_rev = LPFC_SLI_REV4;
4370
4371 /*
4372 * Discover the port's supported feature set and match it against the
 4373 * host's requests.
4374 */
4375 lpfc_request_features(phba, mboxq);
4376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4377 if (unlikely(rc)) {
4378 rc = -EIO;
4379 goto out_free_vpd;
4380 }
4381
4382 /*
4383 * The port must support FCP initiator mode as this is the
4384 * only mode running in the host.
4385 */
4386 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4387 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4388 "0378 No support for fcpi mode.\n");
4389 ftr_rsp++;
4390 }
4391
4392 /*
4393 * If the port cannot support the host's requested features
4394 * then turn off the global config parameters to disable the
4395 * feature in the driver. This is not a fatal error.
4396 */
4397 if ((phba->cfg_enable_bg) &&
4398 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4399 ftr_rsp++;
4400
4401 if (phba->max_vpi && phba->cfg_enable_npiv &&
4402 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4403 ftr_rsp++;
4404
4405 if (ftr_rsp) {
4406 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4407 "0379 Feature Mismatch Data: x%08x %08x "
4408 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4409 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4410 phba->cfg_enable_npiv, phba->max_vpi);
4411 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4412 phba->cfg_enable_bg = 0;
4413 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4414 phba->cfg_enable_npiv = 0;
4415 }
4416
4417 /* These SLI3 features are assumed in SLI4 */
4418 spin_lock_irq(&phba->hbalock);
4419 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4420 spin_unlock_irq(&phba->hbalock);
4421
4422 /* Read the port's service parameters. */
4423 lpfc_read_sparam(phba, mboxq, vport->vpi);
4424 mboxq->vport = vport;
4425 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4426 mp = (struct lpfc_dmabuf *) mboxq->context1;
4427 if (rc == MBX_SUCCESS) {
4428 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4429 rc = 0;
4430 }
4431
4432 /*
4433 * This memory was allocated by the lpfc_read_sparam routine. Release
4434 * it to the mbuf pool.
4435 */
4436 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4437 kfree(mp);
4438 mboxq->context1 = NULL;
4439 if (unlikely(rc)) {
4440 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4441 "0382 READ_SPARAM command failed "
4442 "status %d, mbxStatus x%x\n",
4443 rc, bf_get(lpfc_mqe_status, mqe));
4444 phba->link_state = LPFC_HBA_ERROR;
4445 rc = -EIO;
4446 goto out_free_vpd;
4447 }
4448
4449 if (phba->cfg_soft_wwnn)
4450 u64_to_wwn(phba->cfg_soft_wwnn,
4451 vport->fc_sparam.nodeName.u.wwn);
4452 if (phba->cfg_soft_wwpn)
4453 u64_to_wwn(phba->cfg_soft_wwpn,
4454 vport->fc_sparam.portName.u.wwn);
4455 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4456 sizeof(struct lpfc_name));
4457 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4458 sizeof(struct lpfc_name));
4459
4460 /* Update the fc_host data structures with new wwn. */
4461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4463
4464 /* Register SGL pool to the device using non-embedded mailbox command */
4465 rc = lpfc_sli4_post_sgl_list(phba);
4466 if (unlikely(rc)) {
4467 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4468 "0582 Error %d during sgl post operation", rc);
4469 rc = -ENODEV;
4470 goto out_free_vpd;
4471 }
4472
4473 /* Register SCSI SGL pool to the device */
4474 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4475 if (unlikely(rc)) {
4476 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4477 "0383 Error %d during scsi sgl post opeation",
4478 rc);
4479 /* Some Scsi buffers were moved to the abort scsi list */
4480 /* A pci function reset will repost them */
4481 rc = -ENODEV;
4482 goto out_free_vpd;
4483 }
4484
4485 /* Post the rpi header region to the device. */
4486 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4487 if (unlikely(rc)) {
4488 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4489 "0393 Error %d during rpi post operation\n",
4490 rc);
4491 rc = -ENODEV;
4492 goto out_free_vpd;
4493 }
4494 /* Temporary initialization of lpfc_fip_flag to non-fip */
4495 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4496
4497 /* Set up all the queues to the device */
4498 rc = lpfc_sli4_queue_setup(phba);
4499 if (unlikely(rc)) {
4500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4501 "0381 Error %d during queue setup.\n ", rc);
4502 goto out_stop_timers;
4503 }
4504
4505 /* Arm the CQs and then EQs on device */
4506 lpfc_sli4_arm_cqeq_intr(phba);
4507
4508 /* Indicate device interrupt mode */
4509 phba->sli4_hba.intr_enable = 1;
4510
4511 /* Allow asynchronous mailbox command to go through */
4512 spin_lock_irq(&phba->hbalock);
4513 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4514 spin_unlock_irq(&phba->hbalock);
4515
4516 /* Post receive buffers to the device */
4517 lpfc_sli4_rb_setup(phba);
4518
4519 /* Start the ELS watchdog timer */
4520 /*
4521 * The driver for SLI4 is not yet ready to process timeouts
4522 * or interrupts. Once it is, the comment bars can be removed.
4523 */
4524 /* mod_timer(&vport->els_tmofunc,
4525 * jiffies + HZ * (phba->fc_ratov*2)); */
4526
4527 /* Start heart beat timer */
4528 mod_timer(&phba->hb_tmofunc,
4529 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4530 phba->hb_outstanding = 0;
4531 phba->last_completion_time = jiffies;
4532
4533 /* Start error attention (ERATT) polling timer */
4534 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4535
4536 /*
4537 * The port is ready, set the host's link state to LINK_DOWN
4538 * in preparation for link interrupts.
4539 */
4540 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4541 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4542 lpfc_set_loopback_flag(phba);
4543 /* Change driver state to LPFC_LINK_DOWN right before init link */
4544 spin_lock_irq(&phba->hbalock);
4545 phba->link_state = LPFC_LINK_DOWN;
4546 spin_unlock_irq(&phba->hbalock);
4547 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4548 if (unlikely(rc != MBX_NOT_FINISHED)) {
4549 kfree(vpd);
4550 return 0;
4551 } else
4552 rc = -EIO;
4553
4554 /* Unset all the queues set up in this routine when error out */
4555 if (rc)
4556 lpfc_sli4_queue_unset(phba);
4557
4558out_stop_timers:
4559 if (rc)
4560 lpfc_stop_hba_timers(phba);
4561out_free_vpd:
4562 kfree(vpd);
4563out_free_mbox:
4564 mempool_free(mboxq, phba->mbox_mem_pool);
4565 return rc;
4566}
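
The error paths above use the usual kernel unwind-with-goto idiom: each failure jumps to the label that releases whatever has been set up so far, and the labels fall through in reverse order of acquisition. A minimal sketch of the idiom, with hypothetical helpers standing in for the queue and timer setup steps (not the driver's actual functions):

	/* Hypothetical stand-ins for the real setup/teardown steps. */
	static int setup_queues(void)  { return 0; }
	static void unset_queues(void) { }
	static int start_timers(void)  { return 0; }

	static int example_port_setup(void)
	{
		int rc;

		rc = setup_queues();		/* e.g. lpfc_sli4_queue_setup() */
		if (rc)
			goto out;		/* nothing acquired yet */
		rc = start_timers();		/* e.g. the hb/eratt timers */
		if (rc)
			goto out_unset_queues;
		return 0;

	out_unset_queues:
		unset_queues();			/* undo in reverse order */
	out:
		return rc;
	}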
3203 4567
3204/** 4568/**
3205 * lpfc_mbox_timeout - Timeout call back function for mbox timer 4569 * lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3244,7 +4608,7 @@ void
3244lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 4608lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3245{ 4609{
3246 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 4610 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
3247 MAILBOX_t *mb = &pmbox->mb; 4611 MAILBOX_t *mb = &pmbox->u.mb;
3248 struct lpfc_sli *psli = &phba->sli; 4612 struct lpfc_sli *psli = &phba->sli;
3249 struct lpfc_sli_ring *pring; 4613 struct lpfc_sli_ring *pring;
3250 4614
@@ -3281,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3281 spin_unlock_irq(&phba->pport->work_port_lock); 4645 spin_unlock_irq(&phba->pport->work_port_lock);
3282 spin_lock_irq(&phba->hbalock); 4646 spin_lock_irq(&phba->hbalock);
3283 phba->link_state = LPFC_LINK_UNKNOWN; 4647 phba->link_state = LPFC_LINK_UNKNOWN;
3284 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 4648 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3285 spin_unlock_irq(&phba->hbalock); 4649 spin_unlock_irq(&phba->hbalock);
3286 4650
3287 pring = &psli->ring[psli->fcp_ring]; 4651 pring = &psli->ring[psli->fcp_ring];
@@ -3289,32 +4653,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3289 4653
3290 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4654 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3291 "0345 Resetting board due to mailbox timeout\n"); 4655 "0345 Resetting board due to mailbox timeout\n");
3292 /* 4656
3293 * lpfc_offline calls lpfc_sli_hba_down which will clean up 4657 /* Reset the HBA device */
3294 * on oustanding mailbox commands. 4658 lpfc_reset_hba(phba);
3295 */
3296 /* If resets are disabled then set error state and return. */
3297 if (!phba->cfg_enable_hba_reset) {
3298 phba->link_state = LPFC_HBA_ERROR;
3299 return;
3300 }
3301 lpfc_offline_prep(phba);
3302 lpfc_offline(phba);
3303 lpfc_sli_brdrestart(phba);
3304 lpfc_online(phba);
3305 lpfc_unblock_mgmt_io(phba);
3306 return;
3307} 4659}
3308 4660
3309/** 4661/**
3310 * lpfc_sli_issue_mbox - Issue a mailbox command to firmware 4662 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
3311 * @phba: Pointer to HBA context object. 4663 * @phba: Pointer to HBA context object.
3312 * @pmbox: Pointer to mailbox object. 4664 * @pmbox: Pointer to mailbox object.
3313 * @flag: Flag indicating how the mailbox need to be processed. 4665 * @flag: Flag indicating how the mailbox need to be processed.
3314 * 4666 *
3315 * This function is called by discovery code and HBA management code 4667 * This function is called by discovery code and HBA management code
3316 * to submit a mailbox command to firmware. This function gets the 4668 * to submit a mailbox command to firmware with SLI-3 interface spec. This
3317 * hbalock to protect the data structures. 4669 * function gets the hbalock to protect the data structures.
3318 * The mailbox command can be submitted in polling mode, in which case 4670 * The mailbox command can be submitted in polling mode, in which case
3319 * this function will wait in a polling loop for the completion of the 4671 * this function will wait in a polling loop for the completion of the
3320 * mailbox. 4672 * mailbox.
@@ -3332,8 +4684,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3332 * return codes the caller owns the mailbox command after the return of 4684 * return codes the caller owns the mailbox command after the return of
3333 * the function. 4685 * the function.
3334 **/ 4686 **/
3335int 4687static int
3336lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 4688lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4689 uint32_t flag)
3337{ 4690{
3338 MAILBOX_t *mb; 4691 MAILBOX_t *mb;
3339 struct lpfc_sli *psli = &phba->sli; 4692 struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +4702,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3349 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4702 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3350 if (!pmbox) { 4703 if (!pmbox) {
3351 /* processing mbox queue from intr_handler */ 4704 /* processing mbox queue from intr_handler */
4705 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4706 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4707 return MBX_SUCCESS;
4708 }
3352 processing_queue = 1; 4709 processing_queue = 1;
3353 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4710 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3354 pmbox = lpfc_mbox_get(phba); 4711 pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +4722,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3365 lpfc_printf_log(phba, KERN_ERR, 4722 lpfc_printf_log(phba, KERN_ERR,
3366 LOG_MBOX | LOG_VPORT, 4723 LOG_MBOX | LOG_VPORT,
3367 "1806 Mbox x%x failed. No vport\n", 4724 "1806 Mbox x%x failed. No vport\n",
3368 pmbox->mb.mbxCommand); 4725 pmbox->u.mb.mbxCommand);
3369 dump_stack(); 4726 dump_stack();
3370 goto out_not_finished; 4727 goto out_not_finished;
3371 } 4728 }
@@ -3385,21 +4742,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3385 4742
3386 psli = &phba->sli; 4743 psli = &phba->sli;
3387 4744
3388 mb = &pmbox->mb; 4745 mb = &pmbox->u.mb;
3389 status = MBX_SUCCESS; 4746 status = MBX_SUCCESS;
3390 4747
3391 if (phba->link_state == LPFC_HBA_ERROR) { 4748 if (phba->link_state == LPFC_HBA_ERROR) {
3392 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4749 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3393 4750
3394 /* Mbox command <mbxCommand> cannot issue */ 4751 /* Mbox command <mbxCommand> cannot issue */
3395 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4752 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4753 "(%d):0311 Mailbox command x%x cannot "
4754 "issue Data: x%x x%x\n",
4755 pmbox->vport ? pmbox->vport->vpi : 0,
4756 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3396 goto out_not_finished; 4757 goto out_not_finished;
3397 } 4758 }
3398 4759
3399 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 4760 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
3400 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 4761 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
3401 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4762 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3402 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4763 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4764 "(%d):2528 Mailbox command x%x cannot "
4765 "issue Data: x%x x%x\n",
4766 pmbox->vport ? pmbox->vport->vpi : 0,
4767 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3403 goto out_not_finished; 4768 goto out_not_finished;
3404 } 4769 }
3405 4770
@@ -3413,14 +4778,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3413 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4778 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3414 4779
3415 /* Mbox command <mbxCommand> cannot issue */ 4780 /* Mbox command <mbxCommand> cannot issue */
3416 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4781 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4782 "(%d):2529 Mailbox command x%x "
4783 "cannot issue Data: x%x x%x\n",
4784 pmbox->vport ? pmbox->vport->vpi : 0,
4785 pmbox->u.mb.mbxCommand,
4786 psli->sli_flag, flag);
3417 goto out_not_finished; 4787 goto out_not_finished;
3418 } 4788 }
3419 4789
3420 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 4790 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
3421 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4791 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3422 /* Mbox command <mbxCommand> cannot issue */ 4792 /* Mbox command <mbxCommand> cannot issue */
3423 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4793 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4794 "(%d):2530 Mailbox command x%x "
4795 "cannot issue Data: x%x x%x\n",
4796 pmbox->vport ? pmbox->vport->vpi : 0,
4797 pmbox->u.mb.mbxCommand,
4798 psli->sli_flag, flag);
3424 goto out_not_finished; 4799 goto out_not_finished;
3425 } 4800 }
3426 4801
@@ -3462,12 +4837,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3462 4837
3463 /* If we are not polling, we MUST be in SLI2 mode */ 4838 /* If we are not polling, we MUST be in SLI2 mode */
3464 if (flag != MBX_POLL) { 4839 if (flag != MBX_POLL) {
3465 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 4840 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
3466 (mb->mbxCommand != MBX_KILL_BOARD)) { 4841 (mb->mbxCommand != MBX_KILL_BOARD)) {
3467 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4842 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3468 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4843 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3469 /* Mbox command <mbxCommand> cannot issue */ 4844 /* Mbox command <mbxCommand> cannot issue */
3470 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4846 "(%d):2531 Mailbox command x%x "
4847 "cannot issue Data: x%x x%x\n",
4848 pmbox->vport ? pmbox->vport->vpi : 0,
4849 pmbox->u.mb.mbxCommand,
4850 psli->sli_flag, flag);
3471 goto out_not_finished; 4851 goto out_not_finished;
3472 } 4852 }
3473 /* timeout active mbox command */ 4853 /* timeout active mbox command */
@@ -3506,7 +4886,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3506 /* next set own bit for the adapter and copy over command word */ 4886 /* next set own bit for the adapter and copy over command word */
3507 mb->mbxOwner = OWN_CHIP; 4887 mb->mbxOwner = OWN_CHIP;
3508 4888
3509 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4889 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3510 /* First copy command data to host SLIM area */ 4890 /* First copy command data to host SLIM area */
3511 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4891 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
3512 } else { 4892 } else {
@@ -3529,7 +4909,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3529 4909
3530 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4910 if (mb->mbxCommand == MBX_CONFIG_PORT) {
3531 /* switch over to host mailbox */ 4911 /* switch over to host mailbox */
3532 psli->sli_flag |= LPFC_SLI2_ACTIVE; 4912 psli->sli_flag |= LPFC_SLI_ACTIVE;
3533 } 4913 }
3534 } 4914 }
3535 4915
@@ -3552,7 +4932,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3552 writel(CA_MBATT, phba->CAregaddr); 4932 writel(CA_MBATT, phba->CAregaddr);
3553 readl(phba->CAregaddr); /* flush */ 4933 readl(phba->CAregaddr); /* flush */
3554 4934
3555 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4935 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3556 /* First read mbox status word */ 4936 /* First read mbox status word */
3557 word0 = *((uint32_t *)phba->mbox); 4937 word0 = *((uint32_t *)phba->mbox);
3558 word0 = le32_to_cpu(word0); 4938 word0 = le32_to_cpu(word0);
@@ -3591,7 +4971,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3591 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4971 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3592 } 4972 }
3593 4973
3594 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4974 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3595 /* First copy command data */ 4975 /* First copy command data */
3596 word0 = *((uint32_t *)phba->mbox); 4976 word0 = *((uint32_t *)phba->mbox);
3597 word0 = le32_to_cpu(word0); 4977 word0 = le32_to_cpu(word0);
@@ -3604,7 +4984,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3604 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 4984 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
3605 && slimmb->mbxStatus) { 4985 && slimmb->mbxStatus) {
3606 psli->sli_flag &= 4986 psli->sli_flag &=
3607 ~LPFC_SLI2_ACTIVE; 4987 ~LPFC_SLI_ACTIVE;
3608 word0 = slimword0; 4988 word0 = slimword0;
3609 } 4989 }
3610 } 4990 }
@@ -3616,7 +4996,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3616 ha_copy = readl(phba->HAregaddr); 4996 ha_copy = readl(phba->HAregaddr);
3617 } 4997 }
3618 4998
3619 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4999 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3620 /* copy results back to user */ 5000 /* copy results back to user */
3621 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5001 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
3622 } else { 5002 } else {
@@ -3643,13 +5023,420 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3643 5023
3644out_not_finished: 5024out_not_finished:
3645 if (processing_queue) { 5025 if (processing_queue) {
3646 pmbox->mb.mbxStatus = MBX_NOT_FINISHED; 5026 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
3647 lpfc_mbox_cmpl_put(phba, pmbox); 5027 lpfc_mbox_cmpl_put(phba, pmbox);
3648 } 5028 }
3649 return MBX_NOT_FINISHED; 5029 return MBX_NOT_FINISHED;
3650} 5030}
3651 5031
3652/** 5032/**
5033 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5034 * @phba: Pointer to HBA context object.
5035 * @mboxq: Pointer to mailbox object.
5036 *
5037 * The function posts a mailbox to the port. The mailbox is expected
 5038 * to be completely filled in and ready for the port to operate on it.
5039 * This routine executes a synchronous completion operation on the
5040 * mailbox by polling for its completion.
5041 *
5042 * The caller must not be holding any locks when calling this routine.
5043 *
5044 * Returns:
5045 * MBX_SUCCESS - mailbox posted successfully
5046 * Any of the MBX error values.
5047 **/
5048static int
5049lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5050{
5051 int rc = MBX_SUCCESS;
5052 unsigned long iflag;
5053 uint32_t db_ready;
5054 uint32_t mcqe_status;
5055 uint32_t mbx_cmnd;
5056 unsigned long timeout;
5057 struct lpfc_sli *psli = &phba->sli;
5058 struct lpfc_mqe *mb = &mboxq->u.mqe;
5059 struct lpfc_bmbx_create *mbox_rgn;
5060 struct dma_address *dma_address;
5061 struct lpfc_register bmbx_reg;
5062
5063 /*
5064 * Only one mailbox can be active to the bootstrap mailbox region
5065 * at a time and there is no queueing provided.
5066 */
5067 spin_lock_irqsave(&phba->hbalock, iflag);
5068 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5069 spin_unlock_irqrestore(&phba->hbalock, iflag);
5070 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5071 "(%d):2532 Mailbox command x%x (x%x) "
5072 "cannot issue Data: x%x x%x\n",
5073 mboxq->vport ? mboxq->vport->vpi : 0,
5074 mboxq->u.mb.mbxCommand,
5075 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5076 psli->sli_flag, MBX_POLL);
5077 return MBXERR_ERROR;
5078 }
5079 /* The server grabs the token and owns it until release */
5080 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5081 phba->sli.mbox_active = mboxq;
5082 spin_unlock_irqrestore(&phba->hbalock, iflag);
5083
5084 /*
5085 * Initialize the bootstrap memory region to avoid stale data areas
5086 * in the mailbox post. Then copy the caller's mailbox contents to
5087 * the bmbx mailbox region.
5088 */
5089 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5090 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5091 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5092 sizeof(struct lpfc_mqe));
5093
5094 /* Post the high mailbox dma address to the port and wait for ready. */
5095 dma_address = &phba->sli4_hba.bmbx.dma_address;
5096 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5097
5098 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5099 * 1000) + jiffies;
5100 do {
5101 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5102 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5103 if (!db_ready)
5104 msleep(2);
5105
5106 if (time_after(jiffies, timeout)) {
5107 rc = MBXERR_ERROR;
5108 goto exit;
5109 }
5110 } while (!db_ready);
5111
5112 /* Post the low mailbox dma address to the port. */
5113 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5114 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5115 * 1000) + jiffies;
5116 do {
5117 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5118 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5119 if (!db_ready)
5120 msleep(2);
5121
5122 if (time_after(jiffies, timeout)) {
5123 rc = MBXERR_ERROR;
5124 goto exit;
5125 }
5126 } while (!db_ready);
5127
5128 /*
5129 * Read the CQ to ensure the mailbox has completed.
5130 * If so, update the mailbox status so that the upper layers
5131 * can complete the request normally.
5132 */
5133 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5134 sizeof(struct lpfc_mqe));
5135 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5136 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5137 sizeof(struct lpfc_mcqe));
5138 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5139
5140 /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5141 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5142 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5143 rc = MBXERR_ERROR;
5144 }
5145
5146 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5147 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5148 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5149 " x%x x%x CQ: x%x x%x x%x x%x\n",
5150 mboxq->vport ? mboxq->vport->vpi : 0,
5151 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5152 bf_get(lpfc_mqe_status, mb),
5153 mb->un.mb_words[0], mb->un.mb_words[1],
5154 mb->un.mb_words[2], mb->un.mb_words[3],
5155 mb->un.mb_words[4], mb->un.mb_words[5],
5156 mb->un.mb_words[6], mb->un.mb_words[7],
5157 mb->un.mb_words[8], mb->un.mb_words[9],
5158 mb->un.mb_words[10], mb->un.mb_words[11],
5159 mb->un.mb_words[12], mboxq->mcqe.word0,
5160 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5161 mboxq->mcqe.trailer);
5162exit:
 5163	/* We are holding the token, no lock needed when releasing it */
5164 spin_lock_irqsave(&phba->hbalock, iflag);
5165 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5166 phba->sli.mbox_active = NULL;
5167 spin_unlock_irqrestore(&phba->hbalock, iflag);
5168 return rc;
5169}
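
The two identical wait loops above poll the bootstrap mailbox doorbell until the port reports ready or the mailbox timeout expires. They could equally be factored into one helper; the sketch below is hypothetical (lpfc_bmbx_wait_ready() is not part of this patch) and reuses only symbols visible in the code above:

	static int lpfc_bmbx_wait_ready(struct lpfc_hba *phba, unsigned long timeout)
	{
		struct lpfc_register bmbx_reg;

		do {
			bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
			if (bf_get(lpfc_bmbx_rdy, &bmbx_reg))
				return 0;	/* port consumed the posted address */
			msleep(2);
		} while (!time_after(jiffies, timeout));

		return -ETIMEDOUT;		/* real code maps this to MBXERR_ERROR */
	}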
5170
5171/**
5172 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5173 * @phba: Pointer to HBA context object.
5174 * @pmbox: Pointer to mailbox object.
5175 * @flag: Flag indicating how the mailbox need to be processed.
5176 *
5177 * This function is called by discovery code and HBA management code to submit
5178 * a mailbox command to firmware with SLI-4 interface spec.
5179 *
5180 * Return codes the caller owns the mailbox command after the return of the
5181 * function.
5182 **/
5183static int
5184lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5185 uint32_t flag)
5186{
5187 struct lpfc_sli *psli = &phba->sli;
5188 unsigned long iflags;
5189 int rc;
5190
5191 /* Detect polling mode and jump to a handler */
5192 if (!phba->sli4_hba.intr_enable) {
5193 if (flag == MBX_POLL)
5194 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5195 else
5196 rc = -EIO;
5197 if (rc != MBX_SUCCESS)
5198 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5199 "(%d):2541 Mailbox command x%x "
5200 "(x%x) cannot issue Data: x%x x%x\n",
5201 mboxq->vport ? mboxq->vport->vpi : 0,
5202 mboxq->u.mb.mbxCommand,
5203 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5204 psli->sli_flag, flag);
5205 return rc;
5206 } else if (flag == MBX_POLL) {
5207 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5208 "(%d):2542 Mailbox command x%x (x%x) "
5209 "cannot issue Data: x%x x%x\n",
5210 mboxq->vport ? mboxq->vport->vpi : 0,
5211 mboxq->u.mb.mbxCommand,
5212 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5213 psli->sli_flag, flag);
5214 return -EIO;
5215 }
5216
 5217	/* Now, interrupt mode asynchronous mailbox command */
5218 rc = lpfc_mbox_cmd_check(phba, mboxq);
5219 if (rc) {
5220 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5221 "(%d):2543 Mailbox command x%x (x%x) "
5222 "cannot issue Data: x%x x%x\n",
5223 mboxq->vport ? mboxq->vport->vpi : 0,
5224 mboxq->u.mb.mbxCommand,
5225 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5226 psli->sli_flag, flag);
5227 goto out_not_finished;
5228 }
5229 rc = lpfc_mbox_dev_check(phba);
5230 if (unlikely(rc)) {
5231 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5232 "(%d):2544 Mailbox command x%x (x%x) "
5233 "cannot issue Data: x%x x%x\n",
5234 mboxq->vport ? mboxq->vport->vpi : 0,
5235 mboxq->u.mb.mbxCommand,
5236 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5237 psli->sli_flag, flag);
5238 goto out_not_finished;
5239 }
5240
5241 /* Put the mailbox command to the driver internal FIFO */
5242 psli->slistat.mbox_busy++;
5243 spin_lock_irqsave(&phba->hbalock, iflags);
5244 lpfc_mbox_put(phba, mboxq);
5245 spin_unlock_irqrestore(&phba->hbalock, iflags);
5246 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5247 "(%d):0354 Mbox cmd issue - Enqueue Data: "
5248 "x%x (x%x) x%x x%x x%x\n",
5249 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5250 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5251 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5252 phba->pport->port_state,
5253 psli->sli_flag, MBX_NOWAIT);
5254 /* Wake up worker thread to transport mailbox command from head */
5255 lpfc_worker_wake_up(phba);
5256
5257 return MBX_BUSY;
5258
5259out_not_finished:
5260 return MBX_NOT_FINISHED;
5261}
5262
5263/**
5264 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5265 * @phba: Pointer to HBA context object.
5266 *
 5267 * This function is called by the worker thread to send a mailbox command to
5268 * SLI4 HBA firmware.
5269 *
5270 **/
5271int
5272lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5273{
5274 struct lpfc_sli *psli = &phba->sli;
5275 LPFC_MBOXQ_t *mboxq;
5276 int rc = MBX_SUCCESS;
5277 unsigned long iflags;
5278 struct lpfc_mqe *mqe;
5279 uint32_t mbx_cmnd;
5280
 5281	/* Check interrupt mode before posting async mailbox command */
5282 if (unlikely(!phba->sli4_hba.intr_enable))
5283 return MBX_NOT_FINISHED;
5284
5285 /* Check for mailbox command service token */
5286 spin_lock_irqsave(&phba->hbalock, iflags);
5287 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5288 spin_unlock_irqrestore(&phba->hbalock, iflags);
5289 return MBX_NOT_FINISHED;
5290 }
5291 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5292 spin_unlock_irqrestore(&phba->hbalock, iflags);
5293 return MBX_NOT_FINISHED;
5294 }
5295 if (unlikely(phba->sli.mbox_active)) {
5296 spin_unlock_irqrestore(&phba->hbalock, iflags);
5297 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5298 "0384 There is pending active mailbox cmd\n");
5299 return MBX_NOT_FINISHED;
5300 }
5301 /* Take the mailbox command service token */
5302 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5303
5304 /* Get the next mailbox command from head of queue */
5305 mboxq = lpfc_mbox_get(phba);
5306
5307 /* If no more mailbox command waiting for post, we're done */
5308 if (!mboxq) {
5309 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5310 spin_unlock_irqrestore(&phba->hbalock, iflags);
5311 return MBX_SUCCESS;
5312 }
5313 phba->sli.mbox_active = mboxq;
5314 spin_unlock_irqrestore(&phba->hbalock, iflags);
5315
5316 /* Check device readiness for posting mailbox command */
5317 rc = lpfc_mbox_dev_check(phba);
5318 if (unlikely(rc))
5319 /* Driver clean routine will clean up pending mailbox */
5320 goto out_not_finished;
5321
5322 /* Prepare the mbox command to be posted */
5323 mqe = &mboxq->u.mqe;
5324 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5325
5326 /* Start timer for the mbox_tmo and log some mailbox post messages */
5327 mod_timer(&psli->mbox_tmo, (jiffies +
5328 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5329
5330 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5331 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5332 "x%x x%x\n",
5333 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5334 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5335 phba->pport->port_state, psli->sli_flag);
5336
5337 if (mbx_cmnd != MBX_HEARTBEAT) {
5338 if (mboxq->vport) {
5339 lpfc_debugfs_disc_trc(mboxq->vport,
5340 LPFC_DISC_TRC_MBOX_VPORT,
5341 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5342 mbx_cmnd, mqe->un.mb_words[0],
5343 mqe->un.mb_words[1]);
5344 } else {
5345 lpfc_debugfs_disc_trc(phba->pport,
5346 LPFC_DISC_TRC_MBOX,
5347 "MBOX Send: cmd:x%x mb:x%x x%x",
5348 mbx_cmnd, mqe->un.mb_words[0],
5349 mqe->un.mb_words[1]);
5350 }
5351 }
5352 psli->slistat.mbox_cmd++;
5353
5354 /* Post the mailbox command to the port */
5355 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5356 if (rc != MBX_SUCCESS) {
5357 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5358 "(%d):2533 Mailbox command x%x (x%x) "
5359 "cannot issue Data: x%x x%x\n",
5360 mboxq->vport ? mboxq->vport->vpi : 0,
5361 mboxq->u.mb.mbxCommand,
5362 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5363 psli->sli_flag, MBX_NOWAIT);
5364 goto out_not_finished;
5365 }
5366
5367 return rc;
5368
5369out_not_finished:
5370 spin_lock_irqsave(&phba->hbalock, iflags);
5371 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5372 __lpfc_mbox_cmpl_put(phba, mboxq);
5373 /* Release the token */
5374 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5375 phba->sli.mbox_active = NULL;
5376 spin_unlock_irqrestore(&phba->hbalock, iflags);
5377
5378 return MBX_NOT_FINISHED;
5379}
5380
5381/**
5382 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5383 * @phba: Pointer to HBA context object.
5384 * @pmbox: Pointer to mailbox object.
5385 * @flag: Flag indicating how the mailbox need to be processed.
5386 *
 5387 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine via
 5388 * the API jump table function pointer in the lpfc_hba struct.
5389 *
5390 * Return codes the caller owns the mailbox command after the return of the
5391 * function.
5392 **/
5393int
5394lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5395{
5396 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5397}
5398
5399/**
 5400 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
5401 * @phba: The hba struct for which this call is being executed.
5402 * @dev_grp: The HBA PCI-Device group number.
5403 *
5404 * This routine sets up the mbox interface API function jump table in @phba
5405 * struct.
5406 * Returns: 0 - success, -ENODEV - failure.
5407 **/
5408int
5409lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5410{
5411
5412 switch (dev_grp) {
5413 case LPFC_PCI_DEV_LP:
5414 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5415 phba->lpfc_sli_handle_slow_ring_event =
5416 lpfc_sli_handle_slow_ring_event_s3;
5417 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5418 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5419 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5420 break;
5421 case LPFC_PCI_DEV_OC:
5422 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5423 phba->lpfc_sli_handle_slow_ring_event =
5424 lpfc_sli_handle_slow_ring_event_s4;
5425 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5426 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5427 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5428 break;
5429 default:
5430 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5431 "1420 Invalid HBA PCI-device group: 0x%x\n",
5432 dev_grp);
5433 return -ENODEV;
5434 break;
5435 }
5436 return 0;
5437}
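
The table set up here is the whole dispatch mechanism: generic code always calls the lpfc_sli_issue_mbox() wrapper, and the pointer installed at probe time decides whether the SLI-3 or SLI-4 path runs. A stripped-down sketch of the pattern (names simplified, not the driver's actual definitions):

	struct hba_example {
		int (*issue_mbox)(struct hba_example *hba, void *mbox, uint32_t flag);
	};

	static int issue_mbox_s3(struct hba_example *hba, void *mbox, uint32_t flag)
	{
		return 0;	/* SLI-3 specific handling would live here */
	}

	static int issue_mbox_s4(struct hba_example *hba, void *mbox, uint32_t flag)
	{
		return 0;	/* SLI-4 specific handling would live here */
	}

	static int api_table_setup(struct hba_example *hba, int dev_grp)
	{
		switch (dev_grp) {
		case 0:	/* e.g. LPFC_PCI_DEV_LP, SLI-3 parts */
			hba->issue_mbox = issue_mbox_s3;
			break;
		case 1:	/* e.g. LPFC_PCI_DEV_OC, SLI-4 parts */
			hba->issue_mbox = issue_mbox_s4;
			break;
		default:
			return -ENODEV;
		}
		return 0;
	}

	/* Callers never branch on the device group themselves. */
	static int issue_mbox(struct hba_example *hba, void *mbox, uint32_t flag)
	{
		return hba->issue_mbox(hba, mbox, flag);
	}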
5438
5439/**
3653 * __lpfc_sli_ringtx_put - Add an iocb to the txq 5440 * __lpfc_sli_ringtx_put - Add an iocb to the txq
3654 * @phba: Pointer to HBA context object. 5441 * @phba: Pointer to HBA context object.
3655 * @pring: Pointer to driver SLI ring object. 5442 * @pring: Pointer to driver SLI ring object.
@@ -3701,35 +5488,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3701} 5488}
3702 5489
3703/** 5490/**
3704 * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb 5491 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
3705 * @phba: Pointer to HBA context object. 5492 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object. 5493 * @ring_number: SLI ring number to issue iocb on.
3707 * @piocb: Pointer to command iocb. 5494 * @piocb: Pointer to command iocb.
3708 * @flag: Flag indicating if this command can be put into txq. 5495 * @flag: Flag indicating if this command can be put into txq.
3709 * 5496 *
3710 * __lpfc_sli_issue_iocb is used by other functions in the driver 5497 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
3711 * to issue an iocb command to the HBA. If the PCI slot is recovering 5498 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
3712 * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT 5499 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
3713 * flag is turned on, the function returns IOCB_ERROR. 5500 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
3714 * When the link is down, this function allows only iocbs for 5501 * this function allows only iocbs for posting buffers. This function finds
3715 * posting buffers. 5502 * next available slot in the command ring and posts the command to the
3716 * This function finds next available slot in the command ring and 5503 * available slot and writes the port attention register to request HBA start
3717 * posts the command to the available slot and writes the port 5504 * processing new iocb. If there is no slot available in the ring and
3718 * attention register to request HBA start processing new iocb. 5505 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
3719 * If there is no slot available in the ring and 5506 * the function returns IOCB_BUSY.
3720 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the 5507 *
3721 * txq, otherwise the function returns IOCB_BUSY. 5508 * This function is called with hbalock held. The function will return success
3722 * 5509 * after it successfully submit the iocb to firmware or after adding to the
3723 * This function is called with hbalock held. 5510 * txq.
3724 * The function will return success after it successfully submit the
3725 * iocb to firmware or after adding to the txq.
3726 **/ 5511 **/
3727static int 5512static int
3728__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5513__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
3729 struct lpfc_iocbq *piocb, uint32_t flag) 5514 struct lpfc_iocbq *piocb, uint32_t flag)
3730{ 5515{
3731 struct lpfc_iocbq *nextiocb; 5516 struct lpfc_iocbq *nextiocb;
3732 IOCB_t *iocb; 5517 IOCB_t *iocb;
5518 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
3733 5519
3734 if (piocb->iocb_cmpl && (!piocb->vport) && 5520 if (piocb->iocb_cmpl && (!piocb->vport) &&
3735 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 5521 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +5619,498 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3833 return IOCB_BUSY; 5619 return IOCB_BUSY;
3834} 5620}
3835 5621
5622/**
5623 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
5624 * @phba: Pointer to HBA context object.
5625 * @piocb: Pointer to command iocb.
5626 * @sglq: Pointer to the scatter gather queue object.
5627 *
5628 * This routine converts the bpl or bde that is in the IOCB
5629 * to a sgl list for the sli4 hardware. The physical address
5630 * of the bpl/bde is converted back to a virtual address.
5631 * If the IOCB contains a BPL then the list of BDE's is
5632 * converted to sli4_sge's. If the IOCB contains a single
5633 * BDE then it is converted to a single sli_sge.
 5634 * The IOCB is still in cpu endianness so the contents of
5635 * the bpl can be used without byte swapping.
5636 *
5637 * Returns valid XRI = Success, NO_XRI = Failure.
5638**/
5639static uint16_t
5640lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5641 struct lpfc_sglq *sglq)
5642{
5643 uint16_t xritag = NO_XRI;
5644 struct ulp_bde64 *bpl = NULL;
5645 struct ulp_bde64 bde;
5646 struct sli4_sge *sgl = NULL;
5647 IOCB_t *icmd;
5648 int numBdes = 0;
5649 int i = 0;
5650
5651 if (!piocbq || !sglq)
5652 return xritag;
5653
5654 sgl = (struct sli4_sge *)sglq->sgl;
5655 icmd = &piocbq->iocb;
5656 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5657 numBdes = icmd->un.genreq64.bdl.bdeSize /
5658 sizeof(struct ulp_bde64);
5659 /* The addrHigh and addrLow fields within the IOCB
5660 * have not been byteswapped yet so there is no
5661 * need to swap them back.
5662 */
5663 bpl = (struct ulp_bde64 *)
5664 ((struct lpfc_dmabuf *)piocbq->context3)->virt;
5665
5666 if (!bpl)
5667 return xritag;
5668
5669 for (i = 0; i < numBdes; i++) {
5670 /* Should already be byte swapped. */
5671 sgl->addr_hi = bpl->addrHigh;
5672 sgl->addr_lo = bpl->addrLow;
5673 /* swap the size field back to the cpu so we
5674 * can assign it to the sgl.
5675 */
5676 bde.tus.w = le32_to_cpu(bpl->tus.w);
5677 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5678 if ((i+1) == numBdes)
5679 bf_set(lpfc_sli4_sge_last, sgl, 1);
5680 else
5681 bf_set(lpfc_sli4_sge_last, sgl, 0);
5682 sgl->word2 = cpu_to_le32(sgl->word2);
5683 sgl->word3 = cpu_to_le32(sgl->word3);
5684 bpl++;
5685 sgl++;
5686 }
5687 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
5688 /* The addrHigh and addrLow fields of the BDE have not
5689 * been byteswapped yet so they need to be swapped
5690 * before putting them in the sgl.
5691 */
5692 sgl->addr_hi =
5693 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5694 sgl->addr_lo =
5695 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5696 bf_set(lpfc_sli4_sge_len, sgl,
5697 icmd->un.genreq64.bdl.bdeSize);
5698 bf_set(lpfc_sli4_sge_last, sgl, 1);
5699 sgl->word2 = cpu_to_le32(sgl->word2);
5700 sgl->word3 = cpu_to_le32(sgl->word3);
5701 }
5702 return sglq->sli4_xritag;
5703}
5704
5705/**
5706 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5707 * @phba: Pointer to HBA context object.
5708 * @piocb: Pointer to command iocb.
5709 *
5710 * This routine performs a round robin SCSI command to SLI4 FCP WQ index
5711 * distribution.
5712 *
5713 * Return: index into SLI4 fast-path FCP queue index.
5714 **/
5715static uint32_t
5716lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
5717{
5718 static uint32_t fcp_qidx;
5719
5720 return fcp_qidx++ % phba->cfg_fcp_wq_count;
5721}
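
The static fcp_qidx counter persists across calls, so successive FCP commands land on the cfg_fcp_wq_count work queues in turn; the caller (__lpfc_sli_issue_iocb_s4(), below) runs with hbalock held, which serializes the increment. A standalone sketch of the same round-robin distribution using an atomic counter, so it would not depend on an external lock; the names are hypothetical and not part of this patch:

	static atomic_t example_fcp_qidx = ATOMIC_INIT(0);

	static uint32_t example_scmd_to_wqidx(struct lpfc_hba *phba)
	{
		/* the atomic increment hands each caller a unique sequence number */
		return (uint32_t)atomic_inc_return(&example_fcp_qidx) %
			phba->cfg_fcp_wq_count;
	}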
5722
5723/**
5724 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
5725 * @phba: Pointer to HBA context object.
5726 * @piocb: Pointer to command iocb.
5727 * @wqe: Pointer to the work queue entry.
5728 *
5729 * This routine converts the iocb command to its Work Queue Entry
5730 * equivalent. The wqe pointer should not have any fields set when
5731 * this routine is called because it will memcpy over them.
5732 * This routine does not set the CQ_ID or the WQEC bits in the
5733 * wqe.
5734 *
5735 * Returns: 0 = Success, IOCB_ERROR = Failure.
5736 **/
5737static int
5738lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5739 union lpfc_wqe *wqe)
5740{
5741 uint32_t payload_len = 0;
5742 uint8_t ct = 0;
5743 uint32_t fip;
5744 uint32_t abort_tag;
5745 uint8_t command_type = ELS_COMMAND_NON_FIP;
5746 uint8_t cmnd;
5747 uint16_t xritag;
5748 struct ulp_bde64 *bpl = NULL;
5749
5750 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5751 /* The fcp commands will set command type */
5752 if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
5753 command_type = ELS_COMMAND_NON_FIP;
5754 else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
5755 command_type = ELS_COMMAND_FIP;
5756 else if (iocbq->iocb_flag & LPFC_IO_FCP)
5757 command_type = FCP_COMMAND;
5758 else {
5759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5760 "2019 Invalid cmd 0x%x\n",
5761 iocbq->iocb.ulpCommand);
5762 return IOCB_ERROR;
5763 }
5764 /* Some of the fields are in the right position already */
5765 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5766 abort_tag = (uint32_t) iocbq->iotag;
5767 xritag = iocbq->sli4_xritag;
5768 wqe->words[7] = 0; /* The ct field has moved so reset */
5769 /* words0-2 bpl convert bde */
5770 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5771 bpl = (struct ulp_bde64 *)
5772 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5773 if (!bpl)
5774 return IOCB_ERROR;
5775
5776 /* Should already be byte swapped. */
5777 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5778 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5779 /* swap the size field back to the cpu so we
5780 * can assign it to the sgl.
5781 */
5782 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5783 payload_len = wqe->generic.bde.tus.f.bdeSize;
5784 } else
5785 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5786
5787 iocbq->iocb.ulpIoTag = iocbq->iotag;
5788 cmnd = iocbq->iocb.ulpCommand;
5789
5790 switch (iocbq->iocb.ulpCommand) {
5791 case CMD_ELS_REQUEST64_CR:
5792 if (!iocbq->iocb.ulpLe) {
5793 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5794 "2007 Only Limited Edition cmd Format"
5795 " supported 0x%x\n",
5796 iocbq->iocb.ulpCommand);
5797 return IOCB_ERROR;
5798 }
5799 wqe->els_req.payload_len = payload_len;
 5800		/* Els_request64 has a TMO */
5801 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5802 iocbq->iocb.ulpTimeout);
5803 /* Need a VF for word 4 set the vf bit*/
5804 bf_set(els_req64_vf, &wqe->els_req, 0);
5805 /* And a VFID for word 12 */
5806 bf_set(els_req64_vfid, &wqe->els_req, 0);
5807 /*
5808 * Set ct field to 3, indicates that the context_tag field
5809 * contains the FCFI and remote N_Port_ID is
5810 * in word 5.
5811 */
5812
5813 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5814 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5815 iocbq->iocb.ulpContext);
5816
5817 if (iocbq->vport->fc_myDID != 0) {
5818 bf_set(els_req64_sid, &wqe->els_req,
5819 iocbq->vport->fc_myDID);
5820 bf_set(els_req64_sp, &wqe->els_req, 1);
5821 }
5822 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5823 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5824 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5825 break;
5826 case CMD_XMIT_SEQUENCE64_CR:
5827 /* word3 iocb=io_tag32 wqe=payload_offset */
 5828		/* payload offset used for multiple outstanding
5829 * sequences on the same exchange
5830 */
5831 wqe->words[3] = 0;
5832 /* word4 relative_offset memcpy */
5833 /* word5 r_ctl/df_ctl memcpy */
5834 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5835 wqe->xmit_sequence.xmit_len = payload_len;
5836 break;
5837 case CMD_XMIT_BCAST64_CN:
5838 /* word3 iocb=iotag32 wqe=payload_len */
5839 wqe->words[3] = 0; /* no definition for this in wqe */
5840 /* word4 iocb=rsvd wqe=rsvd */
5841 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
5842 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
5843 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5844 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5845 break;
5846 case CMD_FCP_IWRITE64_CR:
5847 command_type = FCP_COMMAND_DATA_OUT;
5848 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
5849 * confusing.
5850 * word3 is payload_len: byte offset to the sgl entry for the
5851 * fcp_command.
5852 * word4 is total xfer len, same as the IOCB->ulpParameter.
5853 * word5 is initial xfer len 0 = wait for xfer-ready
5854 */
5855
5856 /* Always wait for xfer-ready before sending data */
5857 wqe->fcp_iwrite.initial_xfer_len = 0;
5858 /* word 4 (xfer length) should have been set on the memcpy */
5859
5860 /* allow write to fall through to read */
5861 case CMD_FCP_IREAD64_CR:
5862 /* FCP_CMD is always the 1st sgl entry */
5863 wqe->fcp_iread.payload_len =
5864 payload_len + sizeof(struct fcp_rsp);
5865
5866 /* word 4 (xfer length) should have been set on the memcpy */
5867
5868 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5869 iocbq->iocb.ulpFCP2Rcvy);
5870 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5871 /* The XC bit and the XS bit are similar. The driver never
 5872		 * tracked whether or not the exchange was previously open.
5873 * XC = Exchange create, 0 is create. 1 is already open.
5874 * XS = link cmd: 1 do not close the exchange after command.
5875 * XS = 0 close exchange when command completes.
5876 * The only time we would not set the XC bit is when the XS bit
5877 * is set and we are sending our 2nd or greater command on
5878 * this exchange.
5879 */
5880
5881 /* ALLOW read & write to fall through to ICMD64 */
5882 case CMD_FCP_ICMND64_CR:
5883 /* Always open the exchange */
5884 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5885
5886 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5887 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5888 break;
5889 case CMD_GEN_REQUEST64_CR:
5890 /* word3 command length is described as byte offset to the
5891 * rsp_data. Would always be 16, sizeof(struct sli4_sge)
5892 * sgl[0] = cmnd
5893 * sgl[1] = rsp.
5894 *
5895 */
5896 wqe->gen_req.command_len = payload_len;
5897 /* Word4 parameter copied in the memcpy */
5898 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
5899 /* word6 context tag copied in memcpy */
5900 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
5901 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5902 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5903 "2015 Invalid CT %x command 0x%x\n",
5904 ct, iocbq->iocb.ulpCommand);
5905 return IOCB_ERROR;
5906 }
5907 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
5908 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
5909 iocbq->iocb.ulpTimeout);
5910
5911 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5912 command_type = OTHER_COMMAND;
5913 break;
5914 case CMD_XMIT_ELS_RSP64_CX:
5915 /* words0-2 BDE memcpy */
5916 /* word3 iocb=iotag32 wqe=rsvd */
5917 wqe->words[3] = 0;
 5918		/* word4 iocb=did wqe=rsvd. */
5919 wqe->words[4] = 0;
 5920		/* word5 iocb=rsvd wqe=did */
5921 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
5922 iocbq->iocb.un.elsreq64.remoteID);
5923
5924 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5925 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5926
5927 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5928 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5929 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
5930 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5931 iocbq->vport->vpi + phba->vpi_base);
5932 command_type = OTHER_COMMAND;
5933 break;
5934 case CMD_CLOSE_XRI_CN:
5935 case CMD_ABORT_XRI_CN:
5936 case CMD_ABORT_XRI_CX:
 5937		/* words 0-2 memcpy should be 0 (reserved) */
5938 /* port will send abts */
5939 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5940 /*
5941 * The link is down so the fw does not need to send abts
5942 * on the wire.
5943 */
5944 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
5945 else
5946 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
5947 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
5948 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5949 wqe->words[5] = 0;
5950 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5951 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5952 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5953 wqe->generic.abort_tag = abort_tag;
5954 /*
5955 * The abort handler will send us CMD_ABORT_XRI_CN or
5956 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
5957 */
5958 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
5959 cmnd = CMD_ABORT_XRI_CX;
5960 command_type = OTHER_COMMAND;
5961 xritag = 0;
5962 break;
5963 case CMD_XRI_ABORTED_CX:
5964 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
5965 /* words0-2 are all 0's no bde */
 5967		/* word3 and word4 are reserved */
5967 wqe->words[3] = 0;
5968 wqe->words[4] = 0;
 5969		/* word5 iocb=rsvd wqe=did */
5970 /* There is no remote port id in the IOCB? */
5971 /* Let this fall through and fail */
5972 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
5973 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
5974 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
5975 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
5976 default:
5977 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5978 "2014 Invalid command 0x%x\n",
5979 iocbq->iocb.ulpCommand);
5980 return IOCB_ERROR;
5981 break;
5982
5983 }
5984 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
5985 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
5986 wqe->generic.abort_tag = abort_tag;
5987 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
5988 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
5989 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
5990 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
5991
5992 return 0;
5993}
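
The conversion above leans almost entirely on the bf_set()/bf_get() accessors to place values into named bit-fields of the WQE words. The driver's real macros take per-field _SHIFT/_MASK/_WORD definitions; conceptually they reduce to something like the sketch below (illustrative only, not the actual lpfc definitions):

	#define EXAMPLE_FIELD_SHIFT	8		/* bit position of the field */
	#define EXAMPLE_FIELD_MASK	0x000000ff	/* width of the field */

	static inline void example_bf_set(uint32_t *word, uint32_t val)
	{
		*word = (*word & ~(EXAMPLE_FIELD_MASK << EXAMPLE_FIELD_SHIFT)) |
			((val & EXAMPLE_FIELD_MASK) << EXAMPLE_FIELD_SHIFT);
	}

	static inline uint32_t example_bf_get(uint32_t word)
	{
		return (word >> EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK;
	}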
5994
5995/**
5996 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
5997 * @phba: Pointer to HBA context object.
5998 * @ring_number: SLI ring number to issue iocb on.
5999 * @piocb: Pointer to command iocb.
6000 * @flag: Flag indicating if this command can be put into txq.
6001 *
6002 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6003 * an iocb command to an HBA with SLI-4 interface spec.
6004 *
6005 * This function is called with hbalock held. The function will return success
 6006 * after it successfully submits the iocb to firmware or after adding to the
6007 * txq.
6008 **/
6009static int
6010__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6011 struct lpfc_iocbq *piocb, uint32_t flag)
6012{
6013 struct lpfc_sglq *sglq;
6014 uint16_t xritag;
6015 union lpfc_wqe wqe;
6016 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6017 uint32_t fcp_wqidx;
6018
6019 if (piocb->sli4_xritag == NO_XRI) {
6020 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6021 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6022 sglq = NULL;
6023 else {
6024 sglq = __lpfc_sli_get_sglq(phba);
6025 if (!sglq)
6026 return IOCB_ERROR;
6027 piocb->sli4_xritag = sglq->sli4_xritag;
6028 }
6029 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6030 sglq = NULL; /* These IO's already have an XRI and
6031 * a mapped sgl.
6032 */
6033 } else {
 6034		/* This is a continuation of a command (CX), so this
6035 * sglq is on the active list
6036 */
6037 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6038 if (!sglq)
6039 return IOCB_ERROR;
6040 }
6041
6042 if (sglq) {
6043 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
6044 if (xritag != sglq->sli4_xritag)
6045 return IOCB_ERROR;
6046 }
6047
6048 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6049 return IOCB_ERROR;
6050
6051 if (piocb->iocb_flag & LPFC_IO_FCP) {
6052 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
6053 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6054 return IOCB_ERROR;
6055 } else {
6056 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6057 return IOCB_ERROR;
6058 }
6059 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6060
6061 return 0;
6062}
6063
6064/**
6065 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6066 *
 6067 * This routine wraps the actual lockless IOCB issuing routine via the function
 6068 * pointer in the lpfc_hba struct.
6069 *
6070 * Return codes:
6071 * IOCB_ERROR - Error
6072 * IOCB_SUCCESS - Success
6073 * IOCB_BUSY - Busy
6074 **/
6075static inline int
6076__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6077 struct lpfc_iocbq *piocb, uint32_t flag)
6078{
6079 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6080}
6081
6082/**
 6083 * lpfc_sli_api_table_setup - Set up sli api function jump table
6084 * @phba: The hba struct for which this call is being executed.
6085 * @dev_grp: The HBA PCI-Device group number.
6086 *
6087 * This routine sets up the SLI interface API function jump table in @phba
6088 * struct.
6089 * Returns: 0 - success, -ENODEV - failure.
6090 **/
6091int
6092lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6093{
6094
6095 switch (dev_grp) {
6096 case LPFC_PCI_DEV_LP:
6097 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6098 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6099 break;
6100 case LPFC_PCI_DEV_OC:
6101 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6102 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6103 break;
6104 default:
6105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6106 "1419 Invalid HBA PCI-device group: 0x%x\n",
6107 dev_grp);
6108 return -ENODEV;
6109 break;
6110 }
6111 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6112 return 0;
6113}
3836 6114
3837/** 6115/**
3838 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 6116 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +6126,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3848 * functions which do not hold hbalock. 6126 * functions which do not hold hbalock.
3849 **/ 6127 **/
3850int 6128int
3851lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6129lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
3852 struct lpfc_iocbq *piocb, uint32_t flag) 6130 struct lpfc_iocbq *piocb, uint32_t flag)
3853{ 6131{
3854 unsigned long iflags; 6132 unsigned long iflags;
3855 int rc; 6133 int rc;
3856 6134
3857 spin_lock_irqsave(&phba->hbalock, iflags); 6135 spin_lock_irqsave(&phba->hbalock, iflags);
3858 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 6136 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
3859 spin_unlock_irqrestore(&phba->hbalock, iflags); 6137 spin_unlock_irqrestore(&phba->hbalock, iflags);
3860 6138
3861 return rc; 6139 return rc;
@@ -4148,6 +6426,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
4148} 6426}
4149 6427
4150/** 6428/**
6429 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
6430 * @phba: Pointer to HBA context object.
6431 *
6432 * This routine flushes the mailbox command subsystem. It will unconditionally
6433 * flush all the mailbox commands in the three possible stages in the mailbox
6434 * command sub-system: pending mailbox command queue; the outstanding mailbox
 6435 * command; and completed mailbox command queue. It is the caller's responsibility
6436 * to make sure that the driver is in the proper state to flush the mailbox
6437 * command sub-system. Namely, the posting of mailbox commands into the
6438 * pending mailbox command queue from the various clients must be stopped;
 6439 * either the HBA is in a state in which it will never work on the outstanding
6440 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
6441 * mailbox command has been completed.
6442 **/
6443static void
6444lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
6445{
6446 LIST_HEAD(completions);
6447 struct lpfc_sli *psli = &phba->sli;
6448 LPFC_MBOXQ_t *pmb;
6449 unsigned long iflag;
6450
6451 /* Flush all the mailbox commands in the mbox system */
6452 spin_lock_irqsave(&phba->hbalock, iflag);
6453 /* The pending mailbox command queue */
6454 list_splice_init(&phba->sli.mboxq, &completions);
6455 /* The outstanding active mailbox command */
6456 if (psli->mbox_active) {
6457 list_add_tail(&psli->mbox_active->list, &completions);
6458 psli->mbox_active = NULL;
6459 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6460 }
6461 /* The completed mailbox command queue */
6462 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
6463 spin_unlock_irqrestore(&phba->hbalock, iflag);
6464
6465 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
6466 while (!list_empty(&completions)) {
6467 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
6468 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
6469 if (pmb->mbox_cmpl)
6470 pmb->mbox_cmpl(phba, pmb);
6471 }
6472}
6473
6474/**
4151 * lpfc_sli_host_down - Vport cleanup function 6475 * lpfc_sli_host_down - Vport cleanup function
4152 * @vport: Pointer to virtual port object. 6476 * @vport: Pointer to virtual port object.
4153 * 6477 *
@@ -4240,9 +6564,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4240 struct lpfc_sli *psli = &phba->sli; 6564 struct lpfc_sli *psli = &phba->sli;
4241 struct lpfc_sli_ring *pring; 6565 struct lpfc_sli_ring *pring;
4242 struct lpfc_dmabuf *buf_ptr; 6566 struct lpfc_dmabuf *buf_ptr;
4243 LPFC_MBOXQ_t *pmb;
4244 int i;
4245 unsigned long flags = 0; 6567 unsigned long flags = 0;
6568 int i;
6569
6570 /* Shutdown the mailbox command sub-system */
6571 lpfc_sli_mbox_sys_shutdown(phba);
4246 6572
4247 lpfc_hba_down_prep(phba); 6573 lpfc_hba_down_prep(phba);
4248 6574
@@ -4287,28 +6613,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4287 6613
4288 /* Return any active mbox cmds */ 6614 /* Return any active mbox cmds */
4289 del_timer_sync(&psli->mbox_tmo); 6615 del_timer_sync(&psli->mbox_tmo);
4290 spin_lock_irqsave(&phba->hbalock, flags);
4291 6616
4292 spin_lock(&phba->pport->work_port_lock); 6617 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
4293 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6618 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4294 spin_unlock(&phba->pport->work_port_lock); 6619 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
4295 6620
4296 /* Return any pending or completed mbox cmds */ 6621 return 1;
4297 list_splice_init(&phba->sli.mboxq, &completions); 6622}
4298 if (psli->mbox_active) { 6623
4299 list_add_tail(&psli->mbox_active->list, &completions); 6624/**
4300 psli->mbox_active = NULL; 6625 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
4301 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6626 * @phba: Pointer to HBA context object.
4302 } 6627 *
4303 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 6628 * This function cleans up all queues, iocb, buffers, mailbox commands while
4304 spin_unlock_irqrestore(&phba->hbalock, flags); 6629 * shutting down the SLI4 HBA FCoE function. This function is called with no
6630 * lock held and always returns 1.
6631 *
6632 * This function does the following to cleanup driver FCoE function resources:
6633 * - Free discovery resources for each virtual port
6634 * - Cleanup any pending fabric iocbs
6635 * - Iterate through the iocb txq and free each entry in the list.
6636 * - Free up any buffer posted to the HBA.
6637 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6638 * - Free mailbox commands in the mailbox queue.
6639 **/
6640int
6641lpfc_sli4_hba_down(struct lpfc_hba *phba)
6642{
6643 /* Stop the SLI4 device port */
6644 lpfc_stop_port(phba);
6645
6646 /* Tear down the queues in the HBA */
6647 lpfc_sli4_queue_unset(phba);
6648
6649 /* unregister default FCFI from the HBA */
6650 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4305 6651
4306 while (!list_empty(&completions)) {
4307 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
4308 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4309 if (pmb->mbox_cmpl)
4310 pmb->mbox_cmpl(phba,pmb);
4311 }
4312 return 1; 6652 return 1;
4313} 6653}
4314 6654
@@ -4639,7 +6979,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4639 iabt = &abtsiocbp->iocb; 6979 iabt = &abtsiocbp->iocb;
4640 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 6980 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
4641 iabt->un.acxri.abortContextTag = icmd->ulpContext; 6981 iabt->un.acxri.abortContextTag = icmd->ulpContext;
4642 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 6982 if (phba->sli_rev == LPFC_SLI_REV4)
6983 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
6984 else
6985 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
4643 iabt->ulpLe = 1; 6986 iabt->ulpLe = 1;
4644 iabt->ulpClass = icmd->ulpClass; 6987 iabt->ulpClass = icmd->ulpClass;
4645 6988
@@ -4655,7 +6998,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4655 "abort cmd iotag x%x\n", 6998 "abort cmd iotag x%x\n",
4656 iabt->un.acxri.abortContextTag, 6999 iabt->un.acxri.abortContextTag,
4657 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 7000 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
4658 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 7001 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
4659 7002
4660 if (retval) 7003 if (retval)
4661 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7004 __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -4838,7 +7181,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4838 cmd = &iocbq->iocb; 7181 cmd = &iocbq->iocb;
4839 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 7182 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
4840 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 7183 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
4841 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 7184 if (phba->sli_rev == LPFC_SLI_REV4)
7185 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
7186 else
7187 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
4842 abtsiocb->iocb.ulpLe = 1; 7188 abtsiocb->iocb.ulpLe = 1;
4843 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7189 abtsiocb->iocb.ulpClass = cmd->ulpClass;
4844 abtsiocb->vport = phba->pport; 7190 abtsiocb->vport = phba->pport;
@@ -4850,7 +7196,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4850 7196
4851 /* Setup callback routine and issue the command. */ 7197 /* Setup callback routine and issue the command. */
4852 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 7198 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4853 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 7199 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
7200 abtsiocb, 0);
4854 if (ret_val == IOCB_ERROR) { 7201 if (ret_val == IOCB_ERROR) {
4855 lpfc_sli_release_iocbq(phba, abtsiocb); 7202 lpfc_sli_release_iocbq(phba, abtsiocb);
4856 errcnt++; 7203 errcnt++;
@@ -4931,7 +7278,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
4931 **/ 7278 **/
4932int 7279int
4933lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 7280lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4934 struct lpfc_sli_ring *pring, 7281 uint32_t ring_number,
4935 struct lpfc_iocbq *piocb, 7282 struct lpfc_iocbq *piocb,
4936 struct lpfc_iocbq *prspiocbq, 7283 struct lpfc_iocbq *prspiocbq,
4937 uint32_t timeout) 7284 uint32_t timeout)
@@ -4962,7 +7309,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4962 readl(phba->HCregaddr); /* flush */ 7309 readl(phba->HCregaddr); /* flush */
4963 } 7310 }
4964 7311
4965 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 7312 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
4966 if (retval == IOCB_SUCCESS) { 7313 if (retval == IOCB_SUCCESS) {
4967 timeout_req = timeout * HZ; 7314 timeout_req = timeout * HZ;
4968 timeleft = wait_event_timeout(done_q, 7315 timeleft = wait_event_timeout(done_q,
@@ -5077,53 +7424,156 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
5077} 7424}
5078 7425
5079/** 7426/**
5080 * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function 7427 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
5081 * @phba: Pointer to HBA context. 7428 * @phba: Pointer to HBA context.
5082 * 7429 *
5083 * This function is called to clean up any pending mailbox 7430 * This function is called to shut down the driver's mailbox sub-system.
5084 * objects in the driver queue before bringing the HBA offline. 7431 * It first marks the mailbox sub-system as blocked to prevent
5085 * This function is called while resetting the HBA. 7432 * asynchronous mailbox commands from being issued off the pending mailbox
5086 * The function is called without any lock held. The function 7433 * command queue. If the mailbox command sub-system shutdown is due to
5087 * takes hbalock to update SLI data structure. 7434 * HBA error conditions such as EEH or ERATT, this routine shall invoke
5088 * This function returns 1 when there is an active mailbox 7435 * the mailbox sub-system flush routine to forcefully bring down the
5089 * command pending else returns 0. 7436 * mailbox sub-system. Otherwise, if it is due to normal condition (such
7437 * as with offline or HBA function reset), this routine will wait for the
7438 * outstanding mailbox command to complete before invoking the mailbox
7439 * sub-system flush routine to gracefully bring down mailbox sub-system.
5090 **/ 7440 **/
5091int 7441void
5092lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 7442lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
5093{ 7443{
5094 struct lpfc_vport *vport = phba->pport; 7444 struct lpfc_sli *psli = &phba->sli;
5095 int i = 0; 7445 uint8_t actcmd = MBX_HEARTBEAT;
5096 uint32_t ha_copy; 7446 unsigned long timeout;
5097 7447
5098 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 7448 spin_lock_irq(&phba->hbalock);
5099 if (i++ > LPFC_MBOX_TMO * 1000) 7449 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5100 return 1; 7450 spin_unlock_irq(&phba->hbalock);
5101 7451
5102 /* 7452 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5103 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
5104 * did finish. This way we won't get the misleading
5105 * "Stray Mailbox Interrupt" message.
5106 */
5107 spin_lock_irq(&phba->hbalock); 7453 spin_lock_irq(&phba->hbalock);
5108 ha_copy = phba->work_ha; 7454 if (phba->sli.mbox_active)
5109 phba->work_ha &= ~HA_MBATT; 7455 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5110 spin_unlock_irq(&phba->hbalock); 7456 spin_unlock_irq(&phba->hbalock);
7457 /* Determine how long we might wait for the active mailbox
7458 * command to be gracefully completed by firmware.
7459 */
7460 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
7461 1000) + jiffies;
7462 while (phba->sli.mbox_active) {
7463 /* Check active mailbox complete status every 2ms */
7464 msleep(2);
7465 if (time_after(jiffies, timeout))
7466 /* Timeout, let the mailbox flush routine
7467 * forcefully release the active mailbox command
7468 */
7469 break;
7470 }
7471 }
7472 lpfc_sli_mbox_sys_flush(phba);
7473}
7474
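
The shutdown routine above first blocks new asynchronous mailbox commands, then polls the active command every 2 ms until it completes or a per-command timeout expires, and finally flushes the sub-system. The standalone C sketch below mimics that wait-then-flush pattern in userspace; mbox_active, mbox_timeout_ms and mbox_flush() are invented stand-ins (not the driver's API) and the timeout value is arbitrary.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Invented stand-ins for driver state; not the lpfc API. */
static volatile bool mbox_active = true;

static long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static void mbox_flush(void)
{
        /* Forcefully complete whatever is still outstanding. */
        mbox_active = false;
        puts("mailbox sub-system flushed");
}

int main(void)
{
        const long mbox_timeout_ms = 50;  /* arbitrary per-command timeout */
        long deadline = now_ms() + mbox_timeout_ms;

        /* Wait for the active command to complete, checking every 2 ms;
         * nothing completes it here, so this exercises the timeout path. */
        while (mbox_active) {
                struct timespec two_ms = { 0, 2 * 1000 * 1000 };

                nanosleep(&two_ms, NULL);
                if (now_ms() > deadline)
                        break;  /* give up; the flush below cleans up */
        }
        mbox_flush();
        return 0;
}
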
7475/**
7476 * lpfc_sli_eratt_read - read sli-3 error attention events
7477 * @phba: Pointer to HBA context.
7478 *
7479 * This function is called to read the SLI3 device error attention registers
7480 * for possible error attention events. The caller must hold the hostlock
7481 * with spin_lock_irq().
7482 *
7483 * This function returns 1 when there is Error Attention in the Host Attention
7484 * Register and returns 0 otherwise.
7485 **/
7486static int
7487lpfc_sli_eratt_read(struct lpfc_hba *phba)
7488{
7489 uint32_t ha_copy;
5111 7490
5112 if (ha_copy & HA_MBATT) 7491 /* Read chip Host Attention (HA) register */
5113 if (lpfc_sli_handle_mb_event(phba) == 0) 7492 ha_copy = readl(phba->HAregaddr);
5114 i = 0; 7493 if (ha_copy & HA_ERATT) {
7494 /* Read host status register to retrieve error event */
7495 lpfc_sli_read_hs(phba);
7496
7497 /* Check if a deferred error condition is active */
7498 if ((HS_FFER1 & phba->work_hs) &&
7499 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7500 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7501 spin_lock_irq(&phba->hbalock);
7502 phba->hba_flag |= DEFER_ERATT;
7503 spin_unlock_irq(&phba->hbalock);
7504 /* Clear all interrupt enable conditions */
7505 writel(0, phba->HCregaddr);
7506 readl(phba->HCregaddr);
7507 }
5115 7508
5116 msleep(1); 7509 /* Set the driver HA work bitmap */
7510 spin_lock_irq(&phba->hbalock);
7511 phba->work_ha |= HA_ERATT;
7512 /* Indicate polling handles this ERATT */
7513 phba->hba_flag |= HBA_ERATT_HANDLED;
7514 spin_unlock_irq(&phba->hbalock);
7515 return 1;
5117 } 7516 }
7517 return 0;
7518}
7519
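
The deferred-error test in lpfc_sli_eratt_read() requires HS_FFER1 to be set together with at least one of HS_FFER2..HS_FFER7 before DEFER_ERATT is raised and interrupts are masked. A self-contained sketch of that bit test follows; the bit values here are made up for illustration (the real HS_FFERx definitions live in the driver's hardware headers).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; not the real register layout. */
#define HS_FFER1 0x80000000u
#define HS_FFER2 0x40000000u
#define HS_FFER3 0x20000000u
#define HS_FFER4 0x10000000u
#define HS_FFER5 0x08000000u
#define HS_FFER6 0x04000000u
#define HS_FFER7 0x02000000u

static bool deferred_erratt(uint32_t work_hs)
{
        /* FFER1 must be set together with at least one of FFER2..FFER7. */
        return (work_hs & HS_FFER1) &&
               (work_hs & (HS_FFER2 | HS_FFER3 | HS_FFER4 |
                           HS_FFER5 | HS_FFER6 | HS_FFER7));
}

int main(void)
{
        printf("%d\n", deferred_erratt(HS_FFER1 | HS_FFER3));  /* 1: deferred */
        printf("%d\n", deferred_erratt(HS_FFER1));             /* 0: FFER1 alone */
        return 0;
}
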
7520/**
7521 * lpfc_sli4_eratt_read - read sli-4 error attention events
7522 * @phba: Pointer to HBA context.
7523 *
7524 * This function is called to read the SLI4 device error attention registers
7525 * for possible error attention events. The caller must hold the hostlock
7526 * with spin_lock_irq().
7527 *
7528 * This function returns 1 when there is Error Attention in the Host Attention
7529 * Register and returns 0 otherwise.
7530 **/
7531static int
7532lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7533{
7534 uint32_t uerr_sta_hi, uerr_sta_lo;
7535 uint32_t onlnreg0, onlnreg1;
5118 7536
5119 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 7537 /* For now, use the SLI4 device internal unrecoverable error
7538 * registers for error attention. This can be changed later.
7539 */
7540 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
7541 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
7542 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
7543 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7544 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7545 if (uerr_sta_lo || uerr_sta_hi) {
7546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7547 "1423 HBA Unrecoverable error: "
7548 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7549 "online0_reg=0x%x, online1_reg=0x%x\n",
7550 uerr_sta_lo, uerr_sta_hi,
7551 onlnreg0, onlnreg1);
7552 /* TEMP: as the driver error recovery logic is not
7553 * fully developed, we just log the error message
7554 * and the device error attention action is now
7555 * temporarily disabled.
7556 */
7557 return 0;
7558 phba->work_status[0] = uerr_sta_lo;
7559 phba->work_status[1] = uerr_sta_hi;
7560 spin_lock_irq(&phba->hbalock);
7561 /* Set the driver HA work bitmap */
7562 phba->work_ha |= HA_ERATT;
7563 /* Indicate polling handles this ERATT */
7564 phba->hba_flag |= HBA_ERATT_HANDLED;
7565 spin_unlock_irq(&phba->hbalock);
7566 return 1;
7567 }
7568 }
7569 return 0;
5120} 7570}
5121 7571
5122/** 7572/**
5123 * lpfc_sli_check_eratt - check error attention events 7573 * lpfc_sli_check_eratt - check error attention events
5124 * @phba: Pointer to HBA context. 7574 * @phba: Pointer to HBA context.
5125 * 7575 *
5126 * This function is called form timer soft interrupt context to check HBA's 7576 * This function is called from timer soft interrupt context to check HBA's
5127 * error attention register bit for error attention events. 7577 * error attention register bit for error attention events.
5128 * 7578 *
5129 * This function returns 1 when there is Error Attention in the Host Attention 7579 * This function returns 1 when there is Error Attention in the Host Attention
@@ -5134,10 +7584,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5134{ 7584{
5135 uint32_t ha_copy; 7585 uint32_t ha_copy;
5136 7586
5137 /* If PCI channel is offline, don't process it */
5138 if (unlikely(pci_channel_offline(phba->pcidev)))
5139 return 0;
5140
5141 /* If somebody is waiting to handle an eratt, don't process it 7587 /* If somebody is waiting to handle an eratt, don't process it
5142 * here. The brdkill function will do this. 7588 * here. The brdkill function will do this.
5143 */ 7589 */
@@ -5161,56 +7607,84 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5161 return 0; 7607 return 0;
5162 } 7608 }
5163 7609
5164 /* Read chip Host Attention (HA) register */ 7610 /* If PCI channel is offline, don't process it */
5165 ha_copy = readl(phba->HAregaddr); 7611 if (unlikely(pci_channel_offline(phba->pcidev))) {
5166 if (ha_copy & HA_ERATT) {
5167 /* Read host status register to retrieve error event */
5168 lpfc_sli_read_hs(phba);
5169
5170 /* Check if a deferred error condition is active */
5171 if ((HS_FFER1 & phba->work_hs) &&
5172 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5173 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5174 phba->hba_flag |= DEFER_ERATT;
5175 /* Clear all interrupt enable conditions */
5176 writel(0, phba->HCregaddr);
5177 readl(phba->HCregaddr);
5178 }
5179
5180 /* Set the driver HA work bitmap */
5181 phba->work_ha |= HA_ERATT;
5182 /* Indicate polling handles this ERATT */
5183 phba->hba_flag |= HBA_ERATT_HANDLED;
5184 spin_unlock_irq(&phba->hbalock); 7612 spin_unlock_irq(&phba->hbalock);
5185 return 1; 7613 return 0;
7614 }
7615
7616 switch (phba->sli_rev) {
7617 case LPFC_SLI_REV2:
7618 case LPFC_SLI_REV3:
7619 /* Read chip Host Attention (HA) register */
7620 ha_copy = lpfc_sli_eratt_read(phba);
7621 break;
7622 case LPFC_SLI_REV4:
7623 /* Read device Unrecoverable Error (UERR) registers */
7624 ha_copy = lpfc_sli4_eratt_read(phba);
7625 break;
7626 default:
7627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7628 "0299 Invalid SLI revision (%d)\n",
7629 phba->sli_rev);
7630 ha_copy = 0;
7631 break;
5186 } 7632 }
5187 spin_unlock_irq(&phba->hbalock); 7633 spin_unlock_irq(&phba->hbalock);
7634
7635 return ha_copy;
7636}
7637
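
lpfc_sli_check_eratt() now dispatches on the SLI revision: SLI-2/3 parts read the Host Attention register while SLI-4 parts read the unrecoverable-error registers. A hedged sketch of that dispatch, with placeholder reader functions standing in for the actual register accesses:

#include <stdio.h>

/* Placeholder revision codes and register readers; names are illustrative. */
enum sli_rev { SLI_REV2 = 2, SLI_REV3 = 3, SLI_REV4 = 4 };

static int read_sli3_erratt(void) { return 0; }  /* would read the HA register    */
static int read_sli4_erratt(void) { return 0; }  /* would read the UERR registers */

static int check_erratt(enum sli_rev rev)
{
        switch (rev) {
        case SLI_REV2:
        case SLI_REV3:
                return read_sli3_erratt();
        case SLI_REV4:
                return read_sli4_erratt();
        default:
                fprintf(stderr, "invalid SLI revision (%d)\n", rev);
                return 0;
        }
}

int main(void)
{
        printf("erratt=%d\n", check_erratt(SLI_REV4));
        return 0;
}
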
7638/**
7639 * lpfc_intr_state_check - Check device state for interrupt handling
7640 * @phba: Pointer to HBA context.
7641 *
7642 * This inline routine checks whether a device or its PCI slot is in a state
7643 * in which the interrupt should be handled.
7644 *
7645 * This function returns 0 if the device or the PCI slot is in a state that
7646 * interrupt should be handled, otherwise -EIO.
7647 */
7648static inline int
7649lpfc_intr_state_check(struct lpfc_hba *phba)
7650{
7651 /* If the pci channel is offline, ignore all the interrupts */
7652 if (unlikely(pci_channel_offline(phba->pcidev)))
7653 return -EIO;
7654
7655 /* Update device level interrupt statistics */
7656 phba->sli.slistat.sli_intr++;
7657
7658 /* Ignore all interrupts during initialization. */
7659 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7660 return -EIO;
7661
5188 return 0; 7662 return 0;
5189} 7663}
5190 7664
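
lpfc_intr_state_check() folds the per-handler preamble (PCI channel offline test, interrupt statistics bump, link-state gate) into one helper that every interrupt handler calls first. A userspace analogue with a fabricated device-state struct (not struct lpfc_hba) could look like this:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Fabricated device state for illustration only. */
struct dev_state {
        bool pci_offline;
        int  link_state;           /* < 0 means still initializing */
        unsigned long intr_count;  /* interrupt statistics */
};

static int intr_state_check(struct dev_state *d)
{
        if (d->pci_offline)
                return -EIO;       /* slot in error recovery: ignore interrupt */
        d->intr_count++;           /* device-level interrupt statistics */
        if (d->link_state < 0)
                return -EIO;       /* still initializing: ignore interrupt */
        return 0;                  /* safe to service the interrupt */
}

int main(void)
{
        struct dev_state d = { .pci_offline = false, .link_state = 1 };
        int rc = intr_state_check(&d);

        printf("check=%d count=%lu\n", rc, d.intr_count);
        return 0;
}
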
5191/** 7665/**
5192 * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver 7666 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
5193 * @irq: Interrupt number. 7667 * @irq: Interrupt number.
5194 * @dev_id: The device context pointer. 7668 * @dev_id: The device context pointer.
5195 * 7669 *
5196 * This function is directly called from the PCI layer as an interrupt 7670 * This function is directly called from the PCI layer as an interrupt
5197 * service routine when the device is enabled with MSI-X multi-message 7671 * service routine when device with SLI-3 interface spec is enabled with
5198 * interrupt mode and there are slow-path events in the HBA. However, 7672 * MSI-X multi-message interrupt mode and there are slow-path events in
5199 * when the device is enabled with either MSI or Pin-IRQ interrupt mode, 7673 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
5200 * this function is called as part of the device-level interrupt handler. 7674 * interrupt mode, this function is called as part of the device-level
5201 * When the PCI slot is in error recovery or the HBA is undergoing 7675 * interrupt handler. When the PCI slot is in error recovery or the HBA
5202 * initialization, the interrupt handler will not process the interrupt. 7676 * is undergoing initialization, the interrupt handler will not process
5203 * The link attention and ELS ring attention events are handled by the 7677 * the interrupt. The link attention and ELS ring attention events are
5204 * worker thread. The interrupt handler signals the worker thread and 7678 * handled by the worker thread. The interrupt handler signals the worker
5205 * and returns for these events. This function is called without any 7679 * thread and returns for these events. This function is called without
5206 * lock held. It gets the hbalock to access and update SLI data 7680 * any lock held. It gets the hbalock to access and update SLI data
5207 * structures. 7681 * structures.
5208 * 7682 *
5209 * This function returns IRQ_HANDLED when interrupt is handled else it 7683 * This function returns IRQ_HANDLED when interrupt is handled else it
5210 * returns IRQ_NONE. 7684 * returns IRQ_NONE.
5211 **/ 7685 **/
5212irqreturn_t 7686irqreturn_t
5213lpfc_sp_intr_handler(int irq, void *dev_id) 7687lpfc_sli_sp_intr_handler(int irq, void *dev_id)
5214{ 7688{
5215 struct lpfc_hba *phba; 7689 struct lpfc_hba *phba;
5216 uint32_t ha_copy; 7690 uint32_t ha_copy;
@@ -5240,13 +7714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5240 * individual interrupt handler in MSI-X multi-message interrupt mode 7714 * individual interrupt handler in MSI-X multi-message interrupt mode
5241 */ 7715 */
5242 if (phba->intr_type == MSIX) { 7716 if (phba->intr_type == MSIX) {
5243 /* If the pci channel is offline, ignore all the interrupts */ 7717 /* Check device state for handling interrupt */
5244 if (unlikely(pci_channel_offline(phba->pcidev))) 7718 if (lpfc_intr_state_check(phba))
5245 return IRQ_NONE;
5246 /* Update device-level interrupt statistics */
5247 phba->sli.slistat.sli_intr++;
5248 /* Ignore all interrupts during initialization. */
5249 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5250 return IRQ_NONE; 7719 return IRQ_NONE;
5251 /* Need to read HA REG for slow-path events */ 7720 /* Need to read HA REG for slow-path events */
5252 spin_lock_irqsave(&phba->hbalock, iflag); 7721 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +7740,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5271 * interrupt. 7740 * interrupt.
5272 */ 7741 */
5273 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7742 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5274 spin_unlock_irq(&phba->hbalock); 7743 spin_unlock_irqrestore(&phba->hbalock, iflag);
5275 return IRQ_NONE; 7744 return IRQ_NONE;
5276 } 7745 }
5277 7746
@@ -5364,7 +7833,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5364 7833
5365 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 7834 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
5366 pmb = phba->sli.mbox_active; 7835 pmb = phba->sli.mbox_active;
5367 pmbox = &pmb->mb; 7836 pmbox = &pmb->u.mb;
5368 mbox = phba->mbox; 7837 mbox = phba->mbox;
5369 vport = pmb->vport; 7838 vport = pmb->vport;
5370 7839
@@ -5434,7 +7903,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5434 LOG_MBOX | LOG_SLI, 7903 LOG_MBOX | LOG_SLI,
5435 "0350 rc should have" 7904 "0350 rc should have"
5436 "been MBX_BUSY"); 7905 "been MBX_BUSY");
5437 goto send_current_mbox; 7906 if (rc != MBX_NOT_FINISHED)
7907 goto send_current_mbox;
5438 } 7908 }
5439 } 7909 }
5440 spin_lock_irqsave( 7910 spin_lock_irqsave(
@@ -5471,29 +7941,29 @@ send_current_mbox:
5471 } 7941 }
5472 return IRQ_HANDLED; 7942 return IRQ_HANDLED;
5473 7943
5474} /* lpfc_sp_intr_handler */ 7944} /* lpfc_sli_sp_intr_handler */
5475 7945
5476/** 7946/**
5477 * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver 7947 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
5478 * @irq: Interrupt number. 7948 * @irq: Interrupt number.
5479 * @dev_id: The device context pointer. 7949 * @dev_id: The device context pointer.
5480 * 7950 *
5481 * This function is directly called from the PCI layer as an interrupt 7951 * This function is directly called from the PCI layer as an interrupt
5482 * service routine when the device is enabled with MSI-X multi-message 7952 * service routine when device with SLI-3 interface spec is enabled with
5483 * interrupt mode and there is a fast-path FCP IOCB ring event in the 7953 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
5484 * HBA. However, when the device is enabled with either MSI or Pin-IRQ 7954 * ring event in the HBA. However, when the device is enabled with either
5485 * interrupt mode, this function is called as part of the device-level 7955 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
5486 * interrupt handler. When the PCI slot is in error recovery or the HBA 7956 * device-level interrupt handler. When the PCI slot is in error recovery
5487 * is undergoing initialization, the interrupt handler will not process 7957 * or the HBA is undergoing initialization, the interrupt handler will not
5488 * the interrupt. The SCSI FCP fast-path ring events are handled in the 7958 * process the interrupt. The SCSI FCP fast-path ring events are handled in
5489 * interrupt context. This function is called without any lock held. It 7959 * the interrupt context. This function is called without any lock held.
5490 * gets the hbalock to access and update SLI data structures. 7960 * It gets the hbalock to access and update SLI data structures.
5491 * 7961 *
5492 * This function returns IRQ_HANDLED when interrupt is handled else it 7962 * This function returns IRQ_HANDLED when interrupt is handled else it
5493 * returns IRQ_NONE. 7963 * returns IRQ_NONE.
5494 **/ 7964 **/
5495irqreturn_t 7965irqreturn_t
5496lpfc_fp_intr_handler(int irq, void *dev_id) 7966lpfc_sli_fp_intr_handler(int irq, void *dev_id)
5497{ 7967{
5498 struct lpfc_hba *phba; 7968 struct lpfc_hba *phba;
5499 uint32_t ha_copy; 7969 uint32_t ha_copy;
@@ -5513,13 +7983,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5513 * individual interrupt handler in MSI-X multi-message interrupt mode 7983 * individual interrupt handler in MSI-X multi-message interrupt mode
5514 */ 7984 */
5515 if (phba->intr_type == MSIX) { 7985 if (phba->intr_type == MSIX) {
5516 /* If pci channel is offline, ignore all the interrupts */ 7986 /* Check device state for handling interrupt */
5517 if (unlikely(pci_channel_offline(phba->pcidev))) 7987 if (lpfc_intr_state_check(phba))
5518 return IRQ_NONE;
5519 /* Update device-level interrupt statistics */
5520 phba->sli.slistat.sli_intr++;
5521 /* Ignore all interrupts during initialization. */
5522 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5523 return IRQ_NONE; 7988 return IRQ_NONE;
5524 /* Need to read HA REG for FCP ring and other ring events */ 7989 /* Need to read HA REG for FCP ring and other ring events */
5525 ha_copy = readl(phba->HAregaddr); 7990 ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +7995,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5530 * any interrupt. 7995 * any interrupt.
5531 */ 7996 */
5532 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7997 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5533 spin_unlock_irq(&phba->hbalock); 7998 spin_unlock_irqrestore(&phba->hbalock, iflag);
5534 return IRQ_NONE; 7999 return IRQ_NONE;
5535 } 8000 }
5536 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8001 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +8031,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5566 } 8031 }
5567 } 8032 }
5568 return IRQ_HANDLED; 8033 return IRQ_HANDLED;
5569} /* lpfc_fp_intr_handler */ 8034} /* lpfc_sli_fp_intr_handler */
5570 8035
5571/** 8036/**
5572 * lpfc_intr_handler - The device-level interrupt handler of lpfc driver 8037 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
5573 * @irq: Interrupt number. 8038 * @irq: Interrupt number.
5574 * @dev_id: The device context pointer. 8039 * @dev_id: The device context pointer.
5575 * 8040 *
5576 * This function is the device-level interrupt handler called from the PCI 8041 * This function is the HBA device-level interrupt handler to device with
5577 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is 8042 * SLI-3 interface spec, called from the PCI layer when either MSI or
5578 * an event in the HBA which requires driver attention. This function 8043 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
5579 * invokes the slow-path interrupt attention handling function and fast-path 8044 * requires driver attention. This function invokes the slow-path interrupt
5580 * interrupt attention handling function in turn to process the relevant 8045 * attention handling function and fast-path interrupt attention handling
5581 * HBA attention events. This function is called without any lock held. It 8046 * function in turn to process the relevant HBA attention events. This
5582 * gets the hbalock to access and update SLI data structures. 8047 * function is called without any lock held. It gets the hbalock to access
8048 * and update SLI data structures.
5583 * 8049 *
5584 * This function returns IRQ_HANDLED when interrupt is handled, else it 8050 * This function returns IRQ_HANDLED when interrupt is handled, else it
5585 * returns IRQ_NONE. 8051 * returns IRQ_NONE.
5586 **/ 8052 **/
5587irqreturn_t 8053irqreturn_t
5588lpfc_intr_handler(int irq, void *dev_id) 8054lpfc_sli_intr_handler(int irq, void *dev_id)
5589{ 8055{
5590 struct lpfc_hba *phba; 8056 struct lpfc_hba *phba;
5591 irqreturn_t sp_irq_rc, fp_irq_rc; 8057 irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +8066,8 @@ lpfc_intr_handler(int irq, void *dev_id)
5600 if (unlikely(!phba)) 8066 if (unlikely(!phba))
5601 return IRQ_NONE; 8067 return IRQ_NONE;
5602 8068
5603 /* If the pci channel is offline, ignore all the interrupts. */ 8069 /* Check device state for handling interrupt */
5604 if (unlikely(pci_channel_offline(phba->pcidev))) 8070 if (lpfc_intr_state_check(phba))
5605 return IRQ_NONE;
5606
5607 /* Update device level interrupt statistics */
5608 phba->sli.slistat.sli_intr++;
5609
5610 /* Ignore all interrupts during initialization. */
5611 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5612 return IRQ_NONE; 8071 return IRQ_NONE;
5613 8072
5614 spin_lock(&phba->hbalock); 8073 spin_lock(&phba->hbalock);
@@ -5650,7 +8109,7 @@ lpfc_intr_handler(int irq, void *dev_id)
5650 status2 >>= (4*LPFC_ELS_RING); 8109 status2 >>= (4*LPFC_ELS_RING);
5651 8110
5652 if (status1 || (status2 & HA_RXMASK)) 8111 if (status1 || (status2 & HA_RXMASK))
5653 sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); 8112 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
5654 else 8113 else
5655 sp_irq_rc = IRQ_NONE; 8114 sp_irq_rc = IRQ_NONE;
5656 8115
@@ -5670,10 +8129,3322 @@ lpfc_intr_handler(int irq, void *dev_id)
5670 status2 = 0; 8129 status2 = 0;
5671 8130
5672 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 8131 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
5673 fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); 8132 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
5674 else 8133 else
5675 fp_irq_rc = IRQ_NONE; 8134 fp_irq_rc = IRQ_NONE;
5676 8135
5677 /* Return device-level interrupt handling status */ 8136 /* Return device-level interrupt handling status */
5678 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 8137 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
5679} /* lpfc_intr_handler */ 8138} /* lpfc_sli_intr_handler */
8139
8140/**
8141 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
8142 * @phba: pointer to lpfc hba data structure.
8143 *
8144 * This routine is invoked by the worker thread to process all the pending
8145 * SLI4 FCP abort XRI events.
8146 **/
8147void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
8148{
8149 struct lpfc_cq_event *cq_event;
8150
8151 /* First, declare the fcp xri abort event has been handled */
8152 spin_lock_irq(&phba->hbalock);
8153 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
8154 spin_unlock_irq(&phba->hbalock);
8155 /* Now, handle all the fcp xri abort events */
8156 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
8157 /* Get the first event from the head of the event queue */
8158 spin_lock_irq(&phba->hbalock);
8159 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8160 cq_event, struct lpfc_cq_event, list);
8161 spin_unlock_irq(&phba->hbalock);
8162 /* Notify aborted XRI for FCP work queue */
8163 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8164 /* Free the event processed back to the free pool */
8165 lpfc_sli4_cq_event_release(phba, cq_event);
8166 }
8167}
8168
8169/**
8170 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
8171 * @phba: pointer to lpfc hba data structure.
8172 *
8173 * This routine is invoked by the worker thread to process all the pending
8174 * SLI4 els abort xri events.
8175 **/
8176void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8177{
8178 struct lpfc_cq_event *cq_event;
8179
8180 /* First, declare the els xri abort event has been handled */
8181 spin_lock_irq(&phba->hbalock);
8182 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
8183 spin_unlock_irq(&phba->hbalock);
8184 /* Now, handle all the els xri abort events */
8185 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
8186 /* Get the first event from the head of the event queue */
8187 spin_lock_irq(&phba->hbalock);
8188 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8189 cq_event, struct lpfc_cq_event, list);
8190 spin_unlock_irq(&phba->hbalock);
8191 /* Notify aborted XRI for ELS work queue */
8192 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8193 /* Free the event processed back to the free pool */
8194 lpfc_sli4_cq_event_release(phba, cq_event);
8195 }
8196}
8197
8198static void
8199lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8200 struct lpfc_iocbq *pIocbOut,
8201 struct lpfc_wcqe_complete *wcqe)
8202{
8203 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8204
8205 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8206 sizeof(struct lpfc_iocbq) - offset);
8207 memset(&pIocbIn->sli4_info, 0,
8208 sizeof(struct lpfc_sli4_rspiocb_info));
8209 /* Map WCQE parameters into irspiocb parameters */
8210 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8211 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
8212 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
8213 pIocbIn->iocb.un.fcpi.fcpi_parm =
8214 pIocbOut->iocb.un.fcpi.fcpi_parm -
8215 wcqe->total_data_placed;
8216 else
8217 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8218 else
8219 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8220 /* Load in additional WCQE parameters */
8221 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
8222 pIocbIn->sli4_info.bfield = 0;
8223 if (bf_get(lpfc_wcqe_c_xb, wcqe))
8224 pIocbIn->sli4_info.bfield |= LPFC_XB;
8225 if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
8226 pIocbIn->sli4_info.bfield |= LPFC_PV;
8227 pIocbIn->sli4_info.priority =
8228 bf_get(lpfc_wcqe_c_priority, wcqe);
8229 }
8230}
8231
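
lpfc_sli4_iocb_param_transfer() above copies only the portion of the command iocbq from the embedded iocb field onward into the pseudo response, then overlays status taken from the WCQE. The offsetof()-based partial copy is the interesting part; the toy structure and field names below are invented purely to show that pattern:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy structures standing in for the command/response iocbs. */
struct toy_iocbq {
        int list_link;    /* fields before 'iocb' are deliberately not copied */
        int iocb;         /* response status lives from here onward */
        int payload[4];
};

static void param_transfer(struct toy_iocbq *rsp, const struct toy_iocbq *cmd,
                           int cqe_status)
{
        size_t off = offsetof(struct toy_iocbq, iocb);

        /* Copy only the tail of the command iocb, then overlay the status
         * taken from the completion queue entry. */
        memcpy((char *)rsp + off, (const char *)cmd + off, sizeof(*rsp) - off);
        rsp->iocb = cqe_status;
}

int main(void)
{
        struct toy_iocbq cmd = { 1, 0, { 7, 8, 9, 10 } };
        struct toy_iocbq rsp = { 0 };

        param_transfer(&rsp, &cmd, 3);
        printf("status=%d payload0=%d list_link=%d\n",
               rsp.iocb, rsp.payload[0], rsp.list_link);  /* list_link stays 0 */
        return 0;
}
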
8232/**
8233 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
8234 * @phba: Pointer to HBA context object.
8235 * @cqe: Pointer to mailbox completion queue entry.
8236 *
8237 * This routine processes a mailbox completion queue entry with an asynchronous
8238 * event.
8239 *
8240 * Return: true if work posted to worker thread, otherwise false.
8241 **/
8242static bool
8243lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8244{
8245 struct lpfc_cq_event *cq_event;
8246 unsigned long iflags;
8247
8248 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8249 "0392 Async Event: word0:x%x, word1:x%x, "
8250 "word2:x%x, word3:x%x\n", mcqe->word0,
8251 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
8252
8253 /* Allocate a new internal CQ_EVENT entry */
8254 cq_event = lpfc_sli4_cq_event_alloc(phba);
8255 if (!cq_event) {
8256 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8257 "0394 Failed to allocate CQ_EVENT entry\n");
8258 return false;
8259 }
8260
8261 /* Move the CQE into an asynchronous event entry */
8262 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
8263 spin_lock_irqsave(&phba->hbalock, iflags);
8264 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
8265 /* Set the async event flag */
8266 phba->hba_flag |= ASYNC_EVENT;
8267 spin_unlock_irqrestore(&phba->hbalock, iflags);
8268
8269 return true;
8270}
8271
8272/**
8273 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
8274 * @phba: Pointer to HBA context object.
8275 * @cqe: Pointer to mailbox completion queue entry.
8276 *
8277 * This routine processes a mailbox completion queue entry with a mailbox
8278 * completion event.
8279 *
8280 * Return: true if work posted to worker thread, otherwise false.
8281 **/
8282static bool
8283lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8284{
8285 uint32_t mcqe_status;
8286 MAILBOX_t *mbox, *pmbox;
8287 struct lpfc_mqe *mqe;
8288 struct lpfc_vport *vport;
8289 struct lpfc_nodelist *ndlp;
8290 struct lpfc_dmabuf *mp;
8291 unsigned long iflags;
8292 LPFC_MBOXQ_t *pmb;
8293 bool workposted = false;
8294 int rc;
8295
8296 /* If not a mailbox complete MCQE, out by checking mailbox consume */
8297 if (!bf_get(lpfc_trailer_completed, mcqe))
8298 goto out_no_mqe_complete;
8299
8300 /* Get the reference to the active mbox command */
8301 spin_lock_irqsave(&phba->hbalock, iflags);
8302 pmb = phba->sli.mbox_active;
8303 if (unlikely(!pmb)) {
8304 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8305 "1832 No pending MBOX command to handle\n");
8306 spin_unlock_irqrestore(&phba->hbalock, iflags);
8307 goto out_no_mqe_complete;
8308 }
8309 spin_unlock_irqrestore(&phba->hbalock, iflags);
8310 mqe = &pmb->u.mqe;
8311 pmbox = (MAILBOX_t *)&pmb->u.mqe;
8312 mbox = phba->mbox;
8313 vport = pmb->vport;
8314
8315 /* Reset heartbeat timer */
8316 phba->last_completion_time = jiffies;
8317 del_timer(&phba->sli.mbox_tmo);
8318
8319 /* Move mbox data to caller's mailbox region, do endian swapping */
8320 if (pmb->mbox_cmpl && mbox)
8321 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8322 /* Set the mailbox status with SLI4 range 0x4000 */
8323 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8324 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8325 bf_set(lpfc_mqe_status, mqe,
8326 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8327
8328 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8329 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8330 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8331 "MBOX dflt rpi: status:x%x rpi:x%x",
8332 mcqe_status,
8333 pmbox->un.varWords[0], 0);
8334 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8335 mp = (struct lpfc_dmabuf *)(pmb->context1);
8336 ndlp = (struct lpfc_nodelist *)pmb->context2;
8337 /* Reg_LOGIN of dflt RPI was successful. Now let's get
8338 * rid of the RPI using the same mbox buffer.
8339 */
8340 lpfc_unreg_login(phba, vport->vpi,
8341 pmbox->un.varWords[0], pmb);
8342 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
8343 pmb->context1 = mp;
8344 pmb->context2 = ndlp;
8345 pmb->vport = vport;
8346 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8347 if (rc != MBX_BUSY)
8348 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8349 LOG_SLI, "0385 rc should "
8350 "have been MBX_BUSY\n");
8351 if (rc != MBX_NOT_FINISHED)
8352 goto send_current_mbox;
8353 }
8354 }
8355 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8356 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8357 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8358
8359 /* There is mailbox completion work to do */
8360 spin_lock_irqsave(&phba->hbalock, iflags);
8361 __lpfc_mbox_cmpl_put(phba, pmb);
8362 phba->work_ha |= HA_MBATT;
8363 spin_unlock_irqrestore(&phba->hbalock, iflags);
8364 workposted = true;
8365
8366send_current_mbox:
8367 spin_lock_irqsave(&phba->hbalock, iflags);
8368 /* Release the mailbox command posting token */
8369 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8370 /* Setting the active mailbox pointer needs to be in sync with the flag clear */
8371 phba->sli.mbox_active = NULL;
8372 spin_unlock_irqrestore(&phba->hbalock, iflags);
8373 /* Wake up worker thread to post the next pending mailbox command */
8374 lpfc_worker_wake_up(phba);
8375out_no_mqe_complete:
8376 if (bf_get(lpfc_trailer_consumed, mcqe))
8377 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
8378 return workposted;
8379}
8380
8381/**
8382 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
8383 * @phba: Pointer to HBA context object.
8384 * @cqe: Pointer to mailbox completion queue entry.
8385 *
8386 * This routine processes a mailbox completion queue entry; it invokes the
8387 * proper mailbox completion handling or asynchronous event handling routine
8388 * according to the MCQE's async bit.
8389 *
8390 * Return: true if work posted to worker thread, otherwise false.
8391 **/
8392static bool
8393lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8394{
8395 struct lpfc_mcqe mcqe;
8396 bool workposted;
8397
8398 /* Copy the mailbox MCQE and convert endian order as needed */
8399 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8400
8401 /* Invoke the proper event handling routine */
8402 if (!bf_get(lpfc_trailer_async, &mcqe))
8403 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8404 else
8405 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8406 return workposted;
8407}
8408
8409/**
8410 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
8411 * @phba: Pointer to HBA context object.
8412 * @wcqe: Pointer to work-queue completion queue entry.
8413 *
8414 * This routine handles an ELS work-queue completion event.
8415 *
8416 * Return: true if work posted to worker thread, otherwise false.
8417 **/
8418static bool
8419lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8420 struct lpfc_wcqe_complete *wcqe)
8421{
8422 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8423 struct lpfc_iocbq *cmdiocbq;
8424 struct lpfc_iocbq *irspiocbq;
8425 unsigned long iflags;
8426 bool workposted = false;
8427
8428 spin_lock_irqsave(&phba->hbalock, iflags);
8429 pring->stats.iocb_event++;
8430 /* Look up the ELS command IOCB and create pseudo response IOCB */
8431 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8432 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8433 spin_unlock_irqrestore(&phba->hbalock, iflags);
8434
8435 if (unlikely(!cmdiocbq)) {
8436 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8437 "0386 ELS complete with no corresponding "
8438 "cmdiocb: iotag (%d)\n",
8439 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8440 return workposted;
8441 }
8442
8443 /* Fake the irspiocbq and copy necessary response information */
8444 irspiocbq = lpfc_sli_get_iocbq(phba);
8445 if (!irspiocbq) {
8446 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8447 "0387 Failed to allocate an iocbq\n");
8448 return workposted;
8449 }
8450 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8451
8452 /* Add the irspiocb to the response IOCB work list */
8453 spin_lock_irqsave(&phba->hbalock, iflags);
8454 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
8455 /* Indicate ELS ring attention */
8456 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
8457 spin_unlock_irqrestore(&phba->hbalock, iflags);
8458 workposted = true;
8459
8460 return workposted;
8461}
8462
8463/**
8464 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8465 * @phba: Pointer to HBA context object.
8466 * @wcqe: Pointer to work-queue completion queue entry.
8467 *
8468 * This routine handles a slow-path WQ entry consumed event by invoking the
8469 * proper WQ release routine to the slow-path WQ.
8470 **/
8471static void
8472lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8473 struct lpfc_wcqe_release *wcqe)
8474{
8475 /* Check for the slow-path ELS work queue */
8476 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8477 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8478 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8479 else
8480 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8481 "2579 Slow-path wqe consume event carries "
8482 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8483 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8484 phba->sli4_hba.els_wq->queue_id);
8485}
8486
8487/**
8488 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
8489 * @phba: Pointer to HBA context object.
8490 * @cq: Pointer to a WQ completion queue.
8491 * @wcqe: Pointer to work-queue completion queue entry.
8492 *
8493 * This routine handles an XRI abort event.
8494 *
8495 * Return: true if work posted to worker thread, otherwise false.
8496 **/
8497static bool
8498lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8499 struct lpfc_queue *cq,
8500 struct sli4_wcqe_xri_aborted *wcqe)
8501{
8502 bool workposted = false;
8503 struct lpfc_cq_event *cq_event;
8504 unsigned long iflags;
8505
8506 /* Allocate a new internal CQ_EVENT entry */
8507 cq_event = lpfc_sli4_cq_event_alloc(phba);
8508 if (!cq_event) {
8509 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8510 "0602 Failed to allocate CQ_EVENT entry\n");
8511 return false;
8512 }
8513
8514 /* Move the CQE into the proper xri abort event list */
8515 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
8516 switch (cq->subtype) {
8517 case LPFC_FCP:
8518 spin_lock_irqsave(&phba->hbalock, iflags);
8519 list_add_tail(&cq_event->list,
8520 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
8521 /* Set the fcp xri abort event flag */
8522 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
8523 spin_unlock_irqrestore(&phba->hbalock, iflags);
8524 workposted = true;
8525 break;
8526 case LPFC_ELS:
8527 spin_lock_irqsave(&phba->hbalock, iflags);
8528 list_add_tail(&cq_event->list,
8529 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
8530 /* Set the els xri abort event flag */
8531 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
8532 spin_unlock_irqrestore(&phba->hbalock, iflags);
8533 workposted = true;
8534 break;
8535 default:
8536 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8537 "0603 Invalid work queue CQE subtype (x%x)\n",
8538 cq->subtype);
8539 workposted = false;
8540 break;
8541 }
8542 return workposted;
8543}
8544
8545/**
8546 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8547 * @phba: Pointer to HBA context object.
8548 * @cq: Pointer to the completion queue.
8549 * @wcqe: Pointer to a completion queue entry.
8550 *
8551 * This routine processes a slow-path work-queue completion queue entry.
8552 *
8553 * Return: true if work posted to worker thread, otherwise false.
8554 **/
8555static bool
8556lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8557 struct lpfc_cqe *cqe)
8558{
8559 struct lpfc_wcqe_complete wcqe;
8560 bool workposted = false;
8561
8562 /* Copy the work queue CQE and convert endian order if needed */
8563 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8564
8565 /* Check and process for different type of WCQE and dispatch */
8566 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8567 case CQE_CODE_COMPL_WQE:
8568 /* Process the WQ complete event */
8569 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8570 (struct lpfc_wcqe_complete *)&wcqe);
8571 break;
8572 case CQE_CODE_RELEASE_WQE:
8573 /* Process the WQ release event */
8574 lpfc_sli4_sp_handle_rel_wcqe(phba,
8575 (struct lpfc_wcqe_release *)&wcqe);
8576 break;
8577 case CQE_CODE_XRI_ABORTED:
8578 /* Process the WQ XRI abort event */
8579 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8580 (struct sli4_wcqe_xri_aborted *)&wcqe);
8581 break;
8582 default:
8583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8584 "0388 Not a valid WCQE code: x%x\n",
8585 bf_get(lpfc_wcqe_c_code, &wcqe));
8586 break;
8587 }
8588 return workposted;
8589}
8590
8591/**
8592 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8593 * @phba: Pointer to HBA context object.
8594 * @rcqe: Pointer to receive-queue completion queue entry.
8595 *
8596 * This routine processes a receive-queue completion queue entry.
8597 *
8598 * Return: true if work posted to worker thread, otherwise false.
8599 **/
8600static bool
8601lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8602{
8603 struct lpfc_rcqe rcqe;
8604 bool workposted = false;
8605 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8606 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8607 struct hbq_dmabuf *dma_buf;
8608 uint32_t status;
8609 unsigned long iflags;
8610
8611 /* Copy the receive queue CQE and convert endian order if needed */
8612 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8613 lpfc_sli4_rq_release(hrq, drq);
8614 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8615 goto out;
8616 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8617 goto out;
8618
8619 status = bf_get(lpfc_rcqe_status, &rcqe);
8620 switch (status) {
8621 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8623 "2537 Receive Frame Truncated!!\n");
8624 case FC_STATUS_RQ_SUCCESS:
8625 spin_lock_irqsave(&phba->hbalock, iflags);
8626 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8627 if (!dma_buf) {
8628 spin_unlock_irqrestore(&phba->hbalock, iflags);
8629 goto out;
8630 }
8631 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
8632 /* save off the frame for the worker thread to process */
8633 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
8634 /* Frame received */
8635 phba->hba_flag |= HBA_RECEIVE_BUFFER;
8636 spin_unlock_irqrestore(&phba->hbalock, iflags);
8637 workposted = true;
8638 break;
8639 case FC_STATUS_INSUFF_BUF_NEED_BUF:
8640 case FC_STATUS_INSUFF_BUF_FRM_DISC:
8641 /* Post more buffers if possible */
8642 spin_lock_irqsave(&phba->hbalock, iflags);
8643 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
8644 spin_unlock_irqrestore(&phba->hbalock, iflags);
8645 workposted = true;
8646 break;
8647 }
8648out:
8649 return workposted;
8650
8651}
8652
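
The receive-queue handler above maps the RCQE status onto worker-thread actions: a truncated frame is logged and then queued exactly like a successful one (note the deliberate fall-through), while the insufficient-buffer statuses only ask the worker to post more receive buffers. A compact sketch of that dispatch, with illustrative status values rather than the real FC_STATUS_* codes:

#include <stdio.h>

/* Illustrative status codes; the real FC_STATUS_* values live in the
 * driver's SLI-4 hardware header. */
enum rq_status {
        RQ_SUCCESS          = 0x10,
        RQ_BUF_LEN_EXCEEDED = 0x11,
        RQ_NEED_BUF         = 0x12,
        RQ_FRM_DISC         = 0x13,
};

/* Returns 1 when work was posted for the worker thread, 0 otherwise. */
static int handle_rq_status(enum rq_status status)
{
        switch (status) {
        case RQ_BUF_LEN_EXCEEDED:
                fprintf(stderr, "receive frame truncated\n");
                /* fall through: the truncated frame is still queued */
        case RQ_SUCCESS:
                puts("frame queued for the worker thread");
                return 1;
        case RQ_NEED_BUF:
        case RQ_FRM_DISC:
                puts("ask the worker thread to post more receive buffers");
                return 1;
        default:
                return 0;
        }
}

int main(void)
{
        return !handle_rq_status(RQ_BUF_LEN_EXCEEDED);
}
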
8653/**
8654 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
8655 * @phba: Pointer to HBA context object.
8656 * @eqe: Pointer to fast-path event queue entry.
8657 *
8658 * This routine processes an event queue entry from the slow-path event queue.
8659 * It will check the MajorCode and MinorCode to determine whether this is for a
8660 * completion event on a completion queue, if not, an error shall be logged
8661 * and just return. Otherwise, it will get to the corresponding completion
8662 * queue and process all the entries on that completion queue, rearm the
8663 * completion queue, and then return.
8664 *
8665 **/
8666static void
8667lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8668{
8669 struct lpfc_queue *cq = NULL, *childq, *speq;
8670 struct lpfc_cqe *cqe;
8671 bool workposted = false;
8672 int ecount = 0;
8673 uint16_t cqid;
8674
8675 if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
8676 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8678 "0359 Not a valid slow-path completion "
8679 "event: majorcode=x%x, minorcode=x%x\n",
8680 bf_get(lpfc_eqe_major_code, eqe),
8681 bf_get(lpfc_eqe_minor_code, eqe));
8682 return;
8683 }
8684
8685 /* Get the reference to the corresponding CQ */
8686 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8687
8688 /* Search for completion queue pointer matching this cqid */
8689 speq = phba->sli4_hba.sp_eq;
8690 list_for_each_entry(childq, &speq->child_list, list) {
8691 if (childq->queue_id == cqid) {
8692 cq = childq;
8693 break;
8694 }
8695 }
8696 if (unlikely(!cq)) {
8697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8698 "0365 Slow-path CQ identifier (%d) does "
8699 "not exist\n", cqid);
8700 return;
8701 }
8702
8703 /* Process all the entries to the CQ */
8704 switch (cq->type) {
8705 case LPFC_MCQ:
8706 while ((cqe = lpfc_sli4_cq_get(cq))) {
8707 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
8708 if (!(++ecount % LPFC_GET_QE_REL_INT))
8709 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8710 }
8711 break;
8712 case LPFC_WCQ:
8713 while ((cqe = lpfc_sli4_cq_get(cq))) {
8714 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
8715 if (!(++ecount % LPFC_GET_QE_REL_INT))
8716 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8717 }
8718 break;
8719 case LPFC_RCQ:
8720 while ((cqe = lpfc_sli4_cq_get(cq))) {
8721 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8722 if (!(++ecount % LPFC_GET_QE_REL_INT))
8723 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8724 }
8725 break;
8726 default:
8727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8728 "0370 Invalid completion queue type (%d)\n",
8729 cq->type);
8730 return;
8731 }
8732
8733 /* Catch the no cq entry condition, log an error */
8734 if (unlikely(ecount == 0))
8735 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8736 "0371 No entry from the CQ: identifier "
8737 "(x%x), type (%d)\n", cq->queue_id, cq->type);
8738
8739 /* In any case, flush and re-arm the RCQ */
8740 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8741
8742 /* wake up worker thread if there are works to be done */
8743 if (workposted)
8744 lpfc_worker_wake_up(phba);
8745}
8746
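
The slow-path EQ handler drains each completion queue in a loop, releasing consumed entries back to the hardware without re-arming every LPFC_GET_QE_REL_INT entries so slots free up early, and re-arms once at the end. A simplified array-backed version of that drain loop follows; the queue, doorbell routine and interval are all stand-ins, not the driver's queue API.

#include <stdio.h>

#define QE_REL_INTERVAL 32  /* stand-in for LPFC_GET_QE_REL_INT */

/* Toy completion queue: a fixed batch of pending entry ids. */
static int pending[100];
static int head, tail = 100;

static int *cq_get(void)
{
        return head < tail ? &pending[head++] : NULL;
}

static void cq_release(int consumed, int rearm)
{
        /* In the driver this rings the queue doorbell, optionally re-arming. */
        printf("release %d entries, rearm=%d\n", consumed, rearm);
}

int main(void)
{
        int *cqe, count = 0, since_rel = 0;

        while ((cqe = cq_get()) != NULL) {
                /* ... process *cqe here ... */
                count++;
                since_rel++;
                if (!(count % QE_REL_INTERVAL)) {
                        cq_release(since_rel, 0);  /* free slots early, no re-arm */
                        since_rel = 0;
                }
        }
        cq_release(since_rel, 1);  /* final release re-arms the CQ */
        printf("processed %d entries\n", count);
        return 0;
}
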
8747/**
8748 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
8749 * @eqe: Pointer to fast-path completion queue entry.
8750 *
8751 * This routine processes a fast-path work queue completion entry from the fast-path
8752 * event queue for FCP command response completion.
8753 **/
8754static void
8755lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8756 struct lpfc_wcqe_complete *wcqe)
8757{
8758 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8759 struct lpfc_iocbq *cmdiocbq;
8760 struct lpfc_iocbq irspiocbq;
8761 unsigned long iflags;
8762
8763 spin_lock_irqsave(&phba->hbalock, iflags);
8764 pring->stats.iocb_event++;
8765 spin_unlock_irqrestore(&phba->hbalock, iflags);
8766
8767 /* Check for response status */
8768 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
8769 /* If resource errors reported from HBA, reduce queue
8770 * depth of the SCSI device.
8771 */
8772 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
8773 IOSTAT_LOCAL_REJECT) &&
8774 (wcqe->parameter == IOERR_NO_RESOURCES)) {
8775 phba->lpfc_rampdown_queue_depth(phba);
8776 }
8777 /* Log the error status */
8778 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8779 "0373 FCP complete error: status=x%x, "
8780 "hw_status=x%x, total_data_specified=%d, "
8781 "parameter=x%x, word3=x%x\n",
8782 bf_get(lpfc_wcqe_c_status, wcqe),
8783 bf_get(lpfc_wcqe_c_hw_status, wcqe),
8784 wcqe->total_data_placed, wcqe->parameter,
8785 wcqe->word3);
8786 }
8787
8788 /* Look up the FCP command IOCB and create pseudo response IOCB */
8789 spin_lock_irqsave(&phba->hbalock, iflags);
8790 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8791 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8792 spin_unlock_irqrestore(&phba->hbalock, iflags);
8793 if (unlikely(!cmdiocbq)) {
8794 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8795 "0374 FCP complete with no corresponding "
8796 "cmdiocb: iotag (%d)\n",
8797 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8798 return;
8799 }
8800 if (unlikely(!cmdiocbq->iocb_cmpl)) {
8801 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8802 "0375 FCP cmdiocb not callback function "
8803 "iotag: (%d)\n",
8804 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8805 return;
8806 }
8807
8808 /* Fake the irspiocb and copy necessary response information */
8809 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
8810
8811 /* Pass the cmd_iocb and the rsp state to the upper layer */
8812 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
8813}
8814
8815/**
8816 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
8817 * @phba: Pointer to HBA context object.
8818 * @cq: Pointer to completion queue.
8819 * @wcqe: Pointer to work-queue completion queue entry.
8820 *
8821 * This routine handles a fast-path WQ entry consumed event by invoking the
8822 * proper WQ release routine to the slow-path WQ.
8823 **/
8824static void
8825lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8826 struct lpfc_wcqe_release *wcqe)
8827{
8828 struct lpfc_queue *childwq;
8829 bool wqid_matched = false;
8830 uint16_t fcp_wqid;
8831
8832 /* Check for fast-path FCP work queue release */
8833 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
8834 list_for_each_entry(childwq, &cq->child_list, list) {
8835 if (childwq->queue_id == fcp_wqid) {
8836 lpfc_sli4_wq_release(childwq,
8837 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8838 wqid_matched = true;
8839 break;
8840 }
8841 }
8842 /* Report warning log message if no match found */
8843 if (wqid_matched != true)
8844 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8845 "2580 Fast-path wqe consume event carries "
8846 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
8847}
8848
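
The fast-path release handler above walks the completion queue's child work queues looking for the queue id carried by the CQE and warns when nothing matches. The array-based sketch below stands in for that child-list walk; struct wq and release_matching_wq() are invented names, not driver structures.

#include <stdbool.h>
#include <stdio.h>

/* Invented child work-queue table; the driver walks cq->child_list instead. */
struct wq {
        unsigned int id;
        unsigned int released_index;
};

static bool release_matching_wq(struct wq *wqs, int n, unsigned int wq_id,
                                unsigned int wqe_index)
{
        for (int i = 0; i < n; i++) {
                if (wqs[i].id == wq_id) {
                        wqs[i].released_index = wqe_index;  /* release up to here */
                        return true;
                }
        }
        fprintf(stderr, "consume event carries mismatched qid: 0x%x\n", wq_id);
        return false;
}

int main(void)
{
        struct wq wqs[] = { { 10, 0 }, { 11, 0 } };

        release_matching_wq(wqs, 2, 11, 42);  /* matches the second queue */
        release_matching_wq(wqs, 2, 99, 7);   /* no match: logs a warning */
        return 0;
}
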
8849/**
8850 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
8851 * @cq: Pointer to the completion queue.
8852 * @eqe: Pointer to fast-path completion queue entry.
8853 *
8854 * This routine processes a fast-path work queue completion entry from the fast-path
8855 * event queue for FCP command response completion.
8856 **/
8857static int
8858lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8859 struct lpfc_cqe *cqe)
8860{
8861 struct lpfc_wcqe_release wcqe;
8862 bool workposted = false;
8863
8864 /* Copy the work queue CQE and convert endian order if needed */
8865 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8866
8867 /* Check and process for different type of WCQE and dispatch */
8868 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8869 case CQE_CODE_COMPL_WQE:
8870 /* Process the WQ complete event */
8871 lpfc_sli4_fp_handle_fcp_wcqe(phba,
8872 (struct lpfc_wcqe_complete *)&wcqe);
8873 break;
8874 case CQE_CODE_RELEASE_WQE:
8875 /* Process the WQ release event */
8876 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
8877 (struct lpfc_wcqe_release *)&wcqe);
8878 break;
8879 case CQE_CODE_XRI_ABORTED:
8880 /* Process the WQ XRI abort event */
8881 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8882 (struct sli4_wcqe_xri_aborted *)&wcqe);
8883 break;
8884 default:
8885 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8886 "0144 Not a valid WCQE code: x%x\n",
8887 bf_get(lpfc_wcqe_c_code, &wcqe));
8888 break;
8889 }
8890 return workposted;
8891}
8892
8893/**
8894 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
8895 * @phba: Pointer to HBA context object.
8896 * @eqe: Pointer to fast-path event queue entry.
8897 *
8898 * This routine processes an event queue entry from the fast-path event queue.
8899 * It will check the MajorCode and MinorCode to determine whether this is for a
8900 * completion event on a completion queue, if not, an error shall be logged
8901 * and just return. Otherwise, it will get to the corresponding completion
8902 * queue and process all the entries on the completion queue, rearm the
8903 * completion queue, and then return.
8904 **/
8905static void
8906lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
8907 uint32_t fcp_cqidx)
8908{
8909 struct lpfc_queue *cq;
8910 struct lpfc_cqe *cqe;
8911 bool workposted = false;
8912 uint16_t cqid;
8913 int ecount = 0;
8914
8915 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
8916 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
8917 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8918 "0366 Not a valid fast-path completion "
8919 "event: majorcode=x%x, minorcode=x%x\n",
8920 bf_get(lpfc_eqe_major_code, eqe),
8921 bf_get(lpfc_eqe_minor_code, eqe));
8922 return;
8923 }
8924
8925 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
8926 if (unlikely(!cq)) {
8927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8928 "0367 Fast-path completion queue does not "
8929 "exist\n");
8930 return;
8931 }
8932
8933 /* Get the reference to the corresponding CQ */
8934 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8935 if (unlikely(cqid != cq->queue_id)) {
8936 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8937 "0368 Miss-matched fast-path completion "
8938 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
8939 cqid, cq->queue_id);
8940 return;
8941 }
8942
8943 /* Process all the entries to the CQ */
8944 while ((cqe = lpfc_sli4_cq_get(cq))) {
8945 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
8946 if (!(++ecount % LPFC_GET_QE_REL_INT))
8947 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8948 }
8949
8950 /* Catch the no cq entry condition */
8951 if (unlikely(ecount == 0))
8952 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8953 "0369 No entry from fast-path completion "
8954 "queue fcpcqid=%d\n", cq->queue_id);
8955
8956 /* In any case, flush and re-arm the CQ */
8957 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8958
8959 /* wake up worker thread if there are works to be done */
8960 if (workposted)
8961 lpfc_worker_wake_up(phba);
8962}
8963
8964static void
8965lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
8966{
8967 struct lpfc_eqe *eqe;
8968
8969 /* walk all the EQ entries and drop on the floor */
8970 while ((eqe = lpfc_sli4_eq_get(eq)))
8971 ;
8972
8973 /* Clear and re-arm the EQ */
8974 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
8975}
8976
8977/**
8978 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
8979 * @irq: Interrupt number.
8980 * @dev_id: The device context pointer.
8981 *
8982 * This function is directly called from the PCI layer as an interrupt
8983 * service routine when device with SLI-4 interface spec is enabled with
8984 * MSI-X multi-message interrupt mode and there are slow-path events in
8985 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
8986 * interrupt mode, this function is called as part of the device-level
8987 * interrupt handler. When the PCI slot is in error recovery or the HBA is
8988 * undergoing initialization, the interrupt handler will not process the
8989 * interrupt. The link attention and ELS ring attention events are handled
8990 * by the worker thread. The interrupt handler signals the worker thread
8991 * and returns for these events. This function is called without any lock
8992 * held. It gets the hbalock to access and update SLI data structures.
8993 *
8994 * This function returns IRQ_HANDLED when interrupt is handled else it
8995 * returns IRQ_NONE.
8996 **/
8997irqreturn_t
8998lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
8999{
9000 struct lpfc_hba *phba;
9001 struct lpfc_queue *speq;
9002 struct lpfc_eqe *eqe;
9003 unsigned long iflag;
9004 int ecount = 0;
9005
9006 /*
9007 * Get the driver's phba structure from the dev_id
9008 */
9009 phba = (struct lpfc_hba *)dev_id;
9010
9011 if (unlikely(!phba))
9012 return IRQ_NONE;
9013
9014 /* Get to the EQ struct associated with this vector */
9015 speq = phba->sli4_hba.sp_eq;
9016
9017 /* Check device state for handling interrupt */
9018 if (unlikely(lpfc_intr_state_check(phba))) {
9019 /* Check again for link_state with lock held */
9020 spin_lock_irqsave(&phba->hbalock, iflag);
9021 if (phba->link_state < LPFC_LINK_DOWN)
9022 /* Flush, clear interrupt, and rearm the EQ */
9023 lpfc_sli4_eq_flush(phba, speq);
9024 spin_unlock_irqrestore(&phba->hbalock, iflag);
9025 return IRQ_NONE;
9026 }
9027
9028 /*
9029	 * Process all the events on the slow-path EQ
9030 */
9031 while ((eqe = lpfc_sli4_eq_get(speq))) {
9032 lpfc_sli4_sp_handle_eqe(phba, eqe);
9033 if (!(++ecount % LPFC_GET_QE_REL_INT))
9034 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
9035 }
9036
9037 /* Always clear and re-arm the slow-path EQ */
9038 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
9039
9040	/* Catch the no EQ entry condition */
9041 if (unlikely(ecount == 0)) {
9042 if (phba->intr_type == MSIX)
9043			/* MSI-X vector is dedicated, so an empty EQ is unexpected: warn */
9044 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9045 "0357 MSI-X interrupt with no EQE\n");
9046 else
9047			/* Non MSI-X interrupt is shared; no EQE means it was not ours */
9048 return IRQ_NONE;
9049 }
9050
9051 return IRQ_HANDLED;
9052} /* lpfc_sli4_sp_intr_handler */
9053
9054/**
9055 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
9056 * @irq: Interrupt number.
9057 * @dev_id: The device context pointer.
9058 *
9059 * This function is directly called from the PCI layer as an interrupt
9060 * service routine when device with SLI-4 interface spec is enabled with
9061 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
9062 * ring event in the HBA. However, when the device is enabled with either
9063 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
9064 * device-level interrupt handler. When the PCI slot is in error recovery
9065 * or the HBA is undergoing initialization, the interrupt handler will not
9066 * process the interrupt. The SCSI FCP fast-path ring events are handled in
9067 * the interrupt context. This function is called without any lock held.
9068 * It gets the hbalock to access and update SLI data structures. Note that
9069 * the FCP EQs map one-to-one onto the FCP CQs, so the FCP EQ index is
9070 * equal to the corresponding FCP CQ index.
9071 *
9072 * This function returns IRQ_HANDLED when interrupt is handled else it
9073 * returns IRQ_NONE.
9074 **/
9075irqreturn_t
9076lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
9077{
9078 struct lpfc_hba *phba;
9079 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9080 struct lpfc_queue *fpeq;
9081 struct lpfc_eqe *eqe;
9082 unsigned long iflag;
9083 int ecount = 0;
9084 uint32_t fcp_eqidx;
9085
9086 /* Get the driver's phba structure from the dev_id */
9087 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
9088 phba = fcp_eq_hdl->phba;
9089 fcp_eqidx = fcp_eq_hdl->idx;
9090
9091 if (unlikely(!phba))
9092 return IRQ_NONE;
9093
9094 /* Get to the EQ struct associated with this vector */
9095 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
9096
9097 /* Check device state for handling interrupt */
9098 if (unlikely(lpfc_intr_state_check(phba))) {
9099 /* Check again for link_state with lock held */
9100 spin_lock_irqsave(&phba->hbalock, iflag);
9101 if (phba->link_state < LPFC_LINK_DOWN)
9102 /* Flush, clear interrupt, and rearm the EQ */
9103 lpfc_sli4_eq_flush(phba, fpeq);
9104 spin_unlock_irqrestore(&phba->hbalock, iflag);
9105 return IRQ_NONE;
9106 }
9107
9108 /*
9109	 * Process all the events on the FCP fast-path EQ
9110 */
9111 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9112 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
9113 if (!(++ecount % LPFC_GET_QE_REL_INT))
9114 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
9115 }
9116
9117 /* Always clear and re-arm the fast-path EQ */
9118 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
9119
9120 if (unlikely(ecount == 0)) {
9121 if (phba->intr_type == MSIX)
9122			/* MSI-X vector is dedicated, so an empty EQ is unexpected: warn */
9123 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9124 "0358 MSI-X interrupt with no EQE\n");
9125 else
9126			/* Non MSI-X interrupt is shared; no EQE means it was not ours */
9127 return IRQ_NONE;
9128 }
9129
9130 return IRQ_HANDLED;
9131} /* lpfc_sli4_fp_intr_handler */
9132
9133/**
9134 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
9135 * @irq: Interrupt number.
9136 * @dev_id: The device context pointer.
9137 *
9138 * This function is the device-level interrupt handler for a device with SLI-4
9139 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
9140 * interrupt mode is enabled and there is an event in the HBA which requires
9141 * driver attention. This function invokes the slow-path interrupt attention
9142 * handling function and fast-path interrupt attention handling function in
9143 * turn to process the relevant HBA attention events. This function is called
9144 * without any lock held. It gets the hbalock to access and update SLI data
9145 * structures.
9146 *
9147 * This function returns IRQ_HANDLED when interrupt is handled, else it
9148 * returns IRQ_NONE.
9149 **/
9150irqreturn_t
9151lpfc_sli4_intr_handler(int irq, void *dev_id)
9152{
9153 struct lpfc_hba *phba;
9154 irqreturn_t sp_irq_rc, fp_irq_rc;
9155 bool fp_handled = false;
9156 uint32_t fcp_eqidx;
9157
9158 /* Get the driver's phba structure from the dev_id */
9159 phba = (struct lpfc_hba *)dev_id;
9160
9161 if (unlikely(!phba))
9162 return IRQ_NONE;
9163
9164 /*
9165 * Invokes slow-path host attention interrupt handling as appropriate.
9166 */
9167 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9168
9169 /*
9170 * Invoke fast-path host attention interrupt handling as appropriate.
9171 */
9172 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9173 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9174 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9175 if (fp_irq_rc == IRQ_HANDLED)
9176 fp_handled |= true;
9177 }
9178
9179 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
9180} /* lpfc_sli4_intr_handler */
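/*
 * Illustrative sketch (not taken from the driver's setup code): how these
 * handlers are typically registered when MSI-X is enabled, with one vector
 * dedicated to the slow path and one additional vector per fast-path FCP EQ.
 * The vector variables and name strings below are examples only; each
 * fcp_eq_hdl entry is assumed to have its phba and idx fields set beforehand,
 * since lpfc_sli4_fp_intr_handler() reads both from its dev_id.
 *
 *	rc = request_irq(sp_vector, lpfc_sli4_sp_intr_handler, 0,
 *			 "lpfc-sp", phba);
 *	for (i = 0; i < phba->cfg_fcp_eq_count; i++)
 *		rc = request_irq(fp_vector[i], lpfc_sli4_fp_intr_handler, 0,
 *				 "lpfc-fp", &phba->sli4_hba.fcp_eq_hdl[i]);
 *
 * When MSI or INTx is used instead, only lpfc_sli4_intr_handler() is
 * registered, and it fans out to the two handlers above as shown.
 */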
9181
9182/**
9183 * lpfc_sli4_queue_free - free a queue structure and associated memory
9184 * @queue: The queue structure to free.
9185 *
9186 * This function frees a queue structure and the DMAable memory used for
9187 * the host resident queue. This function must be called after destroying the
9188 * queue on the HBA.
9189 **/
9190void
9191lpfc_sli4_queue_free(struct lpfc_queue *queue)
9192{
9193 struct lpfc_dmabuf *dmabuf;
9194
9195 if (!queue)
9196 return;
9197
9198 while (!list_empty(&queue->page_list)) {
9199 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9200 list);
9201 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9202 dmabuf->virt, dmabuf->phys);
9203 kfree(dmabuf);
9204 }
9205 kfree(queue);
9206 return;
9207}
9208
9209/**
9210 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
9211 * @phba: The HBA that this queue is being created on.
9212 * @entry_size: The size of each queue entry for this queue.
9213 * @entry_count: The number of entries that this queue will handle.
9214 *
9215 * This function allocates a queue structure and the DMAable memory used for
9216 * the host resident queue. This function must be called before creating the
9217 * queue on the HBA.
9218 **/
9219struct lpfc_queue *
9220lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9221 uint32_t entry_count)
9222{
9223 struct lpfc_queue *queue;
9224 struct lpfc_dmabuf *dmabuf;
9225 int x, total_qe_count;
9226 void *dma_pointer;
9227
9228
9229 queue = kzalloc(sizeof(struct lpfc_queue) +
9230 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9231 if (!queue)
9232 return NULL;
9233 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9234 INIT_LIST_HEAD(&queue->list);
9235 INIT_LIST_HEAD(&queue->page_list);
9236 INIT_LIST_HEAD(&queue->child_list);
9237 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9238 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9239 if (!dmabuf)
9240 goto out_fail;
9241 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9242 PAGE_SIZE, &dmabuf->phys,
9243 GFP_KERNEL);
9244 if (!dmabuf->virt) {
9245 kfree(dmabuf);
9246 goto out_fail;
9247 }
9248 dmabuf->buffer_tag = x;
9249 list_add_tail(&dmabuf->list, &queue->page_list);
9250 /* initialize queue's entry array */
9251 dma_pointer = dmabuf->virt;
9252 for (; total_qe_count < entry_count &&
9253 dma_pointer < (PAGE_SIZE + dmabuf->virt);
9254 total_qe_count++, dma_pointer += entry_size) {
9255 queue->qe[total_qe_count].address = dma_pointer;
9256 }
9257 }
9258 queue->entry_size = entry_size;
9259 queue->entry_count = entry_count;
9260 queue->phba = phba;
9261
9262 return queue;
9263out_fail:
9264 lpfc_sli4_queue_free(queue);
9265 return NULL;
9266}
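/*
 * Illustrative usage sketch (values and error handling are examples only,
 * not the driver's actual setup code): queue setup is a two-step process --
 * lpfc_sli4_queue_alloc() builds the host-side structure and DMA pages, and
 * the matching *_create() routine then instantiates the queue on the port;
 * teardown mirrors this with *_destroy() followed by lpfc_sli4_queue_free().
 * For an event queue:
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, sizeof(struct lpfc_eqe), 1024);
 *	if (!eq)
 *		return -ENOMEM;
 *	if (lpfc_eq_create(phba, eq, 64))
 *		goto out_free_eq;
 *	...
 *	lpfc_eq_destroy(phba, eq);
 * out_free_eq:
 *	lpfc_sli4_queue_free(eq);
 */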
9267
9268/**
9269 * lpfc_eq_create - Create an Event Queue on the HBA
9270 * @phba: HBA structure that indicates port to create a queue on.
9271 * @eq: The queue structure to use to create the event queue.
9272 * @imax: The maximum interrupt per second limit.
9273 *
9274 * This function creates an event queue, as detailed in @eq, on a port,
9275 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
9276 *
9277 * The @phba struct is used to send mailbox command to HBA. The @eq struct
9278 * is used to get the entry count and entry size that are necessary to
9279 * determine the number of pages to allocate and use for this queue. This
9280 * function will send the EQ_CREATE mailbox command to the HBA to setup the
9281 * event queue. This function is synchronous and will wait for the mailbox
9282 * command to finish before continuing.
9283 *
9284 * On success this function will return a zero. If unable to allocate enough
9285 * memory this function will return ENOMEM. If the queue create mailbox command
9286 * fails this function will return ENXIO.
9287 **/
9288uint32_t
9289lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9290{
9291 struct lpfc_mbx_eq_create *eq_create;
9292 LPFC_MBOXQ_t *mbox;
9293 int rc, length, status = 0;
9294 struct lpfc_dmabuf *dmabuf;
9295 uint32_t shdr_status, shdr_add_status;
9296 union lpfc_sli4_cfg_shdr *shdr;
9297 uint16_t dmult;
9298
9299 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9300 if (!mbox)
9301 return -ENOMEM;
9302 length = (sizeof(struct lpfc_mbx_eq_create) -
9303 sizeof(struct lpfc_sli4_cfg_mhdr));
9304 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9305 LPFC_MBOX_OPCODE_EQ_CREATE,
9306 length, LPFC_SLI4_MBX_EMBED);
9307 eq_create = &mbox->u.mqe.un.eq_create;
9308 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9309 eq->page_count);
9310 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9311 LPFC_EQE_SIZE);
9312 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
9313	/* Calculate delay multiplier from maximum interrupts per second */
9314 dmult = LPFC_DMULT_CONST/imax - 1;
9315 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9316 dmult);
9317 switch (eq->entry_count) {
9318 default:
9319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9320 "0360 Unsupported EQ count. (%d)\n",
9321 eq->entry_count);
9322 if (eq->entry_count < 256)
9323 return -EINVAL;
9324 /* otherwise default to smallest count (drop through) */
9325 case 256:
9326 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9327 LPFC_EQ_CNT_256);
9328 break;
9329 case 512:
9330 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9331 LPFC_EQ_CNT_512);
9332 break;
9333 case 1024:
9334 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9335 LPFC_EQ_CNT_1024);
9336 break;
9337 case 2048:
9338 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9339 LPFC_EQ_CNT_2048);
9340 break;
9341 case 4096:
9342 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9343 LPFC_EQ_CNT_4096);
9344 break;
9345 }
9346 list_for_each_entry(dmabuf, &eq->page_list, list) {
9347 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9348 putPaddrLow(dmabuf->phys);
9349 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9350 putPaddrHigh(dmabuf->phys);
9351 }
9352 mbox->vport = phba->pport;
9353 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9354 mbox->context1 = NULL;
9355 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9356 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9357 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9358 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9359 if (shdr_status || shdr_add_status || rc) {
9360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9361 "2500 EQ_CREATE mailbox failed with "
9362 "status x%x add_status x%x, mbx status x%x\n",
9363 shdr_status, shdr_add_status, rc);
9364 status = -ENXIO;
9365 }
9366 eq->type = LPFC_EQ;
9367 eq->subtype = LPFC_NONE;
9368 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9369 if (eq->queue_id == 0xFFFF)
9370 status = -ENXIO;
9371 eq->host_index = 0;
9372 eq->hba_index = 0;
9373
9374 if (rc != MBX_TIMEOUT)
9375 mempool_free(mbox, phba->mbox_mem_pool);
9376 return status;
9377}
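/*
 * Note on the delay multiplier programmed above: it is derived as
 * LPFC_DMULT_CONST / imax - 1, so a larger @imax (more interrupts permitted
 * per second) yields a smaller multiplier and, presumably, less interrupt
 * coalescing on this EQ.
 */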
9378
9379/**
9380 * lpfc_cq_create - Create a Completion Queue on the HBA
9381 * @phba: HBA structure that indicates port to create a queue on.
9382 * @cq: The queue structure to use to create the completion queue.
9383 * @eq: The event queue to bind this completion queue to.
9384 *
9385 * This function creates a completion queue, as detailed in @cq, on a port,
9386 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
9387 *
9388 * The @phba struct is used to send mailbox command to HBA. The @cq struct
9389 * is used to get the entry count and entry size that are necessary to
9390 * determine the number of pages to allocate and use for this queue. The @eq
9391 * is used to indicate which event queue to bind this completion queue to. This
9392 * function will send the CQ_CREATE mailbox command to the HBA to setup the
9393 * completion queue. This function is synchronous and will wait for the mailbox
9394 * command to finish before continuing.
9395 *
9396 * On success this function will return a zero. If unable to allocate enough
9397 * memory this function will return ENOMEM. If the queue create mailbox command
9398 * fails this function will return ENXIO.
9399 **/
9400uint32_t
9401lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9402 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9403{
9404 struct lpfc_mbx_cq_create *cq_create;
9405 struct lpfc_dmabuf *dmabuf;
9406 LPFC_MBOXQ_t *mbox;
9407 int rc, length, status = 0;
9408 uint32_t shdr_status, shdr_add_status;
9409 union lpfc_sli4_cfg_shdr *shdr;
9410
9411 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9412 if (!mbox)
9413 return -ENOMEM;
9414 length = (sizeof(struct lpfc_mbx_cq_create) -
9415 sizeof(struct lpfc_sli4_cfg_mhdr));
9416 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9417 LPFC_MBOX_OPCODE_CQ_CREATE,
9418 length, LPFC_SLI4_MBX_EMBED);
9419 cq_create = &mbox->u.mqe.un.cq_create;
9420 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9421 cq->page_count);
9422 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9423 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9424 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9425 switch (cq->entry_count) {
9426 default:
9427 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9428 "0361 Unsupported CQ count. (%d)\n",
9429 cq->entry_count);
9430 if (cq->entry_count < 256)
9431 return -EINVAL;
9432 /* otherwise default to smallest count (drop through) */
9433 case 256:
9434 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9435 LPFC_CQ_CNT_256);
9436 break;
9437 case 512:
9438 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9439 LPFC_CQ_CNT_512);
9440 break;
9441 case 1024:
9442 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9443 LPFC_CQ_CNT_1024);
9444 break;
9445 }
9446 list_for_each_entry(dmabuf, &cq->page_list, list) {
9447 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9448 putPaddrLow(dmabuf->phys);
9449 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9450 putPaddrHigh(dmabuf->phys);
9451 }
9452 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9453
9454 /* The IOCTL status is embedded in the mailbox subheader. */
9455 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9456 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9457 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9458 if (shdr_status || shdr_add_status || rc) {
9459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9460 "2501 CQ_CREATE mailbox failed with "
9461 "status x%x add_status x%x, mbx status x%x\n",
9462 shdr_status, shdr_add_status, rc);
9463 status = -ENXIO;
9464 goto out;
9465 }
9466 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9467 if (cq->queue_id == 0xFFFF) {
9468 status = -ENXIO;
9469 goto out;
9470 }
9471 /* link the cq onto the parent eq child list */
9472 list_add_tail(&cq->list, &eq->child_list);
9473 /* Set up completion queue's type and subtype */
9474 cq->type = type;
9475 cq->subtype = subtype;
9476 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9477 cq->host_index = 0;
9478 cq->hba_index = 0;
9479out:
9480
9481 if (rc != MBX_TIMEOUT)
9482 mempool_free(mbox, phba->mbox_mem_pool);
9483 return status;
9484}
9485
9486/**
9487 * lpfc_mq_create - Create a Mailbox Queue on the HBA
9488 * @phba: HBA structure that indicates port to create a queue on.
9489 * @mq: The queue structure to use to create the mailbox queue.
9490 *
9491 * This function creates a mailbox queue, as detailed in @mq, on a port,
9492 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
9493 *
9494 * The @phba struct is used to send mailbox command to HBA. The @mq struct
9495 * is used to get the entry count and entry size that are necessary to
9496 * determine the number of pages to allocate and use for this queue. This
9497 * function will send the MQ_CREATE mailbox command to the HBA to setup the
9498 * mailbox queue. This function is synchronous and will wait for the mailbox
9499 * command to finish before continuing.
9500 *
9501 * On success this function will return a zero. If unable to allocate enough
9502 * memory this function will return ENOMEM. If the queue create mailbox command
9503 * fails this function will return ENXIO.
9504 **/
9505uint32_t
9506lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9507 struct lpfc_queue *cq, uint32_t subtype)
9508{
9509 struct lpfc_mbx_mq_create *mq_create;
9510 struct lpfc_dmabuf *dmabuf;
9511 LPFC_MBOXQ_t *mbox;
9512 int rc, length, status = 0;
9513 uint32_t shdr_status, shdr_add_status;
9514 union lpfc_sli4_cfg_shdr *shdr;
9515
9516 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9517 if (!mbox)
9518 return -ENOMEM;
9519 length = (sizeof(struct lpfc_mbx_mq_create) -
9520 sizeof(struct lpfc_sli4_cfg_mhdr));
9521 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9522 LPFC_MBOX_OPCODE_MQ_CREATE,
9523 length, LPFC_SLI4_MBX_EMBED);
9524 mq_create = &mbox->u.mqe.un.mq_create;
9525 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9526 mq->page_count);
9527 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9528 cq->queue_id);
9529 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9530 switch (mq->entry_count) {
9531 default:
9532 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9533 "0362 Unsupported MQ count. (%d)\n",
9534 mq->entry_count);
9535 if (mq->entry_count < 16)
9536 return -EINVAL;
9537 /* otherwise default to smallest count (drop through) */
9538 case 16:
9539 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9540 LPFC_MQ_CNT_16);
9541 break;
9542 case 32:
9543 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9544 LPFC_MQ_CNT_32);
9545 break;
9546 case 64:
9547 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9548 LPFC_MQ_CNT_64);
9549 break;
9550 case 128:
9551 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9552 LPFC_MQ_CNT_128);
9553 break;
9554 }
9555 list_for_each_entry(dmabuf, &mq->page_list, list) {
9556 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9557 putPaddrLow(dmabuf->phys);
9558 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9559 putPaddrHigh(dmabuf->phys);
9560 }
9561 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9562 /* The IOCTL status is embedded in the mailbox subheader. */
9563 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9564 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9565 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9566 if (shdr_status || shdr_add_status || rc) {
9567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9568 "2502 MQ_CREATE mailbox failed with "
9569 "status x%x add_status x%x, mbx status x%x\n",
9570 shdr_status, shdr_add_status, rc);
9571 status = -ENXIO;
9572 goto out;
9573 }
9574 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9575 if (mq->queue_id == 0xFFFF) {
9576 status = -ENXIO;
9577 goto out;
9578 }
9579 mq->type = LPFC_MQ;
9580 mq->subtype = subtype;
9581 mq->host_index = 0;
9582 mq->hba_index = 0;
9583
9584 /* link the mq onto the parent cq child list */
9585 list_add_tail(&mq->list, &cq->child_list);
9586out:
9587 if (rc != MBX_TIMEOUT)
9588 mempool_free(mbox, phba->mbox_mem_pool);
9589 return status;
9590}
9591
9592/**
9593 * lpfc_wq_create - Create a Work Queue on the HBA
9594 * @phba: HBA structure that indicates port to create a queue on.
9595 * @wq: The queue structure to use to create the work queue.
9596 * @cq: The completion queue to bind this work queue to.
9597 * @subtype: The subtype of the work queue indicating its functionality.
9598 *
9599 * This function creates a work queue, as detailed in @wq, on a port, described
9600 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
9601 *
9602 * The @phba struct is used to send mailbox command to HBA. The @wq struct
9603 * is used to get the entry count and entry size that are necessary to
9604 * determine the number of pages to allocate and use for this queue. The @cq
9605 * is used to indicate which completion queue to bind this work queue to. This
9606 * function will send the WQ_CREATE mailbox command to the HBA to setup the
9607 * work queue. This function is synchronous and will wait for the mailbox
9608 * command to finish before continuing.
9609 *
9610 * On success this function will return a zero. If unable to allocate enough
9611 * memory this function will return ENOMEM. If the queue create mailbox command
9612 * fails this function will return ENXIO.
9613 **/
9614uint32_t
9615lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9616 struct lpfc_queue *cq, uint32_t subtype)
9617{
9618 struct lpfc_mbx_wq_create *wq_create;
9619 struct lpfc_dmabuf *dmabuf;
9620 LPFC_MBOXQ_t *mbox;
9621 int rc, length, status = 0;
9622 uint32_t shdr_status, shdr_add_status;
9623 union lpfc_sli4_cfg_shdr *shdr;
9624
9625 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9626 if (!mbox)
9627 return -ENOMEM;
9628 length = (sizeof(struct lpfc_mbx_wq_create) -
9629 sizeof(struct lpfc_sli4_cfg_mhdr));
9630 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9631 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
9632 length, LPFC_SLI4_MBX_EMBED);
9633 wq_create = &mbox->u.mqe.un.wq_create;
9634 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
9635 wq->page_count);
9636 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9637 cq->queue_id);
9638 list_for_each_entry(dmabuf, &wq->page_list, list) {
9639 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9640 putPaddrLow(dmabuf->phys);
9641 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9642 putPaddrHigh(dmabuf->phys);
9643 }
9644 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9645 /* The IOCTL status is embedded in the mailbox subheader. */
9646 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
9647 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9648 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9649 if (shdr_status || shdr_add_status || rc) {
9650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9651 "2503 WQ_CREATE mailbox failed with "
9652 "status x%x add_status x%x, mbx status x%x\n",
9653 shdr_status, shdr_add_status, rc);
9654 status = -ENXIO;
9655 goto out;
9656 }
9657 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
9658 if (wq->queue_id == 0xFFFF) {
9659 status = -ENXIO;
9660 goto out;
9661 }
9662 wq->type = LPFC_WQ;
9663 wq->subtype = subtype;
9664 wq->host_index = 0;
9665 wq->hba_index = 0;
9666
9667 /* link the wq onto the parent cq child list */
9668 list_add_tail(&wq->list, &cq->child_list);
9669out:
9670	if (rc != MBX_TIMEOUT)
9671 mempool_free(mbox, phba->mbox_mem_pool);
9672 return status;
9673}
9674
9675/**
9676 * lpfc_rq_create - Create a Receive Queue on the HBA
9677 * @phba: HBA structure that indicates port to create a queue on.
9678 * @hrq: The queue structure to use to create the header receive queue.
9679 * @drq: The queue structure to use to create the data receive queue.
9680 * @cq: The completion queue to bind this receive queue pair to.
9681 *
9682 * This function creates a receive buffer queue pair, as detailed in @hrq and
9683 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
9684 * to the HBA.
9685 *
9686 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
9687 * structs are used to get the entry count that is necessary to determine the
9688 * number of pages to use for this queue. The @cq indicates which completion
9689 * queue the buffers posted to these receive queues will be bound to.
9690 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
9691 * receive queue pair. This function is synchronous and will wait for the
9692 * mailbox command to finish before continuing.
9693 *
9694 * On success this function will return a zero. If unable to allocate enough
9695 * memory this function will return ENOMEM. If the queue create mailbox command
9696 * fails this function will return ENXIO.
9697 **/
9698uint32_t
9699lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9700 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9701{
9702 struct lpfc_mbx_rq_create *rq_create;
9703 struct lpfc_dmabuf *dmabuf;
9704 LPFC_MBOXQ_t *mbox;
9705 int rc, length, status = 0;
9706 uint32_t shdr_status, shdr_add_status;
9707 union lpfc_sli4_cfg_shdr *shdr;
9708
9709 if (hrq->entry_count != drq->entry_count)
9710 return -EINVAL;
9711 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9712 if (!mbox)
9713 return -ENOMEM;
9714 length = (sizeof(struct lpfc_mbx_rq_create) -
9715 sizeof(struct lpfc_sli4_cfg_mhdr));
9716 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9717 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9718 length, LPFC_SLI4_MBX_EMBED);
9719 rq_create = &mbox->u.mqe.un.rq_create;
9720 switch (hrq->entry_count) {
9721 default:
9722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9723 "2535 Unsupported RQ count. (%d)\n",
9724 hrq->entry_count);
9725 if (hrq->entry_count < 512)
9726 return -EINVAL;
9727 /* otherwise default to smallest count (drop through) */
9728 case 512:
9729 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9730 LPFC_RQ_RING_SIZE_512);
9731 break;
9732 case 1024:
9733 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9734 LPFC_RQ_RING_SIZE_1024);
9735 break;
9736 case 2048:
9737 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9738 LPFC_RQ_RING_SIZE_2048);
9739 break;
9740 case 4096:
9741 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9742 LPFC_RQ_RING_SIZE_4096);
9743 break;
9744 }
9745 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9746 cq->queue_id);
9747 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9748 hrq->page_count);
9749 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9750 LPFC_HDR_BUF_SIZE);
9751 list_for_each_entry(dmabuf, &hrq->page_list, list) {
9752 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9753 putPaddrLow(dmabuf->phys);
9754 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9755 putPaddrHigh(dmabuf->phys);
9756 }
9757 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9758 /* The IOCTL status is embedded in the mailbox subheader. */
9759 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9760 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9761 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9762 if (shdr_status || shdr_add_status || rc) {
9763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9764 "2504 RQ_CREATE mailbox failed with "
9765 "status x%x add_status x%x, mbx status x%x\n",
9766 shdr_status, shdr_add_status, rc);
9767 status = -ENXIO;
9768 goto out;
9769 }
9770 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9771 if (hrq->queue_id == 0xFFFF) {
9772 status = -ENXIO;
9773 goto out;
9774 }
9775 hrq->type = LPFC_HRQ;
9776 hrq->subtype = subtype;
9777 hrq->host_index = 0;
9778 hrq->hba_index = 0;
9779
9780 /* now create the data queue */
9781 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9782 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9783 length, LPFC_SLI4_MBX_EMBED);
9784 switch (drq->entry_count) {
9785 default:
9786 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9787 "2536 Unsupported RQ count. (%d)\n",
9788 drq->entry_count);
9789 if (drq->entry_count < 512)
9790 return -EINVAL;
9791 /* otherwise default to smallest count (drop through) */
9792 case 512:
9793 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9794 LPFC_RQ_RING_SIZE_512);
9795 break;
9796 case 1024:
9797 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9798 LPFC_RQ_RING_SIZE_1024);
9799 break;
9800 case 2048:
9801 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9802 LPFC_RQ_RING_SIZE_2048);
9803 break;
9804 case 4096:
9805 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9806 LPFC_RQ_RING_SIZE_4096);
9807 break;
9808 }
9809 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9810 cq->queue_id);
9811 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9812 drq->page_count);
9813 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9814 LPFC_DATA_BUF_SIZE);
9815 list_for_each_entry(dmabuf, &drq->page_list, list) {
9816 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9817 putPaddrLow(dmabuf->phys);
9818 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9819 putPaddrHigh(dmabuf->phys);
9820 }
9821 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9822 /* The IOCTL status is embedded in the mailbox subheader. */
9823 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9824 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9825 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9826 if (shdr_status || shdr_add_status || rc) {
9827 status = -ENXIO;
9828 goto out;
9829 }
9830 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9831 if (drq->queue_id == 0xFFFF) {
9832 status = -ENXIO;
9833 goto out;
9834 }
9835 drq->type = LPFC_DRQ;
9836 drq->subtype = subtype;
9837 drq->host_index = 0;
9838 drq->hba_index = 0;
9839
9840 /* link the header and data RQs onto the parent cq child list */
9841 list_add_tail(&hrq->list, &cq->child_list);
9842 list_add_tail(&drq->list, &cq->child_list);
9843
9844out:
9845 if (rc != MBX_TIMEOUT)
9846 mempool_free(mbox, phba->mbox_mem_pool);
9847 return status;
9848}
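/*
 * Note on the receive queues created above: the header and data RQs always
 * come as a pair -- their entry counts must match (hence the -EINVAL check
 * at the top), both are bound to the same completion queue @cq, and the
 * buffers they are stocked with differ only in size (LPFC_HDR_BUF_SIZE for
 * @hrq, LPFC_DATA_BUF_SIZE for @drq).
 */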
9849
9850/**
9851 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
9852 * @eq: The queue structure associated with the queue to destroy.
9853 *
9854 * This function destroys a queue, as detailed in @eq, by sending a mailbox
9855 * command, specific to the type of queue, to the HBA.
9856 *
9857 * The @eq struct is used to get the queue ID of the queue to destroy.
9858 *
9859 * On success this function will return a zero. If the queue destroy mailbox
9860 * command fails this function will return ENXIO.
9861 **/
9862uint32_t
9863lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
9864{
9865 LPFC_MBOXQ_t *mbox;
9866 int rc, length, status = 0;
9867 uint32_t shdr_status, shdr_add_status;
9868 union lpfc_sli4_cfg_shdr *shdr;
9869
9870 if (!eq)
9871 return -ENODEV;
9872 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
9873 if (!mbox)
9874 return -ENOMEM;
9875 length = (sizeof(struct lpfc_mbx_eq_destroy) -
9876 sizeof(struct lpfc_sli4_cfg_mhdr));
9877 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9878 LPFC_MBOX_OPCODE_EQ_DESTROY,
9879 length, LPFC_SLI4_MBX_EMBED);
9880 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
9881 eq->queue_id);
9882 mbox->vport = eq->phba->pport;
9883 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9884
9885 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
9886 /* The IOCTL status is embedded in the mailbox subheader. */
9887 shdr = (union lpfc_sli4_cfg_shdr *)
9888 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
9889 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9890 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9891 if (shdr_status || shdr_add_status || rc) {
9892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9893 "2505 EQ_DESTROY mailbox failed with "
9894 "status x%x add_status x%x, mbx status x%x\n",
9895 shdr_status, shdr_add_status, rc);
9896 status = -ENXIO;
9897 }
9898
9899 /* Remove eq from any list */
9900 list_del_init(&eq->list);
9901 if (rc != MBX_TIMEOUT)
9902 mempool_free(mbox, eq->phba->mbox_mem_pool);
9903 return status;
9904}
9905
9906/**
9907 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
9908 * @cq: The queue structure associated with the queue to destroy.
9909 *
9910 * This function destroys a queue, as detailed in @cq, by sending a mailbox
9911 * command, specific to the type of queue, to the HBA.
9912 *
9913 * The @cq struct is used to get the queue ID of the queue to destroy.
9914 *
9915 * On success this function will return a zero. If the queue destroy mailbox
9916 * command fails this function will return ENXIO.
9917 **/
9918uint32_t
9919lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
9920{
9921 LPFC_MBOXQ_t *mbox;
9922 int rc, length, status = 0;
9923 uint32_t shdr_status, shdr_add_status;
9924 union lpfc_sli4_cfg_shdr *shdr;
9925
9926 if (!cq)
9927 return -ENODEV;
9928 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
9929 if (!mbox)
9930 return -ENOMEM;
9931 length = (sizeof(struct lpfc_mbx_cq_destroy) -
9932 sizeof(struct lpfc_sli4_cfg_mhdr));
9933 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9934 LPFC_MBOX_OPCODE_CQ_DESTROY,
9935 length, LPFC_SLI4_MBX_EMBED);
9936 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
9937 cq->queue_id);
9938 mbox->vport = cq->phba->pport;
9939 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9940 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
9941 /* The IOCTL status is embedded in the mailbox subheader. */
9942 shdr = (union lpfc_sli4_cfg_shdr *)
9943 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
9944 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9945 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9946 if (shdr_status || shdr_add_status || rc) {
9947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9948 "2506 CQ_DESTROY mailbox failed with "
9949 "status x%x add_status x%x, mbx status x%x\n",
9950 shdr_status, shdr_add_status, rc);
9951 status = -ENXIO;
9952 }
9953 /* Remove cq from any list */
9954 list_del_init(&cq->list);
9955 if (rc != MBX_TIMEOUT)
9956 mempool_free(mbox, cq->phba->mbox_mem_pool);
9957 return status;
9958}
9959
9960/**
9961 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
9962 * @mq: The queue structure associated with the queue to destroy.
9963 *
9964 * This function destroys a queue, as detailed in @mq, by sending a mailbox
9965 * command, specific to the type of queue, to the HBA.
9966 *
9967 * The @mq struct is used to get the queue ID of the queue to destroy.
9968 *
9969 * On success this function will return a zero. If the queue destroy mailbox
9970 * command fails this function will return ENXIO.
9971 **/
9972uint32_t
9973lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
9974{
9975 LPFC_MBOXQ_t *mbox;
9976 int rc, length, status = 0;
9977 uint32_t shdr_status, shdr_add_status;
9978 union lpfc_sli4_cfg_shdr *shdr;
9979
9980 if (!mq)
9981 return -ENODEV;
9982 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
9983 if (!mbox)
9984 return -ENOMEM;
9985 length = (sizeof(struct lpfc_mbx_mq_destroy) -
9986 sizeof(struct lpfc_sli4_cfg_mhdr));
9987 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9988 LPFC_MBOX_OPCODE_MQ_DESTROY,
9989 length, LPFC_SLI4_MBX_EMBED);
9990 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
9991 mq->queue_id);
9992 mbox->vport = mq->phba->pport;
9993 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9994 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
9995 /* The IOCTL status is embedded in the mailbox subheader. */
9996 shdr = (union lpfc_sli4_cfg_shdr *)
9997 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
9998 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9999 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10000 if (shdr_status || shdr_add_status || rc) {
10001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10002 "2507 MQ_DESTROY mailbox failed with "
10003 "status x%x add_status x%x, mbx status x%x\n",
10004 shdr_status, shdr_add_status, rc);
10005 status = -ENXIO;
10006 }
10007 /* Remove mq from any list */
10008 list_del_init(&mq->list);
10009 if (rc != MBX_TIMEOUT)
10010 mempool_free(mbox, mq->phba->mbox_mem_pool);
10011 return status;
10012}
10013
10014/**
10015 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
10016 * @wq: The queue structure associated with the queue to destroy.
10017 *
10018 * This function destroys a queue, as detailed in @wq, by sending a mailbox
10019 * command, specific to the type of queue, to the HBA.
10020 *
10021 * The @wq struct is used to get the queue ID of the queue to destroy.
10022 *
10023 * On success this function will return a zero. If the queue destroy mailbox
10024 * command fails this function will return ENXIO.
10025 **/
10026uint32_t
10027lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10028{
10029 LPFC_MBOXQ_t *mbox;
10030 int rc, length, status = 0;
10031 uint32_t shdr_status, shdr_add_status;
10032 union lpfc_sli4_cfg_shdr *shdr;
10033
10034 if (!wq)
10035 return -ENODEV;
10036 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10037 if (!mbox)
10038 return -ENOMEM;
10039 length = (sizeof(struct lpfc_mbx_wq_destroy) -
10040 sizeof(struct lpfc_sli4_cfg_mhdr));
10041 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10042 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10043 length, LPFC_SLI4_MBX_EMBED);
10044 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10045 wq->queue_id);
10046 mbox->vport = wq->phba->pport;
10047 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10048 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10049 shdr = (union lpfc_sli4_cfg_shdr *)
10050 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10051 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10052 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10053 if (shdr_status || shdr_add_status || rc) {
10054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10055 "2508 WQ_DESTROY mailbox failed with "
10056 "status x%x add_status x%x, mbx status x%x\n",
10057 shdr_status, shdr_add_status, rc);
10058 status = -ENXIO;
10059 }
10060 /* Remove wq from any list */
10061 list_del_init(&wq->list);
10062 if (rc != MBX_TIMEOUT)
10063 mempool_free(mbox, wq->phba->mbox_mem_pool);
10064 return status;
10065}
10066
10067/**
10068 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
10069 * @hrq, @drq: The header and data receive queue pair to destroy.
10070 *
10071 * This function destroys a queue pair, as detailed in @hrq and @drq, by sending a mailbox
10072 * command, specific to the type of queue, to the HBA.
10073 *
10074 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
10075 *
10076 * On success this function will return a zero. If the queue destroy mailbox
10077 * command fails this function will return ENXIO.
10078 **/
10079uint32_t
10080lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10081 struct lpfc_queue *drq)
10082{
10083 LPFC_MBOXQ_t *mbox;
10084 int rc, length, status = 0;
10085 uint32_t shdr_status, shdr_add_status;
10086 union lpfc_sli4_cfg_shdr *shdr;
10087
10088 if (!hrq || !drq)
10089 return -ENODEV;
10090 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
10091 if (!mbox)
10092 return -ENOMEM;
10093 length = (sizeof(struct lpfc_mbx_rq_destroy) -
10094 sizeof(struct mbox_header));
10095 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10096 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
10097 length, LPFC_SLI4_MBX_EMBED);
10098 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10099 hrq->queue_id);
10100 mbox->vport = hrq->phba->pport;
10101 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10102 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
10103 /* The IOCTL status is embedded in the mailbox subheader. */
10104 shdr = (union lpfc_sli4_cfg_shdr *)
10105 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10106 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10107 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10108 if (shdr_status || shdr_add_status || rc) {
10109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10110 "2509 RQ_DESTROY mailbox failed with "
10111 "status x%x add_status x%x, mbx status x%x\n",
10112 shdr_status, shdr_add_status, rc);
10113 if (rc != MBX_TIMEOUT)
10114 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10115 return -ENXIO;
10116 }
10117 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10118 drq->queue_id);
10119 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
10120 shdr = (union lpfc_sli4_cfg_shdr *)
10121 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10122 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10123 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10124 if (shdr_status || shdr_add_status || rc) {
10125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10126 "2510 RQ_DESTROY mailbox failed with "
10127 "status x%x add_status x%x, mbx status x%x\n",
10128 shdr_status, shdr_add_status, rc);
10129 status = -ENXIO;
10130 }
10131 list_del_init(&hrq->list);
10132 list_del_init(&drq->list);
10133 if (rc != MBX_TIMEOUT)
10134 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10135 return status;
10136}
10137
10138/**
10139 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
10140 * @phba: The HBA for which this call is being executed.
10141 * @pdma_phys_addr0: Physical address of the 1st SGL page.
10142 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
10143 * @xritag: the xritag that ties this io to the SGL pages.
10144 *
10145 * This routine will post the sgl pages for the IO that has the xritag
10146 * that is in the iocbq structure. The xritag is assigned during iocbq
10147 * creation and persists for as long as the driver is loaded.
10148 * If the caller has fewer than 256 scatter gather segments to map, then
10149 * pdma_phys_addr1 should be 0.
10150 * If the caller needs to map more than 256 scatter gather segments, then
10151 * pdma_phys_addr1 should be a valid physical address.
10152 * The physical addresses of the SGL pages must be 64 byte aligned.
10153 * If two SGL pages are mapped, the first one must have 256 entries and
10154 * the second one can have between 1 and 256 entries.
10155 *
10156 * Return codes:
10157 * 0 - Success
10158 * -ENXIO, -ENOMEM - Failure
10159 **/
10160int
10161lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10162 dma_addr_t pdma_phys_addr0,
10163 dma_addr_t pdma_phys_addr1,
10164 uint16_t xritag)
10165{
10166 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10167 LPFC_MBOXQ_t *mbox;
10168 int rc;
10169 uint32_t shdr_status, shdr_add_status;
10170 union lpfc_sli4_cfg_shdr *shdr;
10171
10172 if (xritag == NO_XRI) {
10173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10174 "0364 Invalid param:\n");
10175 return -EINVAL;
10176 }
10177
10178 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10179 if (!mbox)
10180 return -ENOMEM;
10181
10182 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10183 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10184 sizeof(struct lpfc_mbx_post_sgl_pages) -
10185 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10186
10187 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10188 &mbox->u.mqe.un.post_sgl_pages;
10189 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10190 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10191
10192 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10193 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10194 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10195 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10196
10197 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10198 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10199 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10200 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10201 if (!phba->sli4_hba.intr_enable)
10202 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10203 else
10204 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10205 /* The IOCTL status is embedded in the mailbox subheader. */
10206 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10207 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10208 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10209 if (rc != MBX_TIMEOUT)
10210 mempool_free(mbox, phba->mbox_mem_pool);
10211 if (shdr_status || shdr_add_status || rc) {
10212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10213 "2511 POST_SGL mailbox failed with "
10214 "status x%x add_status x%x, mbx status x%x\n",
10215 shdr_status, shdr_add_status, rc);
10216 rc = -ENXIO;
10217 }
10218	return rc;
10219}
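/*
 * Illustrative usage sketch (not taken from the driver): posting a
 * single-page SGL, assuming sglq points at a driver lpfc_sglq entry whose
 * DMA address (phys) and sli4_xritag have already been set up; the second
 * page address is 0 because fewer than 256 segments are mapped.
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *	if (rc)
 *		return rc;
 */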
10220/**
10221 * lpfc_sli4_remove_all_sgl_pages - Remove all scatter gather list pages from the HBA
10222 * @phba: The HBA for which this call is being executed.
10223 *
10224 * This routine will remove all of the sgl pages registered with the hba.
10225 *
10226 * Return codes:
10227 * 0 - Success
10228 * -ENXIO, -ENOMEM - Failure
10229 **/
10230int
10231lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10232{
10233 LPFC_MBOXQ_t *mbox;
10234 int rc;
10235 uint32_t shdr_status, shdr_add_status;
10236 union lpfc_sli4_cfg_shdr *shdr;
10237
10238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10239 if (!mbox)
10240 return -ENOMEM;
10241
10242 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10243 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10244 LPFC_SLI4_MBX_EMBED);
10245 if (!phba->sli4_hba.intr_enable)
10246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10247 else
10248 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10249 /* The IOCTL status is embedded in the mailbox subheader. */
10250 shdr = (union lpfc_sli4_cfg_shdr *)
10251 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10252 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10254 if (rc != MBX_TIMEOUT)
10255 mempool_free(mbox, phba->mbox_mem_pool);
10256 if (shdr_status || shdr_add_status || rc) {
10257 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10258 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10259 "status x%x add_status x%x, mbx status x%x\n",
10260 shdr_status, shdr_add_status, rc);
10261 rc = -ENXIO;
10262 }
10263 return rc;
10264}
10265
10266/**
10267 * lpfc_sli4_next_xritag - Get an xritag for the io
10268 * @phba: Pointer to HBA context object.
10269 *
10270 * This function gets an xritag for the iocb. If there is no unused xritag
10271 * it will return 0xffff.
10272 * The function returns the allocated xritag if successful; otherwise it
10273 * returns 0xffff, which is not a valid xritag.
10274 * The caller is not required to hold any lock.
10275 **/
10276uint16_t
10277lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10278{
10279 uint16_t xritag;
10280
10281 spin_lock_irq(&phba->hbalock);
10282 xritag = phba->sli4_hba.next_xri;
10283 if ((xritag != (uint16_t) -1) && xritag <
10284 (phba->sli4_hba.max_cfg_param.max_xri
10285 + phba->sli4_hba.max_cfg_param.xri_base)) {
10286 phba->sli4_hba.next_xri++;
10287 phba->sli4_hba.max_cfg_param.xri_used++;
10288 spin_unlock_irq(&phba->hbalock);
10289 return xritag;
10290 }
10291 spin_unlock_irq(&phba->hbalock);
10292
10293 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10294 "2004 Failed to allocate XRI.last XRITAG is %d"
10295 " Max XRI is %d, Used XRI is %d\n",
10296 phba->sli4_hba.next_xri,
10297 phba->sli4_hba.max_cfg_param.max_xri,
10298 phba->sli4_hba.max_cfg_param.xri_used);
10299 return -1;
10300}
10301
10302/**
10303 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
10304 * @phba: pointer to lpfc hba data structure.
10305 *
10306 * This routine is invoked to post a block of driver's sgl pages to the
10307 * HBA using non-embedded mailbox command. No Lock is held. This routine
10308 * is only called when the driver is loading and after all IO has been
10309 * stopped.
10310 **/
10311int
10312lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10313{
10314 struct lpfc_sglq *sglq_entry;
10315 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10316 struct sgl_page_pairs *sgl_pg_pairs;
10317 void *viraddr;
10318 LPFC_MBOXQ_t *mbox;
10319 uint32_t reqlen, alloclen, pg_pairs;
10320 uint32_t mbox_tmo;
10321 uint16_t xritag_start = 0;
10322 int els_xri_cnt, rc = 0;
10323 uint32_t shdr_status, shdr_add_status;
10324 union lpfc_sli4_cfg_shdr *shdr;
10325
10326 /* The number of sgls to be posted */
10327 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
10328
10329 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
10330 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10331 if (reqlen > PAGE_SIZE) {
10332 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10333 "2559 Block sgl registration required DMA "
10334 "size (%d) great than a page\n", reqlen);
10335 return -ENOMEM;
10336 }
10337 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10338 if (!mbox) {
10339 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10340 "2560 Failed to allocate mbox cmd memory\n");
10341 return -ENOMEM;
10342 }
10343
10344 /* Allocate DMA memory and set up the non-embedded mailbox command */
10345 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10346 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10347 LPFC_SLI4_MBX_NEMBED);
10348
10349 if (alloclen < reqlen) {
10350 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10351 "0285 Allocated DMA memory size (%d) is "
10352 "less than the requested DMA memory "
10353 "size (%d)\n", alloclen, reqlen);
10354 lpfc_sli4_mbox_cmd_free(phba, mbox);
10355 return -ENOMEM;
10356 }
10357
10358 /* Get the first SGE entry from the non-embedded DMA memory */
10359 if (unlikely(!mbox->sge_array)) {
10360 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10361 "2525 Failed to get the non-embedded SGE "
10362 "virtual address\n");
10363 lpfc_sli4_mbox_cmd_free(phba, mbox);
10364 return -ENOMEM;
10365 }
10366 viraddr = mbox->sge_array->addr[0];
10367
10368 /* Set up the SGL pages in the non-embedded DMA pages */
10369 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10370 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10371
10372 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
10373 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
10374 /* Set up the sge entry */
10375 sgl_pg_pairs->sgl_pg0_addr_lo =
10376 cpu_to_le32(putPaddrLow(sglq_entry->phys));
10377 sgl_pg_pairs->sgl_pg0_addr_hi =
10378 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
10379 sgl_pg_pairs->sgl_pg1_addr_lo =
10380 cpu_to_le32(putPaddrLow(0));
10381 sgl_pg_pairs->sgl_pg1_addr_hi =
10382 cpu_to_le32(putPaddrHigh(0));
10383 /* Keep the first xritag on the list */
10384 if (pg_pairs == 0)
10385 xritag_start = sglq_entry->sli4_xritag;
10386 sgl_pg_pairs++;
10387 }
10388 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10389 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
10390 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10391 /* Perform endian conversion if necessary */
10392 sgl->word0 = cpu_to_le32(sgl->word0);
10393
10394 if (!phba->sli4_hba.intr_enable)
10395 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10396 else {
10397 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10398 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10399 }
10400 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10401 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10402 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10403 if (rc != MBX_TIMEOUT)
10404 lpfc_sli4_mbox_cmd_free(phba, mbox);
10405 if (shdr_status || shdr_add_status || rc) {
10406 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10407 "2513 POST_SGL_BLOCK mailbox command failed "
10408 "status x%x add_status x%x mbx status x%x\n",
10409 shdr_status, shdr_add_status, rc);
10410 rc = -ENXIO;
10411 }
10412 return rc;
10413}
10414
10415/**
10416 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
10417 * @phba: pointer to lpfc hba data structure.
10418 * @sblist: pointer to scsi buffer list.
10419 * @cnt: number of scsi buffers on the list.
10420 *
10421 * This routine is invoked to post a block of @cnt scsi sgl pages from a
10422 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
10423 * No Lock is held.
10424 *
10425 **/
10426int
10427lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10428 int cnt)
10429{
10430 struct lpfc_scsi_buf *psb;
10431 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10432 struct sgl_page_pairs *sgl_pg_pairs;
10433 void *viraddr;
10434 LPFC_MBOXQ_t *mbox;
10435 uint32_t reqlen, alloclen, pg_pairs;
10436 uint32_t mbox_tmo;
10437 uint16_t xritag_start = 0;
10438 int rc = 0;
10439 uint32_t shdr_status, shdr_add_status;
10440 dma_addr_t pdma_phys_bpl1;
10441 union lpfc_sli4_cfg_shdr *shdr;
10442
10443 /* Calculate the requested length of the dma memory */
10444 reqlen = cnt * sizeof(struct sgl_page_pairs) +
10445 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10446 if (reqlen > PAGE_SIZE) {
10447 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10448 "0217 Block sgl registration required DMA "
10449 "size (%d) great than a page\n", reqlen);
10450 return -ENOMEM;
10451 }
10452 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10453 if (!mbox) {
10454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10455 "0283 Failed to allocate mbox cmd memory\n");
10456 return -ENOMEM;
10457 }
10458
10459 /* Allocate DMA memory and set up the non-embedded mailbox command */
10460 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10461 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10462 LPFC_SLI4_MBX_NEMBED);
10463
10464 if (alloclen < reqlen) {
10465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10466 "2561 Allocated DMA memory size (%d) is "
10467 "less than the requested DMA memory "
10468 "size (%d)\n", alloclen, reqlen);
10469 lpfc_sli4_mbox_cmd_free(phba, mbox);
10470 return -ENOMEM;
10471 }
10472
10473 /* Get the first SGE entry from the non-embedded DMA memory */
10474 if (unlikely(!mbox->sge_array)) {
10475 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10476 "2565 Failed to get the non-embedded SGE "
10477 "virtual address\n");
10478 lpfc_sli4_mbox_cmd_free(phba, mbox);
10479 return -ENOMEM;
10480 }
10481 viraddr = mbox->sge_array->addr[0];
10482
10483 /* Set up the SGL pages in the non-embedded DMA pages */
10484 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10485 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10486
10487 pg_pairs = 0;
10488 list_for_each_entry(psb, sblist, list) {
10489 /* Set up the sge entry */
10490 sgl_pg_pairs->sgl_pg0_addr_lo =
10491 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
10492 sgl_pg_pairs->sgl_pg0_addr_hi =
10493 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
10494 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
10495 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
10496 else
10497 pdma_phys_bpl1 = 0;
10498 sgl_pg_pairs->sgl_pg1_addr_lo =
10499 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
10500 sgl_pg_pairs->sgl_pg1_addr_hi =
10501 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
10502 /* Keep the first xritag on the list */
10503 if (pg_pairs == 0)
10504 xritag_start = psb->cur_iocbq.sli4_xritag;
10505 sgl_pg_pairs++;
10506 pg_pairs++;
10507 }
10508 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10509 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10510 /* Perform endian conversion if necessary */
10511 sgl->word0 = cpu_to_le32(sgl->word0);
10512
10513 if (!phba->sli4_hba.intr_enable)
10514 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10515 else {
10516 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10517 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10518 }
10519 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10520 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10521 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10522 if (rc != MBX_TIMEOUT)
10523 lpfc_sli4_mbox_cmd_free(phba, mbox);
10524 if (shdr_status || shdr_add_status || rc) {
10525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10526 "2564 POST_SGL_BLOCK mailbox command failed "
10527 "status x%x add_status x%x mbx status x%x\n",
10528 shdr_status, shdr_add_status, rc);
10529 rc = -ENXIO;
10530 }
10531 return rc;
10532}
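/*
 * Editor's sketch (not part of the driver): the request length computed
 * above must fit in a single page, so the number of SGL page pairs one
 * POST_SGL_PAGES command can carry is bounded roughly as shown below.
 * The helper name is hypothetical and only illustrates the sizing rule.
 */
static inline uint32_t lpfc_example_max_sgl_pg_pairs(void)
{
	return (PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
		sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
}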
10533
10534/**
10535 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
10536 * @phba: pointer to lpfc_hba struct that the frame was received on
10537 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10538 *
10539 * This function checks the fields in the @fc_hdr to see if the FC frame is a
10540 * valid type of frame that the LPFC driver will handle. This function will
10541 * return zero if the frame is a valid frame or a nonzero value when the
10542 * frame does not pass the check.
10543 **/
10544static int
10545lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
10546{
10547 char *rctl_names[] = FC_RCTL_NAMES_INIT;
10548 char *type_names[] = FC_TYPE_NAMES_INIT;
10549 struct fc_vft_header *fc_vft_hdr;
10550
10551 switch (fc_hdr->fh_r_ctl) {
10552 case FC_RCTL_DD_UNCAT: /* uncategorized information */
10553 case FC_RCTL_DD_SOL_DATA: /* solicited data */
10554 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
10555 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
10556 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
10557 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
10558 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
10559 case FC_RCTL_DD_CMD_STATUS: /* command status */
10560 case FC_RCTL_ELS_REQ: /* extended link services request */
10561 case FC_RCTL_ELS_REP: /* extended link services reply */
10562 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
10563 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
10564 case FC_RCTL_BA_NOP: /* basic link service NOP */
10565 case FC_RCTL_BA_ABTS: /* basic link service abort */
10566 case FC_RCTL_BA_RMC: /* remove connection */
10567 case FC_RCTL_BA_ACC: /* basic accept */
10568 case FC_RCTL_BA_RJT: /* basic reject */
10569 case FC_RCTL_BA_PRMT:
10570 case FC_RCTL_ACK_1: /* acknowledge_1 */
10571 case FC_RCTL_ACK_0: /* acknowledge_0 */
10572 case FC_RCTL_P_RJT: /* port reject */
10573 case FC_RCTL_F_RJT: /* fabric reject */
10574 case FC_RCTL_P_BSY: /* port busy */
10575 case FC_RCTL_F_BSY: /* fabric busy to data frame */
10576 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
10577 case FC_RCTL_LCR: /* link credit reset */
10578 case FC_RCTL_END: /* end */
10579 break;
10580 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
10581 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10582 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
10583 return lpfc_fc_frame_check(phba, fc_hdr);
10584 default:
10585 goto drop;
10586 }
10587 switch (fc_hdr->fh_type) {
10588 case FC_TYPE_BLS:
10589 case FC_TYPE_ELS:
10590 case FC_TYPE_FCP:
10591 case FC_TYPE_CT:
10592 break;
10593 case FC_TYPE_IP:
10594 case FC_TYPE_ILS:
10595 default:
10596 goto drop;
10597 }
10598 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10599 "2538 Received frame rctl:%s type:%s\n",
10600 rctl_names[fc_hdr->fh_r_ctl],
10601 type_names[fc_hdr->fh_type]);
10602 return 0;
10603drop:
10604 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
10605 "2539 Dropped frame rctl:%s type:%s\n",
10606 rctl_names[fc_hdr->fh_r_ctl],
10607 type_names[fc_hdr->fh_type]);
10608 return 1;
10609}
10610
10611/**
10612 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
10613 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10614 *
10615 * This function processes the FC header to retrieve the VFI from the VF
10616 * header, if one exists. This function will return the VFI if one exists
10617 * or 0 if no VSAN Header exists.
10618 **/
10619static uint32_t
10620lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10621{
10622 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10623
10624 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10625 return 0;
10626 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10627}
10628
10629/**
10630 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
10631 * @phba: Pointer to the HBA structure to search for the vport on
10632 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10633 * @fcfi: The FCF index (fcfi) that the frame was received on
10634 *
10635 * This function searches the @phba for a vport that matches the content of the
10636 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
10637 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
10638 * returns the matching vport pointer or NULL if unable to match frame to a
10639 * vport.
10640 **/
10641static struct lpfc_vport *
10642lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10643 uint16_t fcfi)
10644{
10645 struct lpfc_vport **vports;
10646 struct lpfc_vport *vport = NULL;
10647 int i;
10648 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10649 fc_hdr->fh_d_id[1] << 8 |
10650 fc_hdr->fh_d_id[2]);
10651
10652 vports = lpfc_create_vport_work_array(phba);
10653 if (vports != NULL)
10654 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10655 if (phba->fcf.fcfi == fcfi &&
10656 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10657 vports[i]->fc_myDID == did) {
10658 vport = vports[i];
10659 break;
10660 }
10661 }
10662 lpfc_destroy_vport_work_array(phba, vports);
10663 return vport;
10664}
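/*
 * Editor's sketch: FC addresses (D_ID/S_ID) are 3 bytes wide in the frame
 * header and are assembled into a host 32-bit value as done inline above.
 * The helper below is hypothetical and only mirrors that open-coded logic.
 */
static inline uint32_t lpfc_example_fc_hdr_did(struct fc_frame_header *fc_hdr)
{
	return (fc_hdr->fh_d_id[0] << 16) |
	       (fc_hdr->fh_d_id[1] << 8) |
	       fc_hdr->fh_d_id[2];
}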
10665
10666/**
10667 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10668 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10669 *
10670 * This function searches through the existing incomplete sequences that have
10671 * been sent to this @vport. If the frame matches one of the incomplete
10672 * sequences then the dbuf in the @dmabuf is added to the list of frames that
10673 * make up that sequence. If no sequence is found that matches this frame then
10674 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
10675 * This function returns a pointer to the first dmabuf in the sequence list that
10676 * the frame was linked to.
10677 **/
10678static struct hbq_dmabuf *
10679lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10680{
10681 struct fc_frame_header *new_hdr;
10682 struct fc_frame_header *temp_hdr;
10683 struct lpfc_dmabuf *d_buf;
10684 struct lpfc_dmabuf *h_buf;
10685 struct hbq_dmabuf *seq_dmabuf = NULL;
10686 struct hbq_dmabuf *temp_dmabuf = NULL;
10687
10688 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10689 /* Use the hdr_buf to find the sequence that this frame belongs to */
10690 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
10691 temp_hdr = (struct fc_frame_header *)h_buf->virt;
10692 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
10693 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
10694 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
10695 continue;
10696 /* found a pending sequence that matches this frame */
10697 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10698 break;
10699 }
10700 if (!seq_dmabuf) {
10701 /*
10702 * This indicates first frame received for this sequence.
10703 * Queue the buffer on the vport's rcv_buffer_list.
10704 */
10705 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10706 return dmabuf;
10707 }
10708 temp_hdr = seq_dmabuf->hbuf.virt;
10709 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
10710 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
10711 return dmabuf;
10712 }
10713 /* find the correct place in the sequence to insert this frame */
10714 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10715 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10716 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
10717 /*
10718 * If the frame's sequence count is greater than the frame on
10719 * the list then insert the frame right after this frame
10720 */
10721 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
10722 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10723 return seq_dmabuf;
10724 }
10725 }
10726 return NULL;
10727}
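/*
 * Editor's note on the layout used above (derived from the code, not from
 * separate documentation): each pending sequence is anchored on
 * vport->rcv_buffer_list through the first frame's hbuf.list, and the
 * remaining frames of that sequence are kept in fh_seq_cnt order on the
 * anchor's dbuf.list:
 *
 *   vport->rcv_buffer_list -> seq A (hbuf.list) -> seq B (hbuf.list) -> ...
 *                               |                    |
 *                               +- dbuf.list of      +- dbuf.list of
 *                                  later frames         later frames
 */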
10728
10729/**
10730 * lpfc_seq_complete - Indicates if a sequence is complete
10731 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10732 *
10733 * This function checks the sequence, starting with the frame described by
10734 * @dmabuf, to see if all the frames associated with this sequence are present.
10735 * The frames associated with this sequence are linked to the @dmabuf using the
10736 * dbuf list. This function looks for three major things. 1) That the first frame
10737 * has a sequence count of zero. 2) That there is a frame with the last frame of
10738 * sequence bit set. 3) That there are no holes in the sequence count. The function will
10739 * return 1 when the sequence is complete, otherwise it will return 0.
10740 **/
10741static int
10742lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10743{
10744 struct fc_frame_header *hdr;
10745 struct lpfc_dmabuf *d_buf;
10746 struct hbq_dmabuf *seq_dmabuf;
10747 uint32_t fctl;
10748 int seq_count = 0;
10749
10750 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10751	/* make sure first frame of sequence has a sequence count of zero */
10752 if (hdr->fh_seq_cnt != seq_count)
10753 return 0;
10754 fctl = (hdr->fh_f_ctl[0] << 16 |
10755 hdr->fh_f_ctl[1] << 8 |
10756 hdr->fh_f_ctl[2]);
10757 /* If last frame of sequence we can return success. */
10758 if (fctl & FC_FC_END_SEQ)
10759 return 1;
10760 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
10761 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10762 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10763 /* If there is a hole in the sequence count then fail. */
10764 if (++seq_count != hdr->fh_seq_cnt)
10765 return 0;
10766 fctl = (hdr->fh_f_ctl[0] << 16 |
10767 hdr->fh_f_ctl[1] << 8 |
10768 hdr->fh_f_ctl[2]);
10769 /* If last frame of sequence we can return success. */
10770 if (fctl & FC_FC_END_SEQ)
10771 return 1;
10772 }
10773 return 0;
10774}
10775
10776/**
10777 * lpfc_prep_seq - Prep sequence for ULP processing
10778 * @vport: Pointer to the vport on which this sequence was received
10779 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10780 *
10781 * This function takes a sequence, described by a list of frames, and creates
10782 * a list of iocbq structures to describe the sequence. This iocbq list will be
10783 * passed to the generic unsolicited sequence handler. This routine
10784 * returns a pointer to the first iocbq in the list. If the function is unable
10785 * to allocate an iocbq then it throws out the received frames that could not
10786 * be described and returns a pointer to the first iocbq. If unable to
10787 * allocate any iocbqs (including the first) this function will return NULL.
10788 **/
10789static struct lpfc_iocbq *
10790lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10791{
10792 struct lpfc_dmabuf *d_buf, *n_buf;
10793 struct lpfc_iocbq *first_iocbq, *iocbq;
10794 struct fc_frame_header *fc_hdr;
10795 uint32_t sid;
10796
10797 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10798 /* remove from receive buffer list */
10799 list_del_init(&seq_dmabuf->hbuf.list);
10800 /* get the Remote Port's SID */
10801 sid = (fc_hdr->fh_s_id[0] << 16 |
10802 fc_hdr->fh_s_id[1] << 8 |
10803 fc_hdr->fh_s_id[2]);
10804 /* Get an iocbq struct to fill in. */
10805 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10806 if (first_iocbq) {
10807 /* Initialize the first IOCB. */
10808 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
10809 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
10810 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
10811 first_iocbq->iocb.unsli3.rcvsli3.vpi =
10812 vport->vpi + vport->phba->vpi_base;
10813 /* put the first buffer into the first IOCBq */
10814 first_iocbq->context2 = &seq_dmabuf->dbuf;
10815 first_iocbq->context3 = NULL;
10816 first_iocbq->iocb.ulpBdeCount = 1;
10817 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10818 LPFC_DATA_BUF_SIZE;
10819 first_iocbq->iocb.un.rcvels.remoteID = sid;
10820 }
10821 iocbq = first_iocbq;
10822 /*
10823 * Each IOCBq can have two Buffers assigned, so go through the list
10824 * of buffers for this sequence and save two buffers in each IOCBq
10825 */
10826 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
10827 if (!iocbq) {
10828 lpfc_in_buf_free(vport->phba, d_buf);
10829 continue;
10830 }
10831 if (!iocbq->context3) {
10832 iocbq->context3 = d_buf;
10833 iocbq->iocb.ulpBdeCount++;
10834 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
10835 LPFC_DATA_BUF_SIZE;
10836 } else {
10837 iocbq = lpfc_sli_get_iocbq(vport->phba);
10838 if (!iocbq) {
10839 if (first_iocbq) {
10840 first_iocbq->iocb.ulpStatus =
10841 IOSTAT_FCP_RSP_ERROR;
10842 first_iocbq->iocb.un.ulpWord[4] =
10843 IOERR_NO_RESOURCES;
10844 }
10845 lpfc_in_buf_free(vport->phba, d_buf);
10846 continue;
10847 }
10848 iocbq->context2 = d_buf;
10849 iocbq->context3 = NULL;
10850 iocbq->iocb.ulpBdeCount = 1;
10851 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10852 LPFC_DATA_BUF_SIZE;
10853 iocbq->iocb.un.rcvels.remoteID = sid;
10854 list_add_tail(&iocbq->list, &first_iocbq->list);
10855 }
10856 }
10857 return first_iocbq;
10858}
10859
10860/**
10861 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
10862 * @phba: Pointer to HBA context object.
10863 *
10864 * This function is called with no lock held. It processes all of the
10865 * received buffers and passes them to the upper layers when a received
10866 * buffer indicates that it is the final frame in the sequence. The interrupt
10867 * service routine processes received buffers in interrupt context, adds the
10868 * received dma buffers to the rb_pend_list queue and signals the worker thread.
10869 * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
10870 * appropriate receive function when the final frame in a sequence is received.
10871 **/
10872int
10873lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
10874{
10875 LIST_HEAD(cmplq);
10876 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
10877 struct fc_frame_header *fc_hdr;
10878 struct lpfc_vport *vport;
10879 uint32_t fcfi;
10880 struct lpfc_iocbq *iocbq;
10881
10882 /* Clear hba flag and get all received buffers into the cmplq */
10883 spin_lock_irq(&phba->hbalock);
10884 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
10885 list_splice_init(&phba->rb_pend_list, &cmplq);
10886 spin_unlock_irq(&phba->hbalock);
10887
10888 /* Process each received buffer */
10889 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
10890 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10891 /* check to see if this a valid type of frame */
10892 if (lpfc_fc_frame_check(phba, fc_hdr)) {
10893 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10894 continue;
10895 }
10896 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
10897 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
10898 if (!vport) {
10899 /* throw out the frame */
10900 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10901 continue;
10902 }
10903 /* Link this frame */
10904 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
10905 if (!seq_dmabuf) {
10906 /* unable to add frame to vport - throw it out */
10907 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10908 continue;
10909 }
10910 /* If not last frame in sequence continue processing frames. */
10911 if (!lpfc_seq_complete(seq_dmabuf)) {
10912 /*
10913			 * When saving off frames, post a new one and mark this
10914 * frame to be freed when it is finished.
10915 **/
10916 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
10917 dmabuf->tag = -1;
10918 continue;
10919 }
10920 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10921 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
10922 if (!lpfc_complete_unsol_iocb(phba,
10923 &phba->sli.ring[LPFC_ELS_RING],
10924 iocbq, fc_hdr->fh_r_ctl,
10925 fc_hdr->fh_type))
10926 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10927 "2540 Ring %d handler: unexpected Rctl "
10928 "x%x Type x%x received\n",
10929 LPFC_ELS_RING,
10930 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
10931	}
10932 return 0;
10933}
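/*
 * Editor's note summarizing the receive path implemented above (taken from
 * the code itself): the interrupt handler queues hbq_dmabufs on
 * rb_pend_list and wakes the worker thread, which then runs
 *   lpfc_fc_frame_check() -> lpfc_fc_frame_to_vport() ->
 *   lpfc_fc_frame_add() -> lpfc_seq_complete() -> lpfc_prep_seq() ->
 *   lpfc_complete_unsol_iocb()
 * and frees the frame at whichever step fails.
 */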
10934
10935/**
10936 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
10937 * @phba: pointer to lpfc hba data structure.
10938 *
10939 * This routine is invoked to post rpi header templates to the
10940 * HBA consistent with the SLI-4 interface spec. This routine
10941 * posts a PAGE_SIZE memory region to the port to hold up to
10942 * 64 rpi context headers.
10943 *
10944 * This routine does not require any locks. Its usage is expected
10945 * to be at driver load or during reset recovery, when the driver
10946 * executes sequentially.
10947 *
10948 * Return codes
10949 * 	0 - successful
10950 * EIO - The mailbox failed to complete successfully.
10951 * When this error occurs, the driver is not guaranteed
10952 * to have any rpi regions posted to the device and
10953 * must either attempt to repost the regions or take a
10954 * fatal error.
10955 **/
10956int
10957lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
10958{
10959 struct lpfc_rpi_hdr *rpi_page;
10960 uint32_t rc = 0;
10961
10962 /* Post all rpi memory regions to the port. */
10963 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
10964 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
10965 if (rc != MBX_SUCCESS) {
10966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10967 "2008 Error %d posting all rpi "
10968 "headers\n", rc);
10969 rc = -EIO;
10970 break;
10971 }
10972 }
10973
10974 return rc;
10975}
10976
10977/**
10978 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
10979 * @phba: pointer to lpfc hba data structure.
10980 * @rpi_page: pointer to the rpi memory region.
10981 *
10982 * This routine is invoked to post a single rpi header to the
10983 * HBA consistent with the SLI-4 interface spec. This memory region
10984 * maps up to 64 rpi context regions.
10985 *
10986 * Return codes
10987 * 	0 - successful
10988 * ENOMEM - No available memory
10989 * EIO - The mailbox failed to complete successfully.
10990 **/
10991int
10992lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
10993{
10994 LPFC_MBOXQ_t *mboxq;
10995 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
10996 uint32_t rc = 0;
10997 uint32_t mbox_tmo;
10998 uint32_t shdr_status, shdr_add_status;
10999 union lpfc_sli4_cfg_shdr *shdr;
11000
11001 /* The port is notified of the header region via a mailbox command. */
11002 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11003 if (!mboxq) {
11004 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11005 "2001 Unable to allocate memory for issuing "
11006 "SLI_CONFIG_SPECIAL mailbox command\n");
11007 return -ENOMEM;
11008 }
11009
11010	/* Post the rpi header region to the port. */
11011 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
11012 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
11013 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11014 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
11015 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
11016 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
11017 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
11018 hdr_tmpl, rpi_page->page_count);
11019 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
11020 rpi_page->start_rpi);
11021 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11022 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11023 if (!phba->sli4_hba.intr_enable)
11024 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11025 else
11026 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11027 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11028 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11029 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11030 if (rc != MBX_TIMEOUT)
11031 mempool_free(mboxq, phba->mbox_mem_pool);
11032 if (shdr_status || shdr_add_status || rc) {
11033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11034 "2514 POST_RPI_HDR mailbox failed with "
11035 "status x%x add_status x%x, mbx status x%x\n",
11036 shdr_status, shdr_add_status, rc);
11037 rc = -ENXIO;
11038 }
11039 return rc;
11040}
11041
11042/**
11043 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
11044 * @phba: pointer to lpfc hba data structure.
11045 *
11046 * This routine is invoked to allocate the next available rpi from the
11047 * driver's rpi bitmask, consistent with the SLI-4 interface spec. If the
11048 * allocation leaves fewer than LPFC_RPI_LOW_WATER_MARK rpis available, an
11049 * additional rpi header region is created and posted to the port.
11050 *
11051 * Returns
11052 * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11053 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11054 **/
11055int
11056lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11057{
11058 int rpi;
11059 uint16_t max_rpi, rpi_base, rpi_limit;
11060 uint16_t rpi_remaining;
11061 struct lpfc_rpi_hdr *rpi_hdr;
11062
11063 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
11064 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
11065 rpi_limit = phba->sli4_hba.next_rpi;
11066
11067 /*
11068 * The valid rpi range is not guaranteed to be zero-based. Start
11069 * the search at the rpi_base as reported by the port.
11070 */
11071 spin_lock_irq(&phba->hbalock);
11072 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
11073 if (rpi >= rpi_limit || rpi < rpi_base)
11074 rpi = LPFC_RPI_ALLOC_ERROR;
11075 else {
11076 set_bit(rpi, phba->sli4_hba.rpi_bmask);
11077 phba->sli4_hba.max_cfg_param.rpi_used++;
11078 phba->sli4_hba.rpi_count++;
11079 }
11080
11081 /*
11082	 * Don't try to allocate more rpi header regions if the device's limit
11083	 * on available rpis has been exhausted.
11084 */
11085 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
11086 (phba->sli4_hba.rpi_count >= max_rpi)) {
11087 spin_unlock_irq(&phba->hbalock);
11088 return rpi;
11089 }
11090
11091 /*
11092 * If the driver is running low on rpi resources, allocate another
11093 * page now. Note that the next_rpi value is used because
11094	 * it represents how many are actually in use, whereas max_rpi is the
11095	 * maximum number supported by the device.
11096 */
11097 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
11098 phba->sli4_hba.rpi_count;
11099 spin_unlock_irq(&phba->hbalock);
11100 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
11101 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
11102 if (!rpi_hdr) {
11103 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11104 "2002 Error Could not grow rpi "
11105 "count\n");
11106 } else {
11107 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
11108 }
11109 }
11110
11111 return rpi;
11112}
11113
11114/**
11115 * lpfc_sli4_free_rpi - Release an rpi for reuse.
11116 * @phba: pointer to lpfc hba data structure.
11117 *
11118 * This routine is invoked to release an rpi to the pool of
11119 * available rpis maintained by the driver.
11120 **/
11121void
11122lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11123{
11124 spin_lock_irq(&phba->hbalock);
11125 clear_bit(rpi, phba->sli4_hba.rpi_bmask);
11126 phba->sli4_hba.rpi_count--;
11127 phba->sli4_hba.max_cfg_param.rpi_used--;
11128 spin_unlock_irq(&phba->hbalock);
11129}
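/*
 * Editor's sketch (hypothetical caller, not in the driver): an rpi obtained
 * from lpfc_sli4_alloc_rpi() must be checked against LPFC_RPI_ALLOC_ERROR
 * and is eventually handed back with lpfc_sli4_free_rpi() when the login it
 * backed is torn down.
 */
static int lpfc_example_use_rpi(struct lpfc_hba *phba)
{
	int rpi = lpfc_sli4_alloc_rpi(phba);

	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOSPC;
	/* ... use the rpi, e.g. when registering a remote port login ... */
	lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}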
11130
11131/**
11132 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
11133 * @phba: pointer to lpfc hba data structure.
11134 *
11135 * This routine is invoked to free the memory region that
11136 * tracks rpi allocation via a bitmask.
11137 **/
11138void
11139lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
11140{
11141 kfree(phba->sli4_hba.rpi_bmask);
11142}
11143
11144/**
11145 * lpfc_sli4_resume_rpi - Resume an rpi on the port
11146 * @ndlp: pointer to the node whose rpi is to be resumed.
11147 *
11148 * This routine is invoked to issue a RESUME_RPI mailbox command to the
11149 * port for the rpi associated with @ndlp.
11150 **/
11151int
11152lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
11153{
11154 LPFC_MBOXQ_t *mboxq;
11155 struct lpfc_hba *phba = ndlp->phba;
11156 int rc;
11157
11158	/* Resuming the rpi requires a mailbox command issued to the port. */
11159 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11160 if (!mboxq)
11161 return -ENOMEM;
11162
11163	/* Build and issue the RESUME_RPI mailbox command. */
11164 lpfc_resume_rpi(mboxq, ndlp);
11165 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11166 if (rc == MBX_NOT_FINISHED) {
11167 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11168 "2010 Resume RPI Mailbox failed "
11169 "status %d, mbxStatus x%x\n", rc,
11170 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11171 mempool_free(mboxq, phba->mbox_mem_pool);
11172 return -EIO;
11173 }
11174 return 0;
11175}
11176
11177/**
11178 * lpfc_sli4_init_vpi - Initialize a vpi with the port
11179 * @phba: pointer to lpfc hba data structure.
11180 * @vpi: vpi value to activate with the port.
11181 *
11182 * This routine is invoked to activate a vpi with the
11183 * port when the host intends to use vports with a
11184 * nonzero vpi.
11185 *
11186 * Returns:
11187 * 0 success
11188 * -Evalue otherwise
11189 **/
11190int
11191lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11192{
11193 LPFC_MBOXQ_t *mboxq;
11194 int rc = 0;
11195 uint32_t mbox_tmo;
11196
11197 if (vpi == 0)
11198 return -EINVAL;
11199 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11200 if (!mboxq)
11201 return -ENOMEM;
11202 lpfc_init_vpi(mboxq, vpi);
11203 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11204 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11205 if (rc != MBX_TIMEOUT)
11206 mempool_free(mboxq, phba->mbox_mem_pool);
11207 if (rc != MBX_SUCCESS) {
11208 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11209 "2022 INIT VPI Mailbox failed "
11210 "status %d, mbxStatus x%x\n", rc,
11211 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11212 rc = -EIO;
11213 }
11214 return rc;
11215}
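/*
 * Editor's sketch (hypothetical caller): on SLI-4 ports a nonzero vpi is
 * initialized with the port before a vport starts using it, e.g.:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4 && vpi)
 *		rc = lpfc_sli4_init_vpi(phba, vpi);
 */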
11216
11217/**
11218 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
11219 * @phba: pointer to lpfc hba data structure.
11220 * @mboxq: Pointer to mailbox object.
11221 *
11222 * This routine is invoked to manually add a single FCF record. The caller
11223 * must pass a completely initialized FCF_Record. This routine takes
11224 * care of the nonembedded mailbox operations.
11225 **/
11226static void
11227lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11228{
11229 void *virt_addr;
11230 union lpfc_sli4_cfg_shdr *shdr;
11231 uint32_t shdr_status, shdr_add_status;
11232
11233 virt_addr = mboxq->sge_array->addr[0];
11234 /* The IOCTL status is embedded in the mailbox subheader. */
11235 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
11236 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11237 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11238
11239 if ((shdr_status || shdr_add_status) &&
11240 (shdr_status != STATUS_FCF_IN_USE))
11241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11242 "2558 ADD_FCF_RECORD mailbox failed with "
11243 "status x%x add_status x%x\n",
11244 shdr_status, shdr_add_status);
11245
11246 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11247}
11248
11249/**
11250 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
11251 * @phba: pointer to lpfc hba data structure.
11252 * @fcf_record: pointer to the initialized fcf record to add.
11253 *
11254 * This routine is invoked to manually add a single FCF record. The caller
11255 * must pass a completely initialized FCF_Record. This routine takes
11256 * care of the nonembedded mailbox operations.
11257 **/
11258int
11259lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11260{
11261 int rc = 0;
11262 LPFC_MBOXQ_t *mboxq;
11263 uint8_t *bytep;
11264 void *virt_addr;
11265 dma_addr_t phys_addr;
11266 struct lpfc_mbx_sge sge;
11267 uint32_t alloc_len, req_len;
11268 uint32_t fcfindex;
11269
11270 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11271 if (!mboxq) {
11272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11273 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
11274 return -ENOMEM;
11275 }
11276
11277 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
11278 sizeof(uint32_t);
11279
11280 /* Allocate DMA memory and set up the non-embedded mailbox command */
11281 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11282 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
11283 req_len, LPFC_SLI4_MBX_NEMBED);
11284 if (alloc_len < req_len) {
11285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11286 "2523 Allocated DMA memory size (x%x) is "
11287 "less than the requested DMA memory "
11288 "size (x%x)\n", alloc_len, req_len);
11289 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11290 return -ENOMEM;
11291 }
11292
11293 /*
11294 * Get the first SGE entry from the non-embedded DMA memory. This
11295 * routine only uses a single SGE.
11296 */
11297 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11298 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11299 if (unlikely(!mboxq->sge_array)) {
11300 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11301 "2526 Failed to get the non-embedded SGE "
11302 "virtual address\n");
11303 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11304 return -ENOMEM;
11305 }
11306 virt_addr = mboxq->sge_array->addr[0];
11307 /*
11308 * Configure the FCF record for FCFI 0. This is the driver's
11309 * hardcoded default and gets used in nonFIP mode.
11310 */
11311 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
11312 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11313 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
11314
11315 /*
11316 * Copy the fcf_index and the FCF Record Data. The data starts after
11317 * the FCoE header plus word10. The data copy needs to be endian
11318 * correct.
11319 */
11320 bytep += sizeof(uint32_t);
11321 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
11322 mboxq->vport = phba->pport;
11323 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
11324 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11325 if (rc == MBX_NOT_FINISHED) {
11326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11327 "2515 ADD_FCF_RECORD mailbox failed with "
11328 "status 0x%x\n", rc);
11329 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11330 rc = -EIO;
11331 } else
11332 rc = 0;
11333
11334 return rc;
11335}
11336
11337/**
11338 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
11339 * @phba: pointer to lpfc hba data structure.
11340 * @fcf_record: pointer to the fcf record to write the default data.
11341 * @fcf_index: FCF table entry index.
11342 *
11343 * This routine is invoked to build the driver's default FCF record. The
11344 * values used are hardcoded. This routine handles memory initialization.
11345 *
11346 **/
11347void
11348lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11349 struct fcf_record *fcf_record,
11350 uint16_t fcf_index)
11351{
11352 memset(fcf_record, 0, sizeof(struct fcf_record));
11353 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
11354 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
11355 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
11356 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
11357 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
11358 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
11359 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
11360 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
11361 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
11362 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
11363 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
11364 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
11365 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
11366 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
11367 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
11368 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
11369 /* Set the VLAN bit map */
11370 if (phba->valid_vlan) {
11371 fcf_record->vlan_bitmap[phba->vlan_id / 8]
11372 = 1 << (phba->vlan_id % 8);
11373 }
11374}
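/*
 * Editor's sketch (hypothetical caller): the default FCF record is built in
 * a local structure and then handed to lpfc_sli4_add_fcf_record(), which
 * copies it into the non-embedded mailbox payload.
 */
static int lpfc_example_add_dflt_fcf(struct lpfc_hba *phba)
{
	struct fcf_record fcf_record;

	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record,
					LPFC_FCOE_FCF_DEF_INDEX);
	return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}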
11375
11376/**
11377 * lpfc_sli4_read_fcf_record - Read an FCF record from the port.
11378 * @phba: pointer to lpfc hba data structure.
11379 * @fcf_index: FCF table entry offset.
11380 *
11381 * This routine is invoked to read an FCF record from the
11382 * device starting at the given @fcf_index.
11383 **/
11384int
11385lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11386{
11387 int rc = 0, error;
11388 LPFC_MBOXQ_t *mboxq;
11389 void *virt_addr;
11390 dma_addr_t phys_addr;
11391 uint8_t *bytep;
11392 struct lpfc_mbx_sge sge;
11393 uint32_t alloc_len, req_len;
11394 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11395
11396 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11397 if (!mboxq) {
11398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11399 "2000 Failed to allocate mbox for "
11400 "READ_FCF cmd\n");
11401 return -ENOMEM;
11402 }
11403
11404 req_len = sizeof(struct fcf_record) +
11405 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
11406
11407 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
11408 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11409 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11410 LPFC_SLI4_MBX_NEMBED);
11411
11412 if (alloc_len < req_len) {
11413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11414 "0291 Allocated DMA memory size (x%x) is "
11415 "less than the requested DMA memory "
11416 "size (x%x)\n", alloc_len, req_len);
11417 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11418 return -ENOMEM;
11419 }
11420
11421 /* Get the first SGE entry from the non-embedded DMA memory. This
11422 * routine only uses a single SGE.
11423 */
11424 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11425 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11426 if (unlikely(!mboxq->sge_array)) {
11427 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11428 "2527 Failed to get the non-embedded SGE "
11429 "virtual address\n");
11430 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11431 return -ENOMEM;
11432 }
11433 virt_addr = mboxq->sge_array->addr[0];
11434 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11435
11436 /* Set up command fields */
11437 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11438 /* Perform necessary endian conversion */
11439 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11440 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11441 mboxq->vport = phba->pport;
11442 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11443 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11444 if (rc == MBX_NOT_FINISHED) {
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 error = -EIO;
11447 } else
11448 error = 0;
11449 return error;
11450}
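/*
 * Editor's sketch (hypothetical caller): an FCF table scan would typically
 * start at the first entry; the completion handler
 * lpfc_mbx_cmpl_read_fcf_record() is then responsible for processing the
 * returned record.
 */
static int lpfc_example_start_fcf_scan(struct lpfc_hba *phba)
{
	return lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
}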
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6a..7d37eb7459bf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */
33struct lpfc_sli4_rspiocb_info {
34 uint8_t hw_status;
35 uint8_t bfield;
36#define LPFC_XB 0x1
37#define LPFC_PV 0x2
38 uint8_t priority;
39 uint8_t reserved;
40};
41
32/* This structure is used to handle IOCB requests / responses */ 42/* This structure is used to handle IOCB requests / responses */
33struct lpfc_iocbq { 43struct lpfc_iocbq {
34 /* lpfc_iocbqs are used in double linked lists */ 44 /* lpfc_iocbqs are used in double linked lists */
35 struct list_head list; 45 struct list_head list;
36 struct list_head clist; 46 struct list_head clist;
37 uint16_t iotag; /* pre-assigned IO tag */ 47 uint16_t iotag; /* pre-assigned IO tag */
38 uint16_t rsvd1; 48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
39 49
40 IOCB_t iocb; /* IOCB cmd */ 50 IOCB_t iocb; /* IOCB cmd */
41 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 51 uint8_t retry; /* retry counter for IOCB cmd - if needed */
@@ -65,7 +75,7 @@ struct lpfc_iocbq {
65 struct lpfc_iocbq *); 75 struct lpfc_iocbq *);
66 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 76 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
67 struct lpfc_iocbq *); 77 struct lpfc_iocbq *);
68 78 struct lpfc_sli4_rspiocb_info sli4_info;
69}; 79};
70 80
71#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 81#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -81,14 +91,18 @@ struct lpfc_iocbq {
81typedef struct lpfcMboxq { 91typedef struct lpfcMboxq {
82 /* MBOXQs are used in single linked lists */ 92 /* MBOXQs are used in single linked lists */
83 struct list_head list; /* ptr to next mailbox command */ 93 struct list_head list; /* ptr to next mailbox command */
84 MAILBOX_t mb; /* Mailbox cmd */ 94 union {
85 struct lpfc_vport *vport;/* virutal port pointer */ 95 MAILBOX_t mb; /* Mailbox cmd */
96 struct lpfc_mqe mqe;
97 } u;
98 struct lpfc_vport *vport;/* virtual port pointer */
86 void *context1; /* caller context information */ 99 void *context1; /* caller context information */
87 void *context2; /* caller context information */ 100 void *context2; /* caller context information */
88 101
89 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 102 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
90 uint8_t mbox_flag; 103 uint8_t mbox_flag;
91 104 struct lpfc_mcqe mcqe;
105 struct lpfc_mbx_nembed_sge_virt *sge_array;
92} LPFC_MBOXQ_t; 106} LPFC_MBOXQ_t;
93 107
94#define MBX_POLL 1 /* poll mailbox till command done, then 108#define MBX_POLL 1 /* poll mailbox till command done, then
@@ -230,10 +244,11 @@ struct lpfc_sli {
230 244
231 /* Additional sli_flags */ 245 /* Additional sli_flags */
232#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ 246#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
233#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 247#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
234#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 248#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
235#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ 249#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
236#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ 250#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
251#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
237 252
238 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 253 struct lpfc_sli_ring ring[LPFC_MAX_RING];
239 int fcp_ring; /* ring used for FCP initiator commands */ 254 int fcp_ring; /* ring used for FCP initiator commands */
@@ -261,6 +276,8 @@ struct lpfc_sli {
261 276
262#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 277#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
263 command */ 278 command */
279#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
280 command */
264#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 281#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
265 * or erase cmds. This is especially 282 * or erase cmds. This is especially
266 * long because of the potential of 283 * long because of the potential of
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 000000000000..5196b46608d7
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,467 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10
25/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254
27
28/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1
32#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34
35#define LPFC_FN_WQN_MAX 32
36#define LPFC_SP_WQN_DEF 1
37#define LPFC_FP_WQN_DEF 4
38#define LPFC_FP_WQN_MIN 1
39#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
40
41/*
42 * Provide the default FCF Record attributes used by the driver
43 * when nonFIP mode is configured and there is no other default
44 * FCF Record attributes.
45 */
46#define LPFC_FCOE_FCF_DEF_INDEX 0
47#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
48#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
49
50/* First 3 bytes of default FCF MAC is specified by FC_MAP */
51#define LPFC_FCOE_FCF_MAC3 0xFF
52#define LPFC_FCOE_FCF_MAC4 0xFF
53#define LPFC_FCOE_FCF_MAC5 0xFE
54#define LPFC_FCOE_FCF_MAP0 0x0E
55#define LPFC_FCOE_FCF_MAP1 0xFC
56#define LPFC_FCOE_FCF_MAP2 0x00
57#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
58#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80
60
61enum lpfc_sli4_queue_type {
62 LPFC_EQ,
63 LPFC_GCQ,
64 LPFC_MCQ,
65 LPFC_WCQ,
66 LPFC_RCQ,
67 LPFC_MQ,
68 LPFC_WQ,
69 LPFC_HRQ,
70 LPFC_DRQ
71};
72
73/* The queue sub-type defines the functional purpose of the queue */
74enum lpfc_sli4_queue_subtype {
75 LPFC_NONE,
76 LPFC_MBOX,
77 LPFC_FCP,
78 LPFC_ELS,
79 LPFC_USOL
80};
81
82union sli4_qe {
83 void *address;
84 struct lpfc_eqe *eqe;
85 struct lpfc_cqe *cqe;
86 struct lpfc_mcqe *mcqe;
87 struct lpfc_wcqe_complete *wcqe_complete;
88 struct lpfc_wcqe_release *wcqe_release;
89 struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
90 struct lpfc_rcqe_complete *rcqe_complete;
91 struct lpfc_mqe *mqe;
92 union lpfc_wqe *wqe;
93 struct lpfc_rqe *rqe;
94};
95
96struct lpfc_queue {
97 struct list_head list;
98 enum lpfc_sli4_queue_type type;
99 enum lpfc_sli4_queue_subtype subtype;
100 struct lpfc_hba *phba;
101 struct list_head child_list;
102 uint32_t entry_count; /* Number of entries to support on the queue */
103 uint32_t entry_size; /* Size of each queue entry. */
104 uint32_t queue_id; /* Queue ID assigned by the hardware */
105 struct list_head page_list;
106 uint32_t page_count; /* Number of pages allocated for this queue */
107
108 uint32_t host_index; /* The host's index for putting or getting */
109 uint32_t hba_index; /* The last known hba index for get or put */
110 union sli4_qe qe[1]; /* array to index entries (must be last) */
111};
112
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link {
126 uint8_t speed;
127 uint8_t duplex;
128 uint8_t status;
129 uint8_t physical;
130 uint8_t fault;
131};
132
133struct lpfc_fcf {
134 uint8_t fabric_name[8];
135 uint8_t mac_addr[6];
136 uint16_t fcf_indx;
137 uint16_t fcfi;
138 uint32_t fcf_flag;
139#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
140#define FCF_REGISTERED 0x02 /* FCF registered with FW */
141#define FCF_DISCOVERED 0x04 /* FCF discovery started */
142#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
143#define FCF_IN_USE		0x10 /* At least one discovery completed */
144#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
145 uint32_t priority;
146 uint32_t addr_mode;
147 uint16_t vlan_id;
148};
149
150#define LPFC_REGION23_SIGNATURE "RG23"
151#define LPFC_REGION23_VERSION 1
152#define LPFC_REGION23_LAST_REC 0xff
153struct lpfc_fip_param_hdr {
154 uint8_t type;
155#define FCOE_PARAM_TYPE 0xA0
156 uint8_t length;
157#define FCOE_PARAM_LENGTH 2
158 uint8_t parm_version;
159#define FIPP_VERSION 0x01
160 uint8_t parm_flags;
161#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
162#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
163#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
164#define FIPP_MODE_ON 0x2
165#define FIPP_MODE_OFF 0x0
166#define FIPP_VLAN_VALID 0x1
167};
168
169struct lpfc_fcoe_params {
170 uint8_t fc_map[3];
171 uint8_t reserved1;
172 uint16_t vlan_tag;
173 uint8_t reserved[2];
174};
175
176struct lpfc_fcf_conn_hdr {
177 uint8_t type;
178#define FCOE_CONN_TBL_TYPE 0xA1
179 uint8_t length; /* words */
180 uint8_t reserved[2];
181};
182
183struct lpfc_fcf_conn_rec {
184 uint16_t flags;
185#define FCFCNCT_VALID 0x0001
186#define FCFCNCT_BOOT 0x0002
187#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
188#define FCFCNCT_FBNM_VALID 0x0008
189#define FCFCNCT_SWNM_VALID 0x0010
190#define FCFCNCT_VLAN_VALID 0x0020
191#define FCFCNCT_AM_VALID 0x0040
192#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
193#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
194
195 uint16_t vlan_tag;
196 uint8_t fabric_name[8];
197 uint8_t switch_name[8];
198};
199
200struct lpfc_fcf_conn_entry {
201 struct list_head list;
202 struct lpfc_fcf_conn_rec conn_rec;
203};
204
205/*
206 * Define the host's bootstrap mailbox. This structure contains
207 * the member attributes needed to create, use, and destroy the
208 * bootstrap mailbox region.
209 *
210 * The macro definitions for the bmbx data structure are defined
211 * in lpfc_hw4.h with the register definition.
212 */
213struct lpfc_bmbx {
214 struct lpfc_dmabuf *dmabuf;
215 struct dma_address dma_address;
216 void *avirt;
217 dma_addr_t aphys;
218 uint32_t bmbx_size;
219};
220
221#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
222
223#define LPFC_EQE_SIZE_4B 4
224#define LPFC_EQE_SIZE_16B 16
225#define LPFC_CQE_SIZE 16
226#define LPFC_WQE_SIZE 64
227#define LPFC_MQE_SIZE 256
228#define LPFC_RQE_SIZE 8
229
230#define LPFC_EQE_DEF_COUNT 1024
231#define LPFC_CQE_DEF_COUNT 256
232#define LPFC_WQE_DEF_COUNT 64
233#define LPFC_MQE_DEF_COUNT 16
234#define LPFC_RQE_DEF_COUNT 512
235
236#define LPFC_QUEUE_NOARM false
237#define LPFC_QUEUE_REARM true
238
239
240/*
241 * SLI4 CT field defines
242 */
243#define SLI4_CT_RPI 0
244#define SLI4_CT_VPI 1
245#define SLI4_CT_VFI 2
246#define SLI4_CT_FCFI 3
247
248#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
249
250/*
251 * SLI4 specific data structures
252 */
253struct lpfc_max_cfg_param {
254 uint16_t max_xri;
255 uint16_t xri_base;
256 uint16_t xri_used;
257 uint16_t max_rpi;
258 uint16_t rpi_base;
259 uint16_t rpi_used;
260 uint16_t max_vpi;
261 uint16_t vpi_base;
262 uint16_t vpi_used;
263 uint16_t max_vfi;
264 uint16_t vfi_base;
265 uint16_t vfi_used;
266 uint16_t max_fcfi;
267 uint16_t fcfi_base;
268 uint16_t fcfi_used;
269 uint16_t max_eq;
270 uint16_t max_rq;
271 uint16_t max_cq;
272 uint16_t max_wq;
273};
274
275struct lpfc_hba;
276/* SLI4 HBA multi-fcp queue handler struct */
277struct lpfc_fcp_eq_hdl {
278 uint32_t idx;
279 struct lpfc_hba *phba;
280};
281
282/* SLI4 HBA data structure entries */
283struct lpfc_sli4_hba {
284 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
285 PCI BAR0, config space registers */
286 void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
287 PCI BAR1, control registers */
288 void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
289 PCI BAR2, doorbell registers */
290 /* BAR0 PCI config space register memory map */
291 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
292 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
293 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
294 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
295#define LPFC_ONLINE_NERR 0xFFFFFFFF
296 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
297 /* BAR1 FCoE function CSR register memory map */
298 void __iomem *STAregaddr; /* Address to HST_STATE register */
299 void __iomem *ISRregaddr; /* Address to HST_ISR register */
300 void __iomem *IMRregaddr; /* Address to HST_IMR register */
301 void __iomem *ISCRregaddr; /* Address to HST_ISCR register */
302 /* BAR2 VF-0 doorbell register memory map */
303 void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */
304 void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */
305 void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
306 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
307 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
308
309 struct msix_entry *msix_entries;
310 uint32_t cfg_eqn;
311 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
312 /* Pointers to the constructed SLI4 queues */
313 struct lpfc_queue **fp_eq; /* Fast-path event queue */
314 struct lpfc_queue *sp_eq; /* Slow-path event queue */
315 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
316 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
317 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
318 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
319 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
320 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
321 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
322 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
323 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
324
325 /* Setup information for various queue parameters */
326 int eq_esize;
327 int eq_ecount;
328 int cq_esize;
329 int cq_ecount;
330 int wq_esize;
331 int wq_ecount;
332 int mq_esize;
333 int mq_ecount;
334 int rq_esize;
335 int rq_ecount;
336#define LPFC_SP_EQ_MAX_INTR_SEC 10000
337#define LPFC_FP_EQ_MAX_INTR_SEC 10000
338
339 uint32_t intr_enable;
340 struct lpfc_bmbx bmbx;
341 struct lpfc_max_cfg_param max_cfg_param;
342 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
343 uint16_t next_rpi;
344 uint16_t scsi_xri_max;
345 uint16_t scsi_xri_cnt;
346 struct list_head lpfc_free_sgl_list;
347 struct list_head lpfc_sgl_list;
348 struct lpfc_sglq **lpfc_els_sgl_array;
349 struct list_head lpfc_abts_els_sgl_list;
350 struct lpfc_scsi_buf **lpfc_scsi_psb_array;
351 struct list_head lpfc_abts_scsi_buf_list;
352 uint32_t total_sglq_bufs;
353 struct lpfc_sglq **lpfc_sglq_active_list;
354 struct list_head lpfc_rpi_hdr_list;
355 unsigned long *rpi_bmask;
356 uint16_t rpi_count;
357 struct lpfc_sli4_flags sli4_flags;
358 struct list_head sp_rspiocb_work_queue;
359 struct list_head sp_cqe_event_pool;
360 struct list_head sp_asynce_work_queue;
361 struct list_head sp_fcp_xri_aborted_work_queue;
362 struct list_head sp_els_xri_aborted_work_queue;
363 struct list_head sp_unsol_work_queue;
364 struct lpfc_sli4_link link_state;
365 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
366 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
367};
368
369enum lpfc_sge_type {
370 GEN_BUFF_TYPE,
371 SCSI_BUFF_TYPE
372};
373
374struct lpfc_sglq {
375 /* lpfc_sglqs are used in double linked lists */
376 struct list_head list;
377 struct list_head clist;
378 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
379 uint16_t iotag; /* pre-assigned IO tag */
380 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
381 struct sli4_sge *sgl; /* pre-assigned SGL */
382 void *virt; /* virtual address. */
383 dma_addr_t phys; /* physical address */
384};
385
386struct lpfc_rpi_hdr {
387 struct list_head list;
388 uint32_t len;
389 struct lpfc_dmabuf *dmabuf;
390 uint32_t page_count;
391 uint32_t start_rpi;
392};
393
394/*
395 * SLI4 specific function prototypes
396 */
397int lpfc_pci_function_reset(struct lpfc_hba *);
398int lpfc_sli4_hba_setup(struct lpfc_hba *);
399int lpfc_sli4_hba_down(struct lpfc_hba *);
400int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
401 uint8_t, uint32_t, bool);
402void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
403void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
404void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
405 struct lpfc_mbx_sge *);
406
407void lpfc_sli4_hba_reset(struct lpfc_hba *);
408struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
409 uint32_t);
410void lpfc_sli4_queue_free(struct lpfc_queue *);
411uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
412uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
413 struct lpfc_queue *, uint32_t, uint32_t);
414uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
415 struct lpfc_queue *, uint32_t);
416uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
417 struct lpfc_queue *, uint32_t);
418uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
419 struct lpfc_queue *, struct lpfc_queue *, uint32_t);
420uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
421uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
422uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
423uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
424uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
425 struct lpfc_queue *);
426int lpfc_sli4_queue_setup(struct lpfc_hba *);
427void lpfc_sli4_queue_unset(struct lpfc_hba *);
428int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
429int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
430int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
431uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
432int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
433int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
434int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
435struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
436struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
437void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
438void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
439int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
440int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
441int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
442struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
443void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
444int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
445void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
446void lpfc_sli4_remove_rpis(struct lpfc_hba *);
447void lpfc_sli4_async_event_proc(struct lpfc_hba *);
448int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
449void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
450void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
451void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
452 struct sli4_wcqe_xri_aborted *);
453void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
454 struct sli4_wcqe_xri_aborted *);
455int lpfc_sli4_brdreset(struct lpfc_hba *);
456int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
457void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
458int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
459int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
460uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
461uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
462void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
463int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
464void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
465int lpfc_sli4_post_status_check(struct lpfc_hba *);
466uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
467
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e599519e3078..6b8a148f0a55 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.1" 21#define LPFC_DRIVER_VERSION "8.3.2"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0aff..a6313ee84ac5 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
32#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
89 vpi = 0; 91 vpi = 0;
90 else 92 else
91 set_bit(vpi, phba->vpi_bmask); 93 set_bit(vpi, phba->vpi_bmask);
94 if (phba->sli_rev == LPFC_SLI_REV4)
95 phba->sli4_hba.max_cfg_param.vpi_used++;
92 spin_unlock_irq(&phba->hbalock); 96 spin_unlock_irq(&phba->hbalock);
93 return vpi; 97 return vpi;
94} 98}
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
96static void 100static void
97lpfc_free_vpi(struct lpfc_hba *phba, int vpi) 101lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
98{ 102{
103 if (vpi == 0)
104 return;
99 spin_lock_irq(&phba->hbalock); 105 spin_lock_irq(&phba->hbalock);
100 clear_bit(vpi, phba->vpi_bmask); 106 clear_bit(vpi, phba->vpi_bmask);
107 if (phba->sli_rev == LPFC_SLI_REV4)
108 phba->sli4_hba.max_cfg_param.vpi_used--;
101 spin_unlock_irq(&phba->hbalock); 109 spin_unlock_irq(&phba->hbalock);
102} 110}
103 111
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
113 if (!pmb) { 121 if (!pmb) {
114 return -ENOMEM; 122 return -ENOMEM;
115 } 123 }
116 mb = &pmb->mb; 124 mb = &pmb->u.mb;
117 125
118 lpfc_read_sparam(phba, pmb, vport->vpi); 126 lpfc_read_sparam(phba, pmb, vport->vpi);
119 /* 127 /*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
243 (vport->fc_flag & wait_flags) || 251 (vport->fc_flag & wait_flags) ||
244 ((vport->port_state > LPFC_VPORT_FAILED) && 252 ((vport->port_state > LPFC_VPORT_FAILED) &&
245 (vport->port_state < LPFC_VPORT_READY))) { 253 (vport->port_state < LPFC_VPORT_READY))) {
246 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 254 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
247 "1833 Vport discovery quiesce Wait:" 255 "1833 Vport discovery quiesce Wait:"
248 " vpi x%x state x%x fc_flags x%x" 256 " state x%x fc_flags x%x"
249 " num_nodes x%x, waiting 1000 msecs" 257 " num_nodes x%x, waiting 1000 msecs"
250 " total wait msecs x%x\n", 258 " total wait msecs x%x\n",
251 vport->vpi, vport->port_state, 259 vport->port_state, vport->fc_flag,
252 vport->fc_flag, vport->num_disc_nodes, 260 vport->num_disc_nodes,
253 jiffies_to_msecs(jiffies - start_time)); 261 jiffies_to_msecs(jiffies - start_time));
254 msleep(1000); 262 msleep(1000);
255 } else { 263 } else {
256 /* Base case. Wait variants satisfied. Break out */ 264 /* Base case. Wait variants satisfied. Break out */
257 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 265 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
258 "1834 Vport discovery quiesced:" 266 "1834 Vport discovery quiesced:"
259 " vpi x%x state x%x fc_flags x%x" 267 " state x%x fc_flags x%x"
260 " wait msecs x%x\n", 268 " wait msecs x%x\n",
261 vport->vpi, vport->port_state, 269 vport->port_state, vport->fc_flag,
262 vport->fc_flag,
263 jiffies_to_msecs(jiffies 270 jiffies_to_msecs(jiffies
264 - start_time)); 271 - start_time));
265 break; 272 break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
267 } 274 }
268 275
269 if (time_after(jiffies, wait_time_max)) 276 if (time_after(jiffies, wait_time_max))
270 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 277 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
271 "1835 Vport discovery quiesce failed:" 278 "1835 Vport discovery quiesce failed:"
272 " vpi x%x state x%x fc_flags x%x" 279 " state x%x fc_flags x%x wait msecs x%x\n",
273 " wait msecs x%x\n", 280 vport->port_state, vport->fc_flag,
274 vport->vpi, vport->port_state,
275 vport->fc_flag,
276 jiffies_to_msecs(jiffies - start_time)); 281 jiffies_to_msecs(jiffies - start_time));
277} 282}
278 283
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
308 goto error_out; 313 goto error_out;
309 } 314 }
310 315
316 /*
317 * In SLI4, the vpi must be activated before it can be used
318 * by the port.
319 */
320 if (phba->sli_rev == LPFC_SLI_REV4) {
321 rc = lpfc_sli4_init_vpi(phba, vpi);
322 if (rc) {
323 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
324 "1838 Failed to INIT_VPI on vpi %d "
325 "status %d\n", vpi, rc);
326 rc = VPORT_NORESOURCES;
327 lpfc_free_vpi(phba, vpi);
328 goto error_out;
329 }
330 }
311 331
312 /* Assign an unused board number */ 332 /* Assign an unused board number */
313 if ((instance = lpfc_get_instance()) < 0) { 333 if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
535 "physical host\n"); 555 "physical host\n");
536 return VPORT_ERROR; 556 return VPORT_ERROR;
537 } 557 }
558
559 /* If the vport is a static vport fail the deletion. */
560 if ((vport->vport_flag & STATIC_VPORT) &&
561 !(phba->pport->load_flag & FC_UNLOADING)) {
562 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
563 "1837 vport_delete failed: Cannot delete "
564 "static vport.\n");
565 return VPORT_ERROR;
566 }
567
538 /* 568 /*
539 * If we are not unloading the driver then prevent the vport_delete 569 * If we are not unloading the driver then prevent the vport_delete
540 * from happening until after this vport's discovery is finished. 570 * from happening until after this vport's discovery is finished.
@@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
710 struct lpfc_vport *port_iterator; 740 struct lpfc_vport *port_iterator;
711 struct lpfc_vport **vports; 741 struct lpfc_vport **vports;
712 int index = 0; 742 int index = 0;
713 vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), 743 vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
714 GFP_KERNEL); 744 GFP_KERNEL);
715 if (vports == NULL) 745 if (vports == NULL)
716 return NULL; 746 return NULL;
@@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
734 int i; 764 int i;
735 if (vports == NULL) 765 if (vports == NULL)
736 return; 766 return;
737 for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) 767 for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
738 scsi_host_put(lpfc_shost_from_vport(vports[i])); 768 scsi_host_put(lpfc_shost_from_vport(vports[i]));
739 kfree(vports); 769 kfree(vports);
740} 770}
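
The lpfc_vport.c hunks above keep the vpi bookkeeping deliberately simple: a bitmap of in-use vpis, plus (for SLI4) a vpi_used counter, with vpi 0 reserved for the physical port and never freed. As a reference point only, a minimal userspace sketch of that allocate/free pattern (all names here are hypothetical, not lpfc's) could look like this:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_VPI 64

static uint64_t demo_vpi_bmask = 1;	/* bit 0 stays set: vpi 0 belongs to the physical port */
static unsigned int demo_vpi_used;

/* find the first free vpi, mark it used, bump the counter; 0 means "none left" */
static int demo_alloc_vpi(void)
{
	int vpi;

	for (vpi = 1; vpi < DEMO_MAX_VPI; vpi++) {
		if (!(demo_vpi_bmask & (1ULL << vpi))) {
			demo_vpi_bmask |= 1ULL << vpi;
			demo_vpi_used++;
			return vpi;
		}
	}
	return 0;
}

/* never release vpi 0, mirroring the new guard added to lpfc_free_vpi() */
static void demo_free_vpi(int vpi)
{
	if (vpi == 0)
		return;
	demo_vpi_bmask &= ~(1ULL << vpi);
	demo_vpi_used--;
}

int main(void)
{
	int vpi = demo_alloc_vpi();

	printf("allocated vpi %d (used %u)\n", vpi, demo_vpi_used);
	demo_free_vpi(vpi);
	printf("freed vpi %d (used %u)\n", vpi, demo_vpi_used);
	return 0;
}

The driver does the equivalent bookkeeping under phba->hbalock with the kernel's bitmap helpers (set_bit()/clear_bit() are visible in the hunk); the sketch only shows the shape of it.
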
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 36b1d1052ba1..286c185fa9e4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -61,6 +61,7 @@
61#include <scsi/scsi_tcq.h> 61#include <scsi/scsi_tcq.h>
62#include <scsi/scsi_transport_sas.h> 62#include <scsi/scsi_transport_sas.h>
63#include <scsi/scsi_dbg.h> 63#include <scsi/scsi_dbg.h>
64#include <scsi/scsi_eh.h>
64 65
65#include "mpt2sas_debug.h" 66#include "mpt2sas_debug.h"
66 67
@@ -68,10 +69,10 @@
68#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
69#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
70#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
71#define MPT2SAS_DRIVER_VERSION "01.100.02.00" 72#define MPT2SAS_DRIVER_VERSION "01.100.03.00"
72#define MPT2SAS_MAJOR_VERSION 01 73#define MPT2SAS_MAJOR_VERSION 01
73#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
74#define MPT2SAS_BUILD_VERSION 02 75#define MPT2SAS_BUILD_VERSION 03
75#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
76 77
77/* 78/*
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index ba6ab170bdf0..14e473d1fa7b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
473} 473}
474 474
475/** 475/**
476 * _ctl_do_task_abort - assign an active smid to the abort_task 476 * _ctl_set_task_mid - assign an active smid to tm request
477 * @ioc: per adapter object 477 * @ioc: per adapter object
478 * @karg - (struct mpt2_ioctl_command) 478 * @karg - (struct mpt2_ioctl_command)
479 * @tm_request - pointer to mf from user space 479 * @tm_request - pointer to mf from user space
@@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
482 * during failure, the reply frame is filled. 482 * during failure, the reply frame is filled.
483 */ 483 */
484static int 484static int
485_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, 485_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
486 Mpi2SCSITaskManagementRequest_t *tm_request) 486 Mpi2SCSITaskManagementRequest_t *tm_request)
487{ 487{
488 u8 found = 0; 488 u8 found = 0;
@@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
494 Mpi2SCSITaskManagementReply_t *tm_reply; 494 Mpi2SCSITaskManagementReply_t *tm_reply;
495 u32 sz; 495 u32 sz;
496 u32 lun; 496 u32 lun;
497 char *desc = NULL;
498
499 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
500 desc = "abort_task";
501 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
502 desc = "query_task";
503 else
504 return 0;
497 505
498 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); 506 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
499 507
@@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
517 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 525 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
518 526
519 if (!found) { 527 if (!found) {
520 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 528 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
521 "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name, 529 "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
522 tm_request->DevHandle, lun)); 530 desc, tm_request->DevHandle, lun));
523 tm_reply = ioc->ctl_cmds.reply; 531 tm_reply = ioc->ctl_cmds.reply;
524 tm_reply->DevHandle = tm_request->DevHandle; 532 tm_reply->DevHandle = tm_request->DevHandle;
525 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 533 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
526 tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 534 tm_reply->TaskType = tm_request->TaskType;
527 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; 535 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
528 tm_reply->VP_ID = tm_request->VP_ID; 536 tm_reply->VP_ID = tm_request->VP_ID;
529 tm_reply->VF_ID = tm_request->VF_ID; 537 tm_reply->VF_ID = tm_request->VF_ID;
@@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
535 return 1; 543 return 1;
536 } 544 }
537 545
538 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 546 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
539 "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name, 547 "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
540 tm_request->DevHandle, lun, tm_request->TaskMID)); 548 desc, tm_request->DevHandle, lun, tm_request->TaskMID));
541 return 0; 549 return 0;
542} 550}
543 551
@@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
739 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 747 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
740 748
741 if (tm_request->TaskType == 749 if (tm_request->TaskType ==
742 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 750 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
743 if (_ctl_do_task_abort(ioc, &karg, tm_request)) { 751 tm_request->TaskType ==
752 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
753 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
744 mpt2sas_base_free_smid(ioc, smid); 754 mpt2sas_base_free_smid(ioc, smid);
745 goto out; 755 goto out;
746 } 756 }
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index e3a7967259e7..2a01a5f2a84d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = {
197MODULE_DEVICE_TABLE(pci, scsih_pci_table); 197MODULE_DEVICE_TABLE(pci, scsih_pci_table);
198 198
199/** 199/**
200 * scsih_set_debug_level - global setting of ioc->logging_level. 200 * _scsih_set_debug_level - global setting of ioc->logging_level.
201 * 201 *
202 * Note: The logging levels are defined in mpt2sas_debug.h. 202 * Note: The logging levels are defined in mpt2sas_debug.h.
203 */ 203 */
204static int 204static int
205scsih_set_debug_level(const char *val, struct kernel_param *kp) 205_scsih_set_debug_level(const char *val, struct kernel_param *kp)
206{ 206{
207 int ret = param_set_int(val, kp); 207 int ret = param_set_int(val, kp);
208 struct MPT2SAS_ADAPTER *ioc; 208 struct MPT2SAS_ADAPTER *ioc;
@@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp)
215 ioc->logging_level = logging_level; 215 ioc->logging_level = logging_level;
216 return 0; 216 return 0;
217} 217}
218module_param_call(logging_level, scsih_set_debug_level, param_get_int, 218module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
219 &logging_level, 0644); 219 &logging_level, 0644);
220 220
221/** 221/**
@@ -884,6 +884,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
884} 884}
885 885
886/** 886/**
887 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
888 * @ioc: per adapter object
889 * @id: target id
890 * @lun: lun number
891 * @channel: channel
892 * Context: This function will acquire ioc->scsi_lookup_lock.
893 *
894 * This will search for a matching channel:id:lun in the scsi_lookup array,
895 * returning 1 if found.
896 */
897static u8
898_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
899 unsigned int lun, int channel)
900{
901 u8 found;
902 unsigned long flags;
903 int i;
904
905 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
906 found = 0;
907 for (i = 0 ; i < ioc->request_depth; i++) {
908 if (ioc->scsi_lookup[i].scmd &&
909 (ioc->scsi_lookup[i].scmd->device->id == id &&
910 ioc->scsi_lookup[i].scmd->device->channel == channel &&
911 ioc->scsi_lookup[i].scmd->device->lun == lun)) {
912 found = 1;
913 goto out;
914 }
915 }
916 out:
917 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
918 return found;
919}
920
921/**
887 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) 922 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
888 * @ioc: per adapter object 923 * @ioc: per adapter object
889 * @smid: system request message index 924 * @smid: system request message index
@@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1047} 1082}
1048 1083
1049/** 1084/**
1050 * scsih_change_queue_depth - setting device queue depth 1085 * _scsih_change_queue_depth - setting device queue depth
1051 * @sdev: scsi device struct 1086 * @sdev: scsi device struct
1052 * @qdepth: requested queue depth 1087 * @qdepth: requested queue depth
1053 * 1088 *
1054 * Returns queue depth. 1089 * Returns queue depth.
1055 */ 1090 */
1056static int 1091static int
1057scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) 1092_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1058{ 1093{
1059 struct Scsi_Host *shost = sdev->host; 1094 struct Scsi_Host *shost = sdev->host;
1060 int max_depth; 1095 int max_depth;
@@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1079} 1114}
1080 1115
1081/** 1116/**
1082 * scsih_change_queue_depth - changing device queue tag type 1117 * _scsih_change_queue_depth - changing device queue tag type
1083 * @sdev: scsi device struct 1118 * @sdev: scsi device struct
1084 * @tag_type: requested tag type 1119 * @tag_type: requested tag type
1085 * 1120 *
1086 * Returns queue tag type. 1121 * Returns queue tag type.
1087 */ 1122 */
1088static int 1123static int
1089scsih_change_queue_type(struct scsi_device *sdev, int tag_type) 1124_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1090{ 1125{
1091 if (sdev->tagged_supported) { 1126 if (sdev->tagged_supported) {
1092 scsi_set_tag_type(sdev, tag_type); 1127 scsi_set_tag_type(sdev, tag_type);
@@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1101} 1136}
1102 1137
1103/** 1138/**
1104 * scsih_target_alloc - target add routine 1139 * _scsih_target_alloc - target add routine
1105 * @starget: scsi target struct 1140 * @starget: scsi target struct
1106 * 1141 *
1107 * Returns 0 if ok. Any other return is assumed to be an error and 1142 * Returns 0 if ok. Any other return is assumed to be an error and
1108 * the device is ignored. 1143 * the device is ignored.
1109 */ 1144 */
1110static int 1145static int
1111scsih_target_alloc(struct scsi_target *starget) 1146_scsih_target_alloc(struct scsi_target *starget)
1112{ 1147{
1113 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1148 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1114 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1149 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget)
1163} 1198}
1164 1199
1165/** 1200/**
1166 * scsih_target_destroy - target destroy routine 1201 * _scsih_target_destroy - target destroy routine
1167 * @starget: scsi target struct 1202 * @starget: scsi target struct
1168 * 1203 *
1169 * Returns nothing. 1204 * Returns nothing.
1170 */ 1205 */
1171static void 1206static void
1172scsih_target_destroy(struct scsi_target *starget) 1207_scsih_target_destroy(struct scsi_target *starget)
1173{ 1208{
1174 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1209 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1175 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1210 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget)
1212} 1247}
1213 1248
1214/** 1249/**
1215 * scsih_slave_alloc - device add routine 1250 * _scsih_slave_alloc - device add routine
1216 * @sdev: scsi device struct 1251 * @sdev: scsi device struct
1217 * 1252 *
1218 * Returns 0 if ok. Any other return is assumed to be an error and 1253 * Returns 0 if ok. Any other return is assumed to be an error and
1219 * the device is ignored. 1254 * the device is ignored.
1220 */ 1255 */
1221static int 1256static int
1222scsih_slave_alloc(struct scsi_device *sdev) 1257_scsih_slave_alloc(struct scsi_device *sdev)
1223{ 1258{
1224 struct Scsi_Host *shost; 1259 struct Scsi_Host *shost;
1225 struct MPT2SAS_ADAPTER *ioc; 1260 struct MPT2SAS_ADAPTER *ioc;
@@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev)
1273} 1308}
1274 1309
1275/** 1310/**
1276 * scsih_slave_destroy - device destroy routine 1311 * _scsih_slave_destroy - device destroy routine
1277 * @sdev: scsi device struct 1312 * @sdev: scsi device struct
1278 * 1313 *
1279 * Returns nothing. 1314 * Returns nothing.
1280 */ 1315 */
1281static void 1316static void
1282scsih_slave_destroy(struct scsi_device *sdev) 1317_scsih_slave_destroy(struct scsi_device *sdev)
1283{ 1318{
1284 struct MPT2SAS_TARGET *sas_target_priv_data; 1319 struct MPT2SAS_TARGET *sas_target_priv_data;
1285 struct scsi_target *starget; 1320 struct scsi_target *starget;
@@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev)
1295} 1330}
1296 1331
1297/** 1332/**
1298 * scsih_display_sata_capabilities - sata capabilities 1333 * _scsih_display_sata_capabilities - sata capabilities
1299 * @ioc: per adapter object 1334 * @ioc: per adapter object
1300 * @sas_device: the sas_device object 1335 * @sas_device: the sas_device object
1301 * @sdev: scsi device struct 1336 * @sdev: scsi device struct
1302 */ 1337 */
1303static void 1338static void
1304scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, 1339_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
1305 struct _sas_device *sas_device, struct scsi_device *sdev) 1340 struct _sas_device *sas_device, struct scsi_device *sdev)
1306{ 1341{
1307 Mpi2ConfigReply_t mpi_reply; 1342 Mpi2ConfigReply_t mpi_reply;
@@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
1401} 1436}
1402 1437
1403/** 1438/**
1404 * scsih_slave_configure - device configure routine. 1439 * _scsih_slave_configure - device configure routine.
1405 * @sdev: scsi device struct 1440 * @sdev: scsi device struct
1406 * 1441 *
1407 * Returns 0 if ok. Any other return is assumed to be an error and 1442 * Returns 0 if ok. Any other return is assumed to be an error and
1408 * the device is ignored. 1443 * the device is ignored.
1409 */ 1444 */
1410static int 1445static int
1411scsih_slave_configure(struct scsi_device *sdev) 1446_scsih_slave_configure(struct scsi_device *sdev)
1412{ 1447{
1413 struct Scsi_Host *shost = sdev->host; 1448 struct Scsi_Host *shost = sdev->host;
1414 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1449 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1489 r_level, raid_device->handle, 1524 r_level, raid_device->handle,
1490 (unsigned long long)raid_device->wwid, 1525 (unsigned long long)raid_device->wwid,
1491 raid_device->num_pds, ds); 1526 raid_device->num_pds, ds);
1492 scsih_change_queue_depth(sdev, qdepth); 1527 _scsih_change_queue_depth(sdev, qdepth);
1493 return 0; 1528 return 0;
1494 } 1529 }
1495 1530
@@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev)
1532 sas_device->slot); 1567 sas_device->slot);
1533 1568
1534 if (!ssp_target) 1569 if (!ssp_target)
1535 scsih_display_sata_capabilities(ioc, sas_device, sdev); 1570 _scsih_display_sata_capabilities(ioc, sas_device, sdev);
1536 } 1571 }
1537 1572
1538 scsih_change_queue_depth(sdev, qdepth); 1573 _scsih_change_queue_depth(sdev, qdepth);
1539 1574
1540 if (ssp_target) 1575 if (ssp_target)
1541 sas_read_port_mode_page(sdev); 1576 sas_read_port_mode_page(sdev);
@@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1543} 1578}
1544 1579
1545/** 1580/**
1546 * scsih_bios_param - fetch head, sector, cylinder info for a disk 1581 * _scsih_bios_param - fetch head, sector, cylinder info for a disk
1547 * @sdev: scsi device struct 1582 * @sdev: scsi device struct
1548 * @bdev: pointer to block device context 1583 * @bdev: pointer to block device context
1549 * @capacity: device size (in 512 byte sectors) 1584 * @capacity: device size (in 512 byte sectors)
@@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1555 * Return nothing. 1590 * Return nothing.
1556 */ 1591 */
1557static int 1592static int
1558scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 1593_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1559 sector_t capacity, int params[]) 1594 sector_t capacity, int params[])
1560{ 1595{
1561 int heads; 1596 int heads;
@@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1636} 1671}
1637 1672
1638/** 1673/**
1639 * scsih_tm_done - tm completion routine 1674 * _scsih_tm_done - tm completion routine
1640 * @ioc: per adapter object 1675 * @ioc: per adapter object
1641 * @smid: system request message index 1676 * @smid: system request message index
1642 * @VF_ID: virtual function id 1677 * @VF_ID: virtual function id
@@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1648 * Return nothing. 1683 * Return nothing.
1649 */ 1684 */
1650static void 1685static void
1651scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 1686_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
1652{ 1687{
1653 MPI2DefaultReply_t *mpi_reply; 1688 MPI2DefaultReply_t *mpi_reply;
1654 1689
@@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1823} 1858}
1824 1859
1825/** 1860/**
1826 * scsih_abort - eh threads main abort routine 1861 * _scsih_abort - eh threads main abort routine
1827 * @sdev: scsi device struct 1862 * @sdev: scsi device struct
1828 * 1863 *
1829 * Returns SUCCESS if command aborted else FAILED 1864 * Returns SUCCESS if command aborted else FAILED
1830 */ 1865 */
1831static int 1866static int
1832scsih_abort(struct scsi_cmnd *scmd) 1867_scsih_abort(struct scsi_cmnd *scmd)
1833{ 1868{
1834 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 1869 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1835 struct MPT2SAS_DEVICE *sas_device_priv_data; 1870 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd)
1889 return r; 1924 return r;
1890} 1925}
1891 1926
1927/**
1928 * _scsih_dev_reset - eh threads main device reset routine
 1929 * @scmd: pointer to scsi command object
 1930 *
 1931 * Returns SUCCESS if the device was reset else FAILED
1932 */
1933static int
1934_scsih_dev_reset(struct scsi_cmnd *scmd)
1935{
1936 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1937 struct MPT2SAS_DEVICE *sas_device_priv_data;
1938 struct _sas_device *sas_device;
1939 unsigned long flags;
1940 u16 handle;
1941 int r;
1942
1943 printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n",
1944 ioc->name, scmd);
1945 scsi_print_command(scmd);
1946
1947 sas_device_priv_data = scmd->device->hostdata;
1948 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1949 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
1950 ioc->name, scmd);
1951 scmd->result = DID_NO_CONNECT << 16;
1952 scmd->scsi_done(scmd);
1953 r = SUCCESS;
1954 goto out;
1955 }
1956
1957 /* for hidden raid components obtain the volume_handle */
1958 handle = 0;
1959 if (sas_device_priv_data->sas_target->flags &
1960 MPT_TARGET_FLAGS_RAID_COMPONENT) {
1961 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1962 sas_device = _scsih_sas_device_find_by_handle(ioc,
1963 sas_device_priv_data->sas_target->handle);
1964 if (sas_device)
1965 handle = sas_device->volume_handle;
1966 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1967 } else
1968 handle = sas_device_priv_data->sas_target->handle;
1969
1970 if (!handle) {
1971 scmd->result = DID_RESET << 16;
1972 r = FAILED;
1973 goto out;
1974 }
1975
1976 mutex_lock(&ioc->tm_cmds.mutex);
1977 mpt2sas_scsih_issue_tm(ioc, handle, 0,
1978 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
1979 30);
1980
1981 /*
 1982 * sanity check to see whether all commands to this device have
 1983 * been completed
1984 */
1985 if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
1986 scmd->device->lun, scmd->device->channel))
1987 r = FAILED;
1988 else
1989 r = SUCCESS;
1990 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
1991 mutex_unlock(&ioc->tm_cmds.mutex);
1992
1993 out:
1994 printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
1995 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
1996 return r;
1997}
1892 1998
1893/** 1999/**
1894 * scsih_dev_reset - eh threads main device reset routine 2000 * _scsih_target_reset - eh threads main target reset routine
1895 * @sdev: scsi device struct 2001 * @sdev: scsi device struct
1896 * 2002 *
1897 * Returns SUCCESS if command aborted else FAILED 2003 * Returns SUCCESS if command aborted else FAILED
1898 */ 2004 */
1899static int 2005static int
1900scsih_dev_reset(struct scsi_cmnd *scmd) 2006_scsih_target_reset(struct scsi_cmnd *scmd)
1901{ 2007{
1902 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2008 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1903 struct MPT2SAS_DEVICE *sas_device_priv_data; 2009 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1912 2018
1913 sas_device_priv_data = scmd->device->hostdata; 2019 sas_device_priv_data = scmd->device->hostdata;
1914 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2020 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1915 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", 2021 printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n",
1916 ioc->name, scmd); 2022 ioc->name, scmd);
1917 scmd->result = DID_NO_CONNECT << 16; 2023 scmd->result = DID_NO_CONNECT << 16;
1918 scmd->scsi_done(scmd); 2024 scmd->scsi_done(scmd);
@@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1962} 2068}
1963 2069
1964/** 2070/**
1965 * scsih_abort - eh threads main host reset routine 2071 * _scsih_abort - eh threads main host reset routine
1966 * @sdev: scsi device struct 2072 * @sdev: scsi device struct
1967 * 2073 *
1968 * Returns SUCCESS if command aborted else FAILED 2074 * Returns SUCCESS if command aborted else FAILED
1969 */ 2075 */
1970static int 2076static int
1971scsih_host_reset(struct scsi_cmnd *scmd) 2077_scsih_host_reset(struct scsi_cmnd *scmd)
1972{ 2078{
1973 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2079 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1974 int r, retval; 2080 int r, retval;
@@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2390} 2496}
2391 2497
2392/** 2498/**
2393 * scsih_qcmd - main scsi request entry point 2499 * _scsih_setup_eedp - setup MPI request for EEDP transfer
2500 * @scmd: pointer to scsi command object
 2501 * @mpi_request: pointer to the SCSI_IO request message frame
2502 *
2503 * Supporting protection 1 and 3.
2504 *
2505 * Returns nothing
2506 */
2507static void
2508_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2509{
2510 u16 eedp_flags;
2511 unsigned char prot_op = scsi_get_prot_op(scmd);
2512 unsigned char prot_type = scsi_get_prot_type(scmd);
2513
2514 if (prot_type == SCSI_PROT_DIF_TYPE0 ||
2515 prot_type == SCSI_PROT_DIF_TYPE2 ||
2516 prot_op == SCSI_PROT_NORMAL)
2517 return;
2518
2519 if (prot_op == SCSI_PROT_READ_STRIP)
2520 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
2521 else if (prot_op == SCSI_PROT_WRITE_INSERT)
2522 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2523 else
2524 return;
2525
2526 mpi_request->EEDPBlockSize = scmd->device->sector_size;
2527
2528 switch (prot_type) {
2529 case SCSI_PROT_DIF_TYPE1:
2530
2531 /*
2532 * enable ref/guard checking
2533 * auto increment ref tag
2534 */
2535 mpi_request->EEDPFlags = eedp_flags |
2536 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2537 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2538 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2539 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
2540 cpu_to_be32(scsi_get_lba(scmd));
2541
2542 break;
2543
2544 case SCSI_PROT_DIF_TYPE3:
2545
2546 /*
2547 * enable guard checking
2548 */
2549 mpi_request->EEDPFlags = eedp_flags |
2550 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2551
2552 break;
2553 }
2554}
2555
2556/**
2557 * _scsih_eedp_error_handling - return sense code for EEDP errors
2558 * @scmd: pointer to scsi command object
2559 * @ioc_status: ioc status
2560 *
2561 * Returns nothing
2562 */
2563static void
2564_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
2565{
2566 u8 ascq;
2567 u8 sk;
2568 u8 host_byte;
2569
2570 switch (ioc_status) {
2571 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2572 ascq = 0x01;
2573 break;
2574 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2575 ascq = 0x02;
2576 break;
2577 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2578 ascq = 0x03;
2579 break;
2580 default:
2581 ascq = 0x00;
2582 break;
2583 }
2584
2585 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
2586 sk = ILLEGAL_REQUEST;
2587 host_byte = DID_ABORT;
2588 } else {
2589 sk = ABORTED_COMMAND;
2590 host_byte = DID_OK;
2591 }
2592
2593 scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
2594 scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
2595 SAM_STAT_CHECK_CONDITION;
2596}
2597
2598/**
2599 * _scsih_qcmd - main scsi request entry point
2394 * @scmd: pointer to scsi command object 2600 * @scmd: pointer to scsi command object
2395 * @done: function pointer to be invoked on completion 2601 * @done: function pointer to be invoked on completion
2396 * 2602 *
@@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2401 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 2607 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
2402 */ 2608 */
2403static int 2609static int
2404scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) 2610_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2405{ 2611{
2406 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2612 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2407 struct MPT2SAS_DEVICE *sas_device_priv_data; 2613 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2470 } 2676 }
2471 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 2677 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2472 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); 2678 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
2679 _scsih_setup_eedp(scmd, mpi_request);
2473 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2680 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2474 if (sas_device_priv_data->sas_target->flags & 2681 if (sas_device_priv_data->sas_target->flags &
2475 MPT_TARGET_FLAGS_RAID_COMPONENT) 2682 MPT_TARGET_FLAGS_RAID_COMPONENT)
@@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
2604 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 2811 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2605 desc_ioc_state = "scsi ext terminated"; 2812 desc_ioc_state = "scsi ext terminated";
2606 break; 2813 break;
2814 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2815 desc_ioc_state = "eedp guard error";
2816 break;
2817 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2818 desc_ioc_state = "eedp ref tag error";
2819 break;
2820 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2821 desc_ioc_state = "eedp app tag error";
2822 break;
2607 default: 2823 default:
2608 desc_ioc_state = "unknown"; 2824 desc_ioc_state = "unknown";
2609 break; 2825 break;
@@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2783} 2999}
2784 3000
2785/** 3001/**
2786 * scsih_io_done - scsi request callback 3002 * _scsih_io_done - scsi request callback
2787 * @ioc: per adapter object 3003 * @ioc: per adapter object
2788 * @smid: system request message index 3004 * @smid: system request message index
2789 * @VF_ID: virtual function id 3005 * @VF_ID: virtual function id
@@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2794 * Return nothing. 3010 * Return nothing.
2795 */ 3011 */
2796static void 3012static void
2797scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 3013_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2798{ 3014{
2799 Mpi2SCSIIORequest_t *mpi_request; 3015 Mpi2SCSIIORequest_t *mpi_request;
2800 Mpi2SCSIIOReply_t *mpi_reply; 3016 Mpi2SCSIIOReply_t *mpi_reply;
@@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2939 scmd->result = DID_RESET << 16; 3155 scmd->result = DID_RESET << 16;
2940 break; 3156 break;
2941 3157
3158 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
3159 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
3160 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
3161 _scsih_eedp_error_handling(scmd, ioc_status);
3162 break;
2942 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3163 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2943 case MPI2_IOCSTATUS_INVALID_FUNCTION: 3164 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2944 case MPI2_IOCSTATUS_INVALID_SGL: 3165 case MPI2_IOCSTATUS_INVALID_SGL:
@@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = {
5130 .module = THIS_MODULE, 5351 .module = THIS_MODULE,
5131 .name = "Fusion MPT SAS Host", 5352 .name = "Fusion MPT SAS Host",
5132 .proc_name = MPT2SAS_DRIVER_NAME, 5353 .proc_name = MPT2SAS_DRIVER_NAME,
5133 .queuecommand = scsih_qcmd, 5354 .queuecommand = _scsih_qcmd,
5134 .target_alloc = scsih_target_alloc, 5355 .target_alloc = _scsih_target_alloc,
5135 .slave_alloc = scsih_slave_alloc, 5356 .slave_alloc = _scsih_slave_alloc,
5136 .slave_configure = scsih_slave_configure, 5357 .slave_configure = _scsih_slave_configure,
5137 .target_destroy = scsih_target_destroy, 5358 .target_destroy = _scsih_target_destroy,
5138 .slave_destroy = scsih_slave_destroy, 5359 .slave_destroy = _scsih_slave_destroy,
5139 .change_queue_depth = scsih_change_queue_depth, 5360 .change_queue_depth = _scsih_change_queue_depth,
5140 .change_queue_type = scsih_change_queue_type, 5361 .change_queue_type = _scsih_change_queue_type,
5141 .eh_abort_handler = scsih_abort, 5362 .eh_abort_handler = _scsih_abort,
5142 .eh_device_reset_handler = scsih_dev_reset, 5363 .eh_device_reset_handler = _scsih_dev_reset,
5143 .eh_host_reset_handler = scsih_host_reset, 5364 .eh_target_reset_handler = _scsih_target_reset,
5144 .bios_param = scsih_bios_param, 5365 .eh_host_reset_handler = _scsih_host_reset,
5366 .bios_param = _scsih_bios_param,
5145 .can_queue = 1, 5367 .can_queue = 1,
5146 .this_id = -1, 5368 .this_id = -1,
5147 .sg_tablesize = MPT2SAS_SG_DEPTH, 5369 .sg_tablesize = MPT2SAS_SG_DEPTH,
@@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5228} 5450}
5229 5451
5230/** 5452/**
5231 * scsih_remove - detach and remove add host 5453 * _scsih_remove - detach and remove add host
5232 * @pdev: PCI device struct 5454 * @pdev: PCI device struct
5233 * 5455 *
5234 * Return nothing. 5456 * Return nothing.
5235 */ 5457 */
5236static void __devexit 5458static void __devexit
5237scsih_remove(struct pci_dev *pdev) 5459_scsih_remove(struct pci_dev *pdev)
5238{ 5460{
5239 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5461 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5240 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5462 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
5442} 5664}
5443 5665
5444/** 5666/**
5445 * scsih_probe - attach and add scsi host 5667 * _scsih_probe - attach and add scsi host
5446 * @pdev: PCI device struct 5668 * @pdev: PCI device struct
5447 * @id: pci device id 5669 * @id: pci device id
5448 * 5670 *
5449 * Returns 0 success, anything else error. 5671 * Returns 0 success, anything else error.
5450 */ 5672 */
5451static int 5673static int
5452scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 5674_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5453{ 5675{
5454 struct MPT2SAS_ADAPTER *ioc; 5676 struct MPT2SAS_ADAPTER *ioc;
5455 struct Scsi_Host *shost; 5677 struct Scsi_Host *shost;
@@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5503 goto out_add_shost_fail; 5725 goto out_add_shost_fail;
5504 } 5726 }
5505 5727
5728 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
5729 | SHOST_DIF_TYPE3_PROTECTION);
5730
5506 /* event thread */ 5731 /* event thread */
5507 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 5732 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
5508 "fw_event%d", ioc->id); 5733 "fw_event%d", ioc->id);
@@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5536 5761
5537#ifdef CONFIG_PM 5762#ifdef CONFIG_PM
5538/** 5763/**
5539 * scsih_suspend - power management suspend main entry point 5764 * _scsih_suspend - power management suspend main entry point
5540 * @pdev: PCI device struct 5765 * @pdev: PCI device struct
5541 * @state: PM state change to (usually PCI_D3) 5766 * @state: PM state change to (usually PCI_D3)
5542 * 5767 *
5543 * Returns 0 success, anything else error. 5768 * Returns 0 success, anything else error.
5544 */ 5769 */
5545static int 5770static int
5546scsih_suspend(struct pci_dev *pdev, pm_message_t state) 5771_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5547{ 5772{
5548 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5773 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5549 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5774 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5564} 5789}
5565 5790
5566/** 5791/**
5567 * scsih_resume - power management resume main entry point 5792 * _scsih_resume - power management resume main entry point
5568 * @pdev: PCI device struct 5793 * @pdev: PCI device struct
5569 * 5794 *
5570 * Returns 0 success, anything else error. 5795 * Returns 0 success, anything else error.
5571 */ 5796 */
5572static int 5797static int
5573scsih_resume(struct pci_dev *pdev) 5798_scsih_resume(struct pci_dev *pdev)
5574{ 5799{
5575 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5800 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5576 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5801 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5599,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev)
5599static struct pci_driver scsih_driver = { 5824static struct pci_driver scsih_driver = {
5600 .name = MPT2SAS_DRIVER_NAME, 5825 .name = MPT2SAS_DRIVER_NAME,
5601 .id_table = scsih_pci_table, 5826 .id_table = scsih_pci_table,
5602 .probe = scsih_probe, 5827 .probe = _scsih_probe,
5603 .remove = __devexit_p(scsih_remove), 5828 .remove = __devexit_p(_scsih_remove),
5604#ifdef CONFIG_PM 5829#ifdef CONFIG_PM
5605 .suspend = scsih_suspend, 5830 .suspend = _scsih_suspend,
5606 .resume = scsih_resume, 5831 .resume = _scsih_resume,
5607#endif 5832#endif
5608}; 5833};
5609 5834
5610 5835
5611/** 5836/**
5612 * scsih_init - main entry point for this driver. 5837 * _scsih_init - main entry point for this driver.
5613 * 5838 *
5614 * Returns 0 success, anything else error. 5839 * Returns 0 success, anything else error.
5615 */ 5840 */
5616static int __init 5841static int __init
5617scsih_init(void) 5842_scsih_init(void)
5618{ 5843{
5619 int error; 5844 int error;
5620 5845
@@ -5630,10 +5855,10 @@ scsih_init(void)
5630 mpt2sas_base_initialize_callback_handler(); 5855 mpt2sas_base_initialize_callback_handler();
5631 5856
 5632 /* queuecommand callback handler */ 5857 /* queuecommand callback handler */
5633 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done); 5858 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
5634 5859
5635 /* task managment callback handler */ 5860 /* task managment callback handler */
5636 tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done); 5861 tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);
5637 5862
5638 /* base internal commands callback handler */ 5863 /* base internal commands callback handler */
5639 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); 5864 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
@@ -5659,12 +5884,12 @@ scsih_init(void)
5659} 5884}
5660 5885
5661/** 5886/**
5662 * scsih_exit - exit point for this driver (when it is a module). 5887 * _scsih_exit - exit point for this driver (when it is a module).
5663 * 5888 *
5664 * Returns 0 success, anything else error. 5889 * Returns 0 success, anything else error.
5665 */ 5890 */
5666static void __exit 5891static void __exit
5667scsih_exit(void) 5892_scsih_exit(void)
5668{ 5893{
5669 printk(KERN_INFO "mpt2sas version %s unloading\n", 5894 printk(KERN_INFO "mpt2sas version %s unloading\n",
5670 MPT2SAS_DRIVER_VERSION); 5895 MPT2SAS_DRIVER_VERSION);
@@ -5682,5 +5907,5 @@ scsih_exit(void)
5682 mpt2sas_ctl_exit(); 5907 mpt2sas_ctl_exit();
5683} 5908}
5684 5909
5685module_init(scsih_init); 5910module_init(_scsih_init);
5686module_exit(scsih_exit); 5911module_exit(_scsih_exit);
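
Beyond the scsih_* to _scsih_* renames, the substantive additions in mpt2sas_scsih.c are the split of error handling into LUN reset (_scsih_dev_reset) and target reset (_scsih_target_reset) handlers, and EEDP (T10 DIF type 1/3) support: _scsih_setup_eedp() programs guard/reference-tag checking into the SCSI_IO frame, and _scsih_eedp_error_handling() maps the three EEDP IOC status codes to sense data (ASC 0x10, ASCQ 0x01/0x02/0x03). The following is a standalone sketch of that status-to-sense mapping, using stand-in enum values rather than the real MPI2_IOCSTATUS_* constants:

#include <stdio.h>

/* stand-ins for MPI2_IOCSTATUS_EEDP_{GUARD,APP_TAG,REF_TAG}_ERROR */
enum demo_eedp_status {
	DEMO_EEDP_GUARD_ERROR,
	DEMO_EEDP_APP_TAG_ERROR,
	DEMO_EEDP_REF_TAG_ERROR,
};

struct demo_sense {
	unsigned char key;	/* sense key */
	unsigned char asc;	/* additional sense code */
	unsigned char ascq;	/* additional sense code qualifier */
};

static struct demo_sense demo_eedp_to_sense(enum demo_eedp_status status, int is_write)
{
	static const unsigned char ascq[] = {
		[DEMO_EEDP_GUARD_ERROR]   = 0x01,	/* guard check failed */
		[DEMO_EEDP_APP_TAG_ERROR] = 0x02,	/* application tag check failed */
		[DEMO_EEDP_REF_TAG_ERROR] = 0x03,	/* reference tag check failed */
	};
	struct demo_sense sense = {
		/* writes are reported as ILLEGAL REQUEST, reads as ABORTED COMMAND */
		.key  = is_write ? 0x05 : 0x0b,
		.asc  = 0x10,
		.ascq = ascq[status],
	};

	return sense;
}

int main(void)
{
	struct demo_sense s = demo_eedp_to_sense(DEMO_EEDP_REF_TAG_ERROR, 0);

	printf("key %#04x asc %#04x ascq %#04x\n", s.key, s.asc, s.ascq);
	return 0;
}

The driver's version additionally sets a host byte (DID_ABORT for writes, DID_OK for reads) and emits the triple through scsi_build_sense_buffer(); the sketch captures only the mapping itself.
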
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 5c65da519e39..686695b155c7 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -264,7 +264,7 @@ struct rep_manu_reply{
264}; 264};
265 265
266/** 266/**
267 * transport_expander_report_manufacture - obtain SMP report_manufacture 267 * _transport_expander_report_manufacture - obtain SMP report_manufacture
268 * @ioc: per adapter object 268 * @ioc: per adapter object
269 * @sas_address: expander sas address 269 * @sas_address: expander sas address
270 * @edev: the sas_expander_device object 270 * @edev: the sas_expander_device object
@@ -274,7 +274,7 @@ struct rep_manu_reply{
274 * Returns 0 for success, non-zero for failure. 274 * Returns 0 for success, non-zero for failure.
275 */ 275 */
276static int 276static int
277transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, 277_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
278 u64 sas_address, struct sas_expander_device *edev) 278 u64 sas_address, struct sas_expander_device *edev)
279{ 279{
280 Mpi2SmpPassthroughRequest_t *mpi_request; 280 Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || 578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
579 mpt2sas_port->remote_identify.device_type == 579 mpt2sas_port->remote_identify.device_type ==
580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) 580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
581 transport_expander_report_manufacture(ioc, 581 _transport_expander_report_manufacture(ioc,
582 mpt2sas_port->remote_identify.sas_address, 582 mpt2sas_port->remote_identify.sas_address,
583 rphy_to_expander_device(rphy)); 583 rphy_to_expander_device(rphy));
584 584
@@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
852} 852}
853 853
854/** 854/**
855 * transport_get_linkerrors - 855 * _transport_get_linkerrors -
856 * @phy: The sas phy object 856 * @phy: The sas phy object
857 * 857 *
858 * Only support sas_host direct attached phys. 858 * Only support sas_host direct attached phys.
@@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
860 * 860 *
861 */ 861 */
862static int 862static int
863transport_get_linkerrors(struct sas_phy *phy) 863_transport_get_linkerrors(struct sas_phy *phy)
864{ 864{
865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
866 struct _sas_phy *mpt2sas_phy; 866 struct _sas_phy *mpt2sas_phy;
@@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy)
903} 903}
904 904
905/** 905/**
906 * transport_get_enclosure_identifier - 906 * _transport_get_enclosure_identifier -
907 * @phy: The sas phy object 907 * @phy: The sas phy object
908 * 908 *
909 * Obtain the enclosure logical id for an expander. 909 * Obtain the enclosure logical id for an expander.
910 * Returns 0 for success, non-zero for failure. 910 * Returns 0 for success, non-zero for failure.
911 */ 911 */
912static int 912static int
913transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) 913_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
914{ 914{
915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
916 struct _sas_node *sas_expander; 916 struct _sas_node *sas_expander;
@@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
929} 929}
930 930
931/** 931/**
932 * transport_get_bay_identifier - 932 * _transport_get_bay_identifier -
933 * @phy: The sas phy object 933 * @phy: The sas phy object
934 * 934 *
935 * Returns the slot id for a device that resides inside an enclosure. 935 * Returns the slot id for a device that resides inside an enclosure.
936 */ 936 */
937static int 937static int
938transport_get_bay_identifier(struct sas_rphy *rphy) 938_transport_get_bay_identifier(struct sas_rphy *rphy)
939{ 939{
940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
941 struct _sas_device *sas_device; 941 struct _sas_device *sas_device;
@@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
953} 953}
954 954
955/** 955/**
956 * transport_phy_reset - 956 * _transport_phy_reset -
957 * @phy: The sas phy object 957 * @phy: The sas phy object
958 * @hard_reset: 958 * @hard_reset:
959 * 959 *
@@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
961 * Returns 0 for success, non-zero for failure. 961 * Returns 0 for success, non-zero for failure.
962 */ 962 */
963static int 963static int
964transport_phy_reset(struct sas_phy *phy, int hard_reset) 964_transport_phy_reset(struct sas_phy *phy, int hard_reset)
965{ 965{
966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
967 struct _sas_phy *mpt2sas_phy; 967 struct _sas_phy *mpt2sas_phy;
@@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1002} 1002}
1003 1003
1004/** 1004/**
1005 * transport_smp_handler - transport portal for smp passthru 1005 * _transport_smp_handler - transport portal for smp passthru
1006 * @shost: shost object 1006 * @shost: shost object
1007 * @rphy: sas transport rphy object 1007 * @rphy: sas transport rphy object
1008 * @req: 1008 * @req:
@@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1012 * smp_rep_general /sys/class/bsg/expander-5:0 1012 * smp_rep_general /sys/class/bsg/expander-5:0
1013 */ 1013 */
1014static int 1014static int
1015transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, 1015_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1016 struct request *req) 1016 struct request *req)
1017{ 1017{
1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1200,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1200} 1200}
1201 1201
1202struct sas_function_template mpt2sas_transport_functions = { 1202struct sas_function_template mpt2sas_transport_functions = {
1203 .get_linkerrors = transport_get_linkerrors, 1203 .get_linkerrors = _transport_get_linkerrors,
1204 .get_enclosure_identifier = transport_get_enclosure_identifier, 1204 .get_enclosure_identifier = _transport_get_enclosure_identifier,
1205 .get_bay_identifier = transport_get_bay_identifier, 1205 .get_bay_identifier = _transport_get_bay_identifier,
1206 .phy_reset = transport_phy_reset, 1206 .phy_reset = _transport_phy_reset,
1207 .smp_handler = transport_smp_handler, 1207 .smp_handler = _transport_smp_handler,
1208}; 1208};
1209 1209
1210struct scsi_transport_template *mpt2sas_transport_template; 1210struct scsi_transport_template *mpt2sas_transport_template;
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
deleted file mode 100644
index e4acebd10d1b..000000000000
--- a/drivers/scsi/mvsas.c
+++ /dev/null
@@ -1,3222 +0,0 @@
1/*
2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3
4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. this permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/spinlock.h>
36#include <linux/delay.h>
37#include <linux/dma-mapping.h>
38#include <linux/ctype.h>
39#include <scsi/libsas.h>
40#include <scsi/scsi_tcq.h>
41#include <scsi/sas_ata.h>
42#include <asm/io.h>
43
44#define DRV_NAME "mvsas"
45#define DRV_VERSION "0.5.2"
46#define _MV_DUMP 0
47#define MVS_DISABLE_NVRAM
48#define MVS_DISABLE_MSI
49
50#define mr32(reg) readl(regs + MVS_##reg)
51#define mw32(reg,val) writel((val), regs + MVS_##reg)
52#define mw32_f(reg,val) do { \
53 writel((val), regs + MVS_##reg); \
54 readl(regs + MVS_##reg); \
55 } while (0)
56
57#define MVS_ID_NOT_MAPPED 0x7f
58#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
59
60/* offset for D2H FIS in the Received FIS List Structure */
61#define SATA_RECEIVED_D2H_FIS(reg_set) \
62 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
63#define SATA_RECEIVED_PIO_FIS(reg_set) \
64 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
65#define UNASSOC_D2H_FIS(id) \
66 ((void *) mvi->rx_fis + 0x100 * id)
67
68#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
69 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
70 (__mc) != 0 && __rest; \
71 (++__lseq), (__mc) >>= 1)
72
73/* driver compile-time configuration */
74enum driver_configuration {
75 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
76 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
77 /* software requires power-of-2
78 ring size */
79
80 MVS_SLOTS = 512, /* command slots */
81 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
82 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
83 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
84 MVS_OAF_SZ = 64, /* Open address frame buffer size */
85
86 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
87
88 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
89 MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */
90};
91
92/* unchangeable hardware details */
93enum hardware_details {
94 MVS_MAX_PHYS = 8, /* max. possible phys */
95 MVS_MAX_PORTS = 8, /* max. possible ports */
96 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
97};
98
99/* peripheral registers (BAR2) */
100enum peripheral_registers {
101 SPI_CTL = 0x10, /* EEPROM control */
102 SPI_CMD = 0x14, /* EEPROM command */
103 SPI_DATA = 0x18, /* EEPROM data */
104};
105
106enum peripheral_register_bits {
107 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
108 TWSI_RD = (1U << 4), /* EEPROM read access */
109
110 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
111};
112
113/* enhanced mode registers (BAR4) */
114enum hw_registers {
115 MVS_GBL_CTL = 0x04, /* global control */
116 MVS_GBL_INT_STAT = 0x08, /* global irq status */
117 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
118 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
119
120 MVS_CTL = 0x100, /* SAS/SATA port configuration */
121 MVS_PCS = 0x104, /* SAS/SATA port control/status */
122 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
123 MVS_CMD_LIST_HI = 0x10C,
124 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
125 MVS_RX_FIS_HI = 0x114,
126
127 MVS_TX_CFG = 0x120, /* TX configuration */
128 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
129 MVS_TX_HI = 0x128,
130
131 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
132 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
133 MVS_RX_CFG = 0x134, /* RX configuration */
134 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
135 MVS_RX_HI = 0x13C,
136 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
137
138 MVS_INT_COAL = 0x148, /* Int coalescing config */
139 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
140 MVS_INT_STAT = 0x150, /* Central int status */
141 MVS_INT_MASK = 0x154, /* Central int enable */
142 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
143 MVS_INT_MASK_SRS = 0x15C,
144
145 /* ports 1-3 follow after this */
146 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
147 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
148 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
149 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
150
151 /* ports 1-3 follow after this */
152 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
153 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
154
155 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
156 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
157
158 /* ports 1-3 follow after this */
159 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
160 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
161 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
162 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
163
164 /* ports 1-3 follow after this */
165 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
166 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
167 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
168 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
169};
170
171enum hw_register_bits {
172 /* MVS_GBL_CTL */
173 INT_EN = (1U << 1), /* Global int enable */
174 HBA_RST = (1U << 0), /* HBA reset */
175
176 /* MVS_GBL_INT_STAT */
177 INT_XOR = (1U << 4), /* XOR engine event */
178 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
179
180 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
181 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
182 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
183 MODE_AUTO_DET_PORT6 = (1U << 14),
184 MODE_AUTO_DET_PORT5 = (1U << 13),
185 MODE_AUTO_DET_PORT4 = (1U << 12),
186 MODE_AUTO_DET_PORT3 = (1U << 11),
187 MODE_AUTO_DET_PORT2 = (1U << 10),
188 MODE_AUTO_DET_PORT1 = (1U << 9),
189 MODE_AUTO_DET_PORT0 = (1U << 8),
190 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
191 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
192 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
193 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
194 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
195 MODE_SAS_PORT6_MASK = (1U << 6),
196 MODE_SAS_PORT5_MASK = (1U << 5),
197 MODE_SAS_PORT4_MASK = (1U << 4),
198 MODE_SAS_PORT3_MASK = (1U << 3),
199 MODE_SAS_PORT2_MASK = (1U << 2),
200 MODE_SAS_PORT1_MASK = (1U << 1),
201 MODE_SAS_PORT0_MASK = (1U << 0),
202 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
203 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
204 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
205 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
206
207 /* SAS_MODE value may be
208 * dictated (in hw) by values
209 * of SATA_TARGET & AUTO_DET
210 */
211
212 /* MVS_TX_CFG */
213 TX_EN = (1U << 16), /* Enable TX */
214 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
215
216 /* MVS_RX_CFG */
217 RX_EN = (1U << 16), /* Enable RX */
218 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
219
220 /* MVS_INT_COAL */
221 COAL_EN = (1U << 16), /* Enable int coalescing */
222
223 /* MVS_INT_STAT, MVS_INT_MASK */
224 CINT_I2C = (1U << 31), /* I2C event */
225 CINT_SW0 = (1U << 30), /* software event 0 */
226 CINT_SW1 = (1U << 29), /* software event 1 */
227 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
228 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
229 CINT_MEM = (1U << 26), /* int mem parity err */
230 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
231 CINT_SRS = (1U << 3), /* SRS event */
232 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
233 CINT_DONE = (1U << 0), /* cmd completion */
234
235 /* shl for ports 1-3 */
236 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
237 CINT_PORT = (1U << 8), /* port0 event */
238 CINT_PORT_MASK_OFFSET = 8,
239 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
240
241 /* TX (delivery) ring bits */
242 TXQ_CMD_SHIFT = 29,
243 TXQ_CMD_SSP = 1, /* SSP protocol */
244 TXQ_CMD_SMP = 2, /* SMP protocol */
245 TXQ_CMD_STP = 3, /* STP/SATA protocol */
246 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
247 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
248 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
249 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
250 TXQ_SRS_SHIFT = 20, /* SATA register set */
251 TXQ_SRS_MASK = 0x7f,
252 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
253 TXQ_PHY_MASK = 0xff,
254 TXQ_SLOT_MASK = 0xfff, /* slot number */
255
256 /* RX (completion) ring bits */
257 RXQ_GOOD = (1U << 23), /* Response good */
258 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
259 RXQ_CMD_RX = (1U << 20), /* target cmd received */
260 RXQ_ATTN = (1U << 19), /* attention */
261 RXQ_RSP = (1U << 18), /* response frame xfer'd */
262 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
263 RXQ_DONE = (1U << 16), /* cmd complete */
264 RXQ_SLOT_MASK = 0xfff, /* slot number */
265
266 /* mvs_cmd_hdr bits */
267 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
268 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
269
270 /* SSP initiator only */
271 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
272
273 /* SSP initiator or target */
274 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
275
276 /* SSP target only */
277 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
278 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
279 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
280 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
281
282 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
283 MCH_FBURST = (1U << 11), /* first burst (SSP) */
284 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
285 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
286 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
287 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
288 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
289 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
290 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
291 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
292
293 CCTL_RST = (1U << 5), /* port logic reset */
294
295 /* 0(LSB first), 1(MSB first) */
296 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
297 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
298 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
299 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
300
301 /* MVS_Px_SER_CTLSTAT (per-phy control) */
302 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
303 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
304 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
305 PHY_RST = (1U << 0), /* phy reset */
306 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
307 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
308 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
309 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
310 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
311 PHY_READY_MASK = (1U << 20),
312
313 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
314 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
315 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
316 PHYEV_AN = (1U << 18), /* SATA async notification */
317 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
318 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
319 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
320 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
321 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
322 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
323 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
324 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
325 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
326 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
327 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
328 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
329 PHYEV_ID_DONE = (1U << 2), /* identify done */
330 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
331 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
332
333 /* MVS_PCS */
334 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
335 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
336 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
337 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
338 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
339 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
340 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
341 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
342 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
343 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
344
345 /* Port n Attached Device Info */
346 PORT_DEV_SSP_TRGT = (1U << 19),
347 PORT_DEV_SMP_TRGT = (1U << 18),
348 PORT_DEV_STP_TRGT = (1U << 17),
349 PORT_DEV_SSP_INIT = (1U << 11),
350 PORT_DEV_SMP_INIT = (1U << 10),
351 PORT_DEV_STP_INIT = (1U << 9),
352 PORT_PHY_ID_MASK = (0xFFU << 24),
353 PORT_DEV_TRGT_MASK = (0x7U << 17),
354 PORT_DEV_INIT_MASK = (0x7U << 9),
355 PORT_DEV_TYPE_MASK = (0x7U << 0),
356
357 /* Port n PHY Status */
358 PHY_RDY = (1U << 2),
359 PHY_DW_SYNC = (1U << 1),
360 PHY_OOB_DTCTD = (1U << 0),
361
362 /* VSR */
363 /* PHYMODE 6 (CDB) */
364 PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
365 PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
366 PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
367 PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
368 PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
369 PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
370 PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
371 PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
372 PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
373 PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
374 PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
375 PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
376 PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
377 PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
378};
379
380enum mvs_info_flags {
381 MVF_MSI = (1U << 0), /* MSI is enabled */
382 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
383};
384
385enum sas_cmd_port_registers {
386 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
387 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
388 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
389 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
390 CMD_OOB_SPACE = 0x110, /* OOB space control register */
391 CMD_OOB_BURST = 0x114, /* OOB burst control register */
392 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
393 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
394 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
395 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
396 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
397 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
398 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
399 CMD_ID_TEST = 0x134, /* ID test register */
400 CMD_PL_TIMER = 0x138, /* PL timer register */
401 CMD_WD_TIMER = 0x13c, /* WD timer register */
402 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
403 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
404 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
405 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
406 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
407 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
408 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
409 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
410 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
 411	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
412 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
413 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
414 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
415 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
416 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
417 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
418 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
419 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
420 CMD_RESET_COUNT = 0x188, /* Reset Count */
421 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
422 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
423 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
424 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
425 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
426 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
427 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
428 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
429 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
430 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
431 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
432 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
433 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
434 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
435 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
436};
437
438/* SAS/SATA configuration port registers, aka phy registers */
439enum sas_sata_config_port_regs {
440 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
441 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
442 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
443 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
444 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
445 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
446 PHYR_SATA_CTL = 0x18, /* SATA control */
447 PHYR_PHY_STAT = 0x1C, /* PHY status */
 448	PHYR_SATA_SIG0	= 0x20, /* port SATA signature FIS (bytes 0-3) */
 449	PHYR_SATA_SIG1	= 0x24, /* port SATA signature FIS (bytes 4-7) */
 450	PHYR_SATA_SIG2	= 0x28, /* port SATA signature FIS (bytes 8-11) */
 451	PHYR_SATA_SIG3	= 0x2c, /* port SATA signature FIS (bytes 12-15) */
452 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
453 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
454 PHYR_WIDE_PORT = 0x38, /* wide port participating */
455 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
456 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
457 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
458};
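/*
 * These per-phy registers are not mapped directly; they appear to be
 * reached through the MVS_Pn_CFG_ADDR/MVS_Pn_CFG_DATA window defined
 * above (write the PHYR_* offset to the address register, then access
 * the data register).  A sketch under that assumption, for port 0:
 *
 *	mw32(P0_CFG_ADDR, PHYR_PHY_STAT);
 *	stat = mr32(P0_CFG_DATA);
 */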
459
460/* SAS/SATA Vendor Specific Port Registers */
461enum sas_sata_vsp_regs {
462 VSR_PHY_STAT = 0x00, /* Phy Status */
463 VSR_PHY_MODE1 = 0x01, /* phy tx */
464 VSR_PHY_MODE2 = 0x02, /* tx scc */
465 VSR_PHY_MODE3 = 0x03, /* pll */
466 VSR_PHY_MODE4 = 0x04, /* VCO */
467 VSR_PHY_MODE5 = 0x05, /* Rx */
468 VSR_PHY_MODE6 = 0x06, /* CDR */
469 VSR_PHY_MODE7 = 0x07, /* Impedance */
470 VSR_PHY_MODE8 = 0x08, /* Voltage */
471 VSR_PHY_MODE9 = 0x09, /* Test */
472 VSR_PHY_MODE10 = 0x0A, /* Power */
473 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
 474	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
 475	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
476};
477
478enum pci_cfg_registers {
479 PCR_PHY_CTL = 0x40,
480 PCR_PHY_CTL2 = 0x90,
481 PCR_DEV_CTRL = 0xE8,
482};
483
484enum pci_cfg_register_bits {
485 PCTL_PWR_ON = (0xFU << 24),
486 PCTL_OFF = (0xFU << 12),
487 PRD_REQ_SIZE = (0x4000),
488 PRD_REQ_MASK = (0x00007000),
489};
490
491enum nvram_layout_offsets {
492 NVR_SIG = 0x00, /* 0xAA, 0x55 */
493 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
494};
495
496enum chip_flavors {
497 chip_6320,
498 chip_6440,
499 chip_6480,
500};
501
502enum port_type {
503 PORT_TYPE_SAS = (1L << 1),
504 PORT_TYPE_SATA = (1L << 0),
505};
506
507/* Command Table Format */
508enum ct_format {
509 /* SSP */
510 SSP_F_H = 0x00,
511 SSP_F_IU = 0x18,
512 SSP_F_MAX = 0x4D,
513 /* STP */
514 STP_CMD_FIS = 0x00,
515 STP_ATAPI_CMD = 0x40,
516 STP_F_MAX = 0x10,
517 /* SMP */
518 SMP_F_T = 0x00,
519 SMP_F_DEP = 0x01,
520 SMP_F_MAX = 0x101,
521};
522
523enum status_buffer {
524 SB_EIR_OFF = 0x00, /* Error Information Record */
525 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
526 SB_RFB_MAX = 0x400, /* RFB size*/
527};
528
529enum error_info_rec {
530 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
531 CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
532 RSP_OVER = (1U << 29), /* rsp buffer overflow */
533 RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
534 UNK_FIS = (1U << 27), /* unknown FIS */
535 DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
536 SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
537 TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
538 R_ERR = (1U << 23), /* SATA returned R_ERR prim */
539 RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
540 XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
541 UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
542 DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
543 INTERLOCK = (1U << 15), /* interlock error */
544 NAK = (1U << 14), /* NAK rx'd */
545 ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
546 CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
547 OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
548 PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
549 NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
550 STP_RES_BSY = (1U << 8), /* STP resources busy */
551 BREAK = (1U << 7), /* break received */
552 BAD_DEST = (1U << 6), /* bad destination */
553 BAD_PROTO = (1U << 5), /* protocol not supported */
554 BAD_RATE = (1U << 4), /* cxn rate not supported */
555 WRONG_DEST = (1U << 3), /* wrong destination error */
556 CREDIT_TO = (1U << 2), /* credit timeout */
557 WDOG_TO = (1U << 1), /* watchdog timeout */
558 BUF_PAR = (1U << 0), /* buffer parity error */
559};
560
561enum error_info_rec_2 {
562 SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
563 GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
564 APP_CHK_ERR = (1U << 13), /* Application Check error */
565 REF_CHK_ERR = (1U << 12), /* Reference Check Error */
566 USR_BLK_NM = (1U << 0), /* User Block Number */
567};
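/*
 * mvs_slot_err() below decodes these flags from the first two dwords
 * of the slot's status buffer: err_dw0 carries the enum error_info_rec
 * bits and err_dw1 the enum error_info_rec_2 bits -- i.e. the 8-byte
 * error information record that starts at SB_EIR_OFF.
 */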
568
569struct mvs_chip_info {
570 u32 n_phy;
571 u32 srs_sz;
572 u32 slot_width;
573};
574
575struct mvs_err_info {
576 __le32 flags;
577 __le32 flags2;
578};
579
580struct mvs_prd {
581 __le64 addr; /* 64-bit buffer address */
582 __le32 reserved;
583 __le32 len; /* 16-bit length */
584};
585
586struct mvs_cmd_hdr {
587 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
588 __le32 lens; /* cmd, max resp frame len */
589 __le32 tags; /* targ port xfer tag; tag */
590 __le32 data_len; /* data xfer len */
591 __le64 cmd_tbl; /* command table address */
592 __le64 open_frame; /* open addr frame address */
593 __le64 status_buf; /* status buffer address */
594 __le64 prd_tbl; /* PRD tbl address */
595 __le32 reserved[4];
596};
597
598struct mvs_port {
599 struct asd_sas_port sas_port;
600 u8 port_attached;
601 u8 taskfileset;
602 u8 wide_port_phymap;
603 struct list_head list;
604};
605
606struct mvs_phy {
607 struct mvs_port *port;
608 struct asd_sas_phy sas_phy;
609 struct sas_identify identify;
610 struct scsi_device *sdev;
611 u64 dev_sas_addr;
612 u64 att_dev_sas_addr;
613 u32 att_dev_info;
614 u32 dev_info;
615 u32 phy_type;
616 u32 phy_status;
617 u32 irq_status;
618 u32 frame_rcvd_size;
619 u8 frame_rcvd[32];
620 u8 phy_attached;
621 enum sas_linkrate minimum_linkrate;
622 enum sas_linkrate maximum_linkrate;
623};
624
625struct mvs_slot_info {
626 struct list_head list;
627 struct sas_task *task;
628 u32 n_elem;
629 u32 tx;
630
631 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
632 * and PRD table
633 */
634 void *buf;
635 dma_addr_t buf_dma;
636#if _MV_DUMP
637 u32 cmd_size;
638#endif
639
640 void *response;
641 struct mvs_port *port;
642};
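/*
 * Layout of the per-slot DMA buffer (slot->buf, MVS_SLOT_BUF_SZ bytes)
 * as arranged by the mvs_task_prep_*() routines below -- a sketch:
 *
 *	region 1: command table        MVS_SSP_CMD_SZ / MVS_ATA_CMD_SZ
 *	region 2: open address frame   MVS_OAF_SZ
 *	region 3: PRD table            sizeof(struct mvs_prd) * n_elem
 *	region 4: status buffer        whatever space remains
 *
 * (SMP requests normally point the command table at the DMA-mapped
 * request instead of using region 1.)
 */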
643
644struct mvs_info {
645 unsigned long flags;
646
647 spinlock_t lock; /* host-wide lock */
648 struct pci_dev *pdev; /* our device */
649 void __iomem *regs; /* enhanced mode registers */
650 void __iomem *peri_regs; /* peripheral registers */
651
652 u8 sas_addr[SAS_ADDR_SIZE];
653 struct sas_ha_struct sas; /* SCSI/SAS glue */
654 struct Scsi_Host *shost;
655
656 __le32 *tx; /* TX (delivery) DMA ring */
657 dma_addr_t tx_dma;
658 u32 tx_prod; /* cached next-producer idx */
659
660 __le32 *rx; /* RX (completion) DMA ring */
661 dma_addr_t rx_dma;
662 u32 rx_cons; /* RX consumer idx */
663
664 __le32 *rx_fis; /* RX'd FIS area */
665 dma_addr_t rx_fis_dma;
666
667 struct mvs_cmd_hdr *slot; /* DMA command header slots */
668 dma_addr_t slot_dma;
669
670 const struct mvs_chip_info *chip;
671
672 u8 tags[MVS_SLOTS];
673 struct mvs_slot_info slot_info[MVS_SLOTS];
674 /* further per-slot information */
675 struct mvs_phy phy[MVS_MAX_PHYS];
676 struct mvs_port port[MVS_MAX_PHYS];
677#ifdef MVS_USE_TASKLET
678 struct tasklet_struct tasklet;
679#endif
680};
681
682static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
683 void *funcdata);
684static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
685static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
686static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
687static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
688static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
689static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
690
691static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
692static void mvs_detect_porttype(struct mvs_info *mvi, int i);
693static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
694static void mvs_release_task(struct mvs_info *mvi, int phy_no);
695
696static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
697static void mvs_scan_start(struct Scsi_Host *);
698static int mvs_slave_configure(struct scsi_device *sdev);
699
700static struct scsi_transport_template *mvs_stt;
701
702static const struct mvs_chip_info mvs_chips[] = {
703 [chip_6320] = { 2, 16, 9 },
704 [chip_6440] = { 4, 16, 9 },
705 [chip_6480] = { 8, 32, 10 },
706};
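/*
 * Column meanings follow struct mvs_chip_info: { n_phy, srs_sz,
 * slot_width }.  For example the 6480 entry gives 8 phys, 32 SATA
 * register sets and a 10-bit ring index, so for that chip
 * MVS_CHIP_SLOT_SZ = 1 << 10 = 1024 entries.
 */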
707
708static struct scsi_host_template mvs_sht = {
709 .module = THIS_MODULE,
710 .name = DRV_NAME,
711 .queuecommand = sas_queuecommand,
712 .target_alloc = sas_target_alloc,
713 .slave_configure = mvs_slave_configure,
714 .slave_destroy = sas_slave_destroy,
715 .scan_finished = mvs_scan_finished,
716 .scan_start = mvs_scan_start,
717 .change_queue_depth = sas_change_queue_depth,
718 .change_queue_type = sas_change_queue_type,
719 .bios_param = sas_bios_param,
720 .can_queue = 1,
721 .cmd_per_lun = 1,
722 .this_id = -1,
723 .sg_tablesize = SG_ALL,
724 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
725 .use_clustering = ENABLE_CLUSTERING,
726 .eh_device_reset_handler = sas_eh_device_reset_handler,
727 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
728 .slave_alloc = sas_slave_alloc,
729 .target_destroy = sas_target_destroy,
730 .ioctl = sas_ioctl,
731};
732
733static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
734{
735 u32 i;
736 u32 run;
737 u32 offset;
738
739 offset = 0;
740 while (size) {
741 printk("%08X : ", baseaddr + offset);
742 if (size >= 16)
743 run = 16;
744 else
745 run = size;
746 size -= run;
747 for (i = 0; i < 16; i++) {
748 if (i < run)
749 printk("%02X ", (u32)data[i]);
750 else
751 printk(" ");
752 }
753 printk(": ");
754 for (i = 0; i < run; i++)
755 printk("%c", isalnum(data[i]) ? data[i] : '.');
756 printk("\n");
757 data = &data[16];
758 offset += run;
759 }
760 printk("\n");
761}
762
763#if _MV_DUMP
764static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
765 enum sas_protocol proto)
766{
767 u32 offset;
768 struct pci_dev *pdev = mvi->pdev;
769 struct mvs_slot_info *slot = &mvi->slot_info[tag];
770
771 offset = slot->cmd_size + MVS_OAF_SZ +
772 sizeof(struct mvs_prd) * slot->n_elem;
773 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
774 tag);
775 mvs_hexdump(32, (u8 *) slot->response,
776 (u32) slot->buf_dma + offset);
777}
778#endif
779
780static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
781 enum sas_protocol proto)
782{
783#if _MV_DUMP
784 u32 sz, w_ptr;
785 u64 addr;
786 void __iomem *regs = mvi->regs;
787 struct pci_dev *pdev = mvi->pdev;
788 struct mvs_slot_info *slot = &mvi->slot_info[tag];
789
790 /*Delivery Queue */
791 sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
792 w_ptr = slot->tx;
793 addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
794 dev_printk(KERN_DEBUG, &pdev->dev,
795 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
796 dev_printk(KERN_DEBUG, &pdev->dev,
797 "Delivery Queue Base Address=0x%llX (PA)"
798 "(tx_dma=0x%llX), Entry=%04d\n",
799 addr, mvi->tx_dma, w_ptr);
800 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
801 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
802 /*Command List */
803 addr = mvi->slot_dma;
804 dev_printk(KERN_DEBUG, &pdev->dev,
805 "Command List Base Address=0x%llX (PA)"
806 "(slot_dma=0x%llX), Header=%03d\n",
807 addr, slot->buf_dma, tag);
808 dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
809 /*mvs_cmd_hdr */
810 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
811 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
812 /*1.command table area */
813 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
814 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
815 /*2.open address frame area */
816 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
817 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
818 (u32) slot->buf_dma + slot->cmd_size);
819 /*3.status buffer */
820 mvs_hba_sb_dump(mvi, tag, proto);
821 /*4.PRD table */
822 dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
823 mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
824 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
825 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
826#endif
827}
828
829static void mvs_hba_cq_dump(struct mvs_info *mvi)
830{
831#if (_MV_DUMP > 2)
832 u64 addr;
833 void __iomem *regs = mvi->regs;
834 struct pci_dev *pdev = mvi->pdev;
835 u32 entry = mvi->rx_cons + 1;
836 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
837
838 /*Completion Queue */
839 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
840 dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
841 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
842 dev_printk(KERN_DEBUG, &pdev->dev,
843 "Completion List Base Address=0x%llX (PA), "
844 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
845 addr, entry - 1, mvi->rx[0]);
846 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
847 mvi->rx_dma + sizeof(u32) * entry);
848#endif
849}
850
851static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
852{
853 void __iomem *regs = mvi->regs;
854 u32 tmp;
855
856 tmp = mr32(GBL_CTL);
857
858 mw32(GBL_CTL, tmp | INT_EN);
859}
860
861static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
862{
863 void __iomem *regs = mvi->regs;
864 u32 tmp;
865
866 tmp = mr32(GBL_CTL);
867
868 mw32(GBL_CTL, tmp & ~INT_EN);
869}
870
871static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
872
873/* move to PCI layer or libata core? */
874static int pci_go_64(struct pci_dev *pdev)
875{
876 int rc;
877
878 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
879 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
880 if (rc) {
881 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
882 if (rc) {
883 dev_printk(KERN_ERR, &pdev->dev,
884 "64-bit DMA enable failed\n");
885 return rc;
886 }
887 }
888 } else {
889 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
890 if (rc) {
891 dev_printk(KERN_ERR, &pdev->dev,
892 "32-bit DMA enable failed\n");
893 return rc;
894 }
895 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
896 if (rc) {
897 dev_printk(KERN_ERR, &pdev->dev,
898 "32-bit consistent DMA enable failed\n");
899 return rc;
900 }
901 }
902
903 return rc;
904}
905
906static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
907{
908 if (task->lldd_task) {
909 struct mvs_slot_info *slot;
910 slot = (struct mvs_slot_info *) task->lldd_task;
911 *tag = slot - mvi->slot_info;
912 return 1;
913 }
914 return 0;
915}
916
917static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
918{
919 void *bitmap = (void *) &mvi->tags;
920 clear_bit(tag, bitmap);
921}
922
923static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
924{
925 mvs_tag_clear(mvi, tag);
926}
927
928static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
929{
930 void *bitmap = (void *) &mvi->tags;
931 set_bit(tag, bitmap);
932}
933
934static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
935{
936 unsigned int index, tag;
937 void *bitmap = (void *) &mvi->tags;
938
939 index = find_first_zero_bit(bitmap, MVS_SLOTS);
940 tag = index;
941 if (tag >= MVS_SLOTS)
942 return -SAS_QUEUE_FULL;
943 mvs_tag_set(mvi, tag);
944 *tag_out = tag;
945 return 0;
946}
947
948static void mvs_tag_init(struct mvs_info *mvi)
949{
950 int i;
951 for (i = 0; i < MVS_SLOTS; ++i)
952 mvs_tag_clear(mvi, i);
953}
954
955#ifndef MVS_DISABLE_NVRAM
956static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
957{
958 int timeout = 1000;
959
960 if (addr & ~SPI_ADDR_MASK)
961 return -EINVAL;
962
963 writel(addr, regs + SPI_CMD);
964 writel(TWSI_RD, regs + SPI_CTL);
965
966 while (timeout-- > 0) {
967 if (readl(regs + SPI_CTL) & TWSI_RDY) {
968 *data = readl(regs + SPI_DATA);
969 return 0;
970 }
971
972 udelay(10);
973 }
974
975 return -EBUSY;
976}
977
978static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
979 void *buf, u32 buflen)
980{
981 u32 addr_end, tmp_addr, i, j;
982 u32 tmp = 0;
983 int rc;
984 u8 *tmp8, *buf8 = buf;
985
986 addr_end = addr + buflen;
987 tmp_addr = ALIGN(addr, 4);
988 if (addr > 0xff)
989 return -EINVAL;
990
991 j = addr & 0x3;
992 if (j) {
993 rc = mvs_eep_read(regs, tmp_addr, &tmp);
994 if (rc)
995 return rc;
996
997 tmp8 = (u8 *)&tmp;
998 for (i = j; i < 4; i++)
999 *buf8++ = tmp8[i];
1000
1001 tmp_addr += 4;
1002 }
1003
1004 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
1005 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1006 if (rc)
1007 return rc;
1008
1009 memcpy(buf8, &tmp, 4);
1010 buf8 += 4;
1011 }
1012
1013 if (tmp_addr < addr_end) {
1014 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1015 if (rc)
1016 return rc;
1017
1018 tmp8 = (u8 *)&tmp;
1019 j = addr_end - tmp_addr;
1020 for (i = 0; i < j; i++)
1021 *buf8++ = tmp8[i];
1022
1023 tmp_addr += 4;
1024 }
1025
1026 return 0;
1027}
1028#endif
1029
1030static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
1031 void *buf, u32 buflen)
1032{
1033#ifndef MVS_DISABLE_NVRAM
1034 void __iomem *regs = mvi->regs;
1035 int rc, i;
1036 u32 sum;
1037 u8 hdr[2], *tmp;
1038 const char *msg;
1039
1040 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
1041 if (rc) {
1042 msg = "nvram hdr read failed";
1043 goto err_out;
1044 }
1045 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
1046 if (rc) {
1047 msg = "nvram read failed";
1048 goto err_out;
1049 }
1050
1051 if (hdr[0] != 0x5A) {
1052 /* entry id */
1053 msg = "invalid nvram entry id";
1054 rc = -ENOENT;
1055 goto err_out;
1056 }
1057
1058 tmp = buf;
1059 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
1060 for (i = 0; i < buflen; i++)
1061 sum += ((u32)tmp[i]);
1062
1063 if (sum) {
1064 msg = "nvram checksum failure";
1065 rc = -EILSEQ;
1066 goto err_out;
1067 }
1068
1069 return 0;
1070
1071err_out:
1072 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
1073 return rc;
1074#else
1075 /* FIXME , For SAS target mode */
1076 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1077 return 0;
1078#endif
1079}
1080
1081static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
1082{
1083 struct mvs_phy *phy = &mvi->phy[i];
1084 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
1085
1086 if (!phy->phy_attached)
1087 return;
1088
1089 if (sas_phy->phy) {
1090 struct sas_phy *sphy = sas_phy->phy;
1091
1092 sphy->negotiated_linkrate = sas_phy->linkrate;
1093 sphy->minimum_linkrate = phy->minimum_linkrate;
1094 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
1095 sphy->maximum_linkrate = phy->maximum_linkrate;
1096 sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
1097 }
1098
1099 if (phy->phy_type & PORT_TYPE_SAS) {
1100 struct sas_identify_frame *id;
1101
1102 id = (struct sas_identify_frame *)phy->frame_rcvd;
1103 id->dev_type = phy->identify.device_type;
1104 id->initiator_bits = SAS_PROTOCOL_ALL;
1105 id->target_bits = phy->identify.target_port_protocols;
1106 } else if (phy->phy_type & PORT_TYPE_SATA) {
1107 /* TODO */
1108 }
1109 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
1110 mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
1111 PORTE_BYTES_DMAED);
1112}
1113
1114static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
1115{
1116 /* give the phy enabling interrupt event time to come in (1s
1117 * is empirically about all it takes) */
1118 if (time < HZ)
1119 return 0;
1120 /* Wait for discovery to finish */
1121 scsi_flush_work(shost);
1122 return 1;
1123}
1124
1125static void mvs_scan_start(struct Scsi_Host *shost)
1126{
1127 int i;
1128 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
1129
1130 for (i = 0; i < mvi->chip->n_phy; ++i) {
1131 mvs_bytes_dmaed(mvi, i);
1132 }
1133}
1134
1135static int mvs_slave_configure(struct scsi_device *sdev)
1136{
1137 struct domain_device *dev = sdev_to_domain_dev(sdev);
1138 int ret = sas_slave_configure(sdev);
1139
1140 if (ret)
1141 return ret;
1142
1143 if (dev_is_sata(dev)) {
1144 /* struct ata_port *ap = dev->sata_dev.ap; */
1145 /* struct ata_device *adev = ap->link.device; */
1146
1147 /* clamp at no NCQ for the time being */
1148 /* adev->flags |= ATA_DFLAG_NCQ_OFF; */
1149 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
1150 }
1151 return 0;
1152}
1153
1154static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
1155{
1156 struct pci_dev *pdev = mvi->pdev;
1157 struct sas_ha_struct *sas_ha = &mvi->sas;
1158 struct mvs_phy *phy = &mvi->phy[phy_no];
1159 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1160
1161 phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
1162 /*
1163	 * 'events' carries the port-level event bits; check the per-port
1164	 * interrupt status (read above) to see what actually happened.
1165 */
1166 dev_printk(KERN_DEBUG, &pdev->dev,
1167 "Port %d Event = %X\n",
1168 phy_no, phy->irq_status);
1169
1170 if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
1171 mvs_release_task(mvi, phy_no);
1172 if (!mvs_is_phy_ready(mvi, phy_no)) {
1173 sas_phy_disconnected(sas_phy);
1174 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1175 dev_printk(KERN_INFO, &pdev->dev,
1176 "Port %d Unplug Notice\n", phy_no);
1177
1178 } else
1179 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
1180 }
1181 if (!(phy->irq_status & PHYEV_DEC_ERR)) {
1182 if (phy->irq_status & PHYEV_COMWAKE) {
1183 u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
1184 mvs_write_port_irq_mask(mvi, phy_no,
1185 tmp | PHYEV_SIG_FIS);
1186 }
1187 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
1188 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
1189 if (phy->phy_status) {
1190 mvs_detect_porttype(mvi, phy_no);
1191
1192 if (phy->phy_type & PORT_TYPE_SATA) {
1193 u32 tmp = mvs_read_port_irq_mask(mvi,
1194 phy_no);
1195 tmp &= ~PHYEV_SIG_FIS;
1196 mvs_write_port_irq_mask(mvi,
1197 phy_no, tmp);
1198 }
1199
1200 mvs_update_phyinfo(mvi, phy_no, 0);
1201 sas_ha->notify_phy_event(sas_phy,
1202 PHYE_OOB_DONE);
1203 mvs_bytes_dmaed(mvi, phy_no);
1204 } else {
1205 dev_printk(KERN_DEBUG, &pdev->dev,
1206 "plugin interrupt but phy is gone\n");
1207 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
1208 NULL);
1209 }
1210 } else if (phy->irq_status & PHYEV_BROAD_CH) {
1211 mvs_release_task(mvi, phy_no);
1212 sas_ha->notify_port_event(sas_phy,
1213 PORTE_BROADCAST_RCVD);
1214 }
1215 }
1216 mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
1217}
1218
1219static void mvs_int_sata(struct mvs_info *mvi)
1220{
1221 u32 tmp;
1222 void __iomem *regs = mvi->regs;
1223 tmp = mr32(INT_STAT_SRS);
1224 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1225}
1226
1227static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
1228 u32 slot_idx)
1229{
1230 void __iomem *regs = mvi->regs;
1231 struct domain_device *dev = task->dev;
1232 struct asd_sas_port *sas_port = dev->port;
1233 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1234 u32 reg_set, phy_mask;
1235
1236 if (!sas_protocol_ata(task->task_proto)) {
1237 reg_set = 0;
1238 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1239 sas_port->phy_mask;
1240 } else {
1241 reg_set = port->taskfileset;
1242 phy_mask = sas_port->phy_mask;
1243 }
1244 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
1245 (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
1246 (phy_mask << TXQ_PHY_SHIFT) |
1247 (reg_set << TXQ_SRS_SHIFT));
1248
1249 mw32(TX_PROD_IDX, mvi->tx_prod);
1250 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1251}
1252
1253static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1254 u32 slot_idx, int err)
1255{
1256 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1257 struct task_status_struct *tstat = &task->task_status;
1258 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1259 int stat = SAM_GOOD;
1260
1261 resp->frame_len = sizeof(struct dev_to_host_fis);
1262 memcpy(&resp->ending_fis[0],
1263 SATA_RECEIVED_D2H_FIS(port->taskfileset),
1264 sizeof(struct dev_to_host_fis));
1265 tstat->buf_valid_size = sizeof(*resp);
1266 if (unlikely(err))
1267 stat = SAS_PROTO_RESPONSE;
1268 return stat;
1269}
1270
1271static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1272{
1273 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1274 mvs_tag_clear(mvi, slot_idx);
1275}
1276
1277static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1278 struct mvs_slot_info *slot, u32 slot_idx)
1279{
1280 if (!sas_protocol_ata(task->task_proto))
1281 if (slot->n_elem)
1282 pci_unmap_sg(mvi->pdev, task->scatter,
1283 slot->n_elem, task->data_dir);
1284
1285 switch (task->task_proto) {
1286 case SAS_PROTOCOL_SMP:
1287 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
1288 PCI_DMA_FROMDEVICE);
1289 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
1290 PCI_DMA_TODEVICE);
1291 break;
1292
1293 case SAS_PROTOCOL_SATA:
1294 case SAS_PROTOCOL_STP:
1295 case SAS_PROTOCOL_SSP:
1296 default:
1297 /* do nothing */
1298 break;
1299 }
1300 list_del(&slot->list);
1301 task->lldd_task = NULL;
1302 slot->task = NULL;
1303 slot->port = NULL;
1304}
1305
1306static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1307 u32 slot_idx)
1308{
1309 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1310 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1311 u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
1312 int stat = SAM_CHECK_COND;
1313
1314 if (err_dw1 & SLOT_BSY_ERR) {
1315 stat = SAS_QUEUE_FULL;
1316 mvs_slot_reset(mvi, task, slot_idx);
1317 }
1318 switch (task->task_proto) {
1319 case SAS_PROTOCOL_SSP:
1320 break;
1321 case SAS_PROTOCOL_SMP:
1322 break;
1323 case SAS_PROTOCOL_SATA:
1324 case SAS_PROTOCOL_STP:
1325 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1326 if (err_dw0 & TFILE_ERR)
1327 stat = mvs_sata_done(mvi, task, slot_idx, 1);
1328 break;
1329 default:
1330 break;
1331 }
1332
1333 mvs_hexdump(16, (u8 *) slot->response, 0);
1334 return stat;
1335}
1336
1337static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1338{
1339 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1340 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1341 struct sas_task *task = slot->task;
1342 struct task_status_struct *tstat;
1343 struct mvs_port *port;
1344 bool aborted;
1345 void *to;
1346
1347 if (unlikely(!task || !task->lldd_task))
1348 return -1;
1349
1350 mvs_hba_cq_dump(mvi);
1351
1352 spin_lock(&task->task_state_lock);
1353 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1354 if (!aborted) {
1355 task->task_state_flags &=
1356 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1357 task->task_state_flags |= SAS_TASK_STATE_DONE;
1358 }
1359 spin_unlock(&task->task_state_lock);
1360
1361 if (aborted) {
1362 mvs_slot_task_free(mvi, task, slot, slot_idx);
1363 mvs_slot_free(mvi, rx_desc);
1364 return -1;
1365 }
1366
1367 port = slot->port;
1368 tstat = &task->task_status;
1369 memset(tstat, 0, sizeof(*tstat));
1370 tstat->resp = SAS_TASK_COMPLETE;
1371
1372 if (unlikely(!port->port_attached || flags)) {
1373 mvs_slot_err(mvi, task, slot_idx);
1374 if (!sas_protocol_ata(task->task_proto))
1375 tstat->stat = SAS_PHY_DOWN;
1376 goto out;
1377 }
1378
1379 /* error info record present */
1380 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1381 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1382 goto out;
1383 }
1384
1385 switch (task->task_proto) {
1386 case SAS_PROTOCOL_SSP:
1387 /* hw says status == 0, datapres == 0 */
1388 if (rx_desc & RXQ_GOOD) {
1389 tstat->stat = SAM_GOOD;
1390 tstat->resp = SAS_TASK_COMPLETE;
1391 }
1392 /* response frame present */
1393 else if (rx_desc & RXQ_RSP) {
1394 struct ssp_response_iu *iu =
1395 slot->response + sizeof(struct mvs_err_info);
1396 sas_ssp_task_response(&mvi->pdev->dev, task, iu);
1397 }
1398
1399 /* should never happen? */
1400 else
1401 tstat->stat = SAM_CHECK_COND;
1402 break;
1403
1404 case SAS_PROTOCOL_SMP: {
1405 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1406 tstat->stat = SAM_GOOD;
1407 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1408 memcpy(to + sg_resp->offset,
1409 slot->response + sizeof(struct mvs_err_info),
1410 sg_dma_len(sg_resp));
1411 kunmap_atomic(to, KM_IRQ0);
1412 break;
1413 }
1414
1415 case SAS_PROTOCOL_SATA:
1416 case SAS_PROTOCOL_STP:
1417 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1418 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1419 break;
1420 }
1421
1422 default:
1423 tstat->stat = SAM_CHECK_COND;
1424 break;
1425 }
1426
1427out:
1428 mvs_slot_task_free(mvi, task, slot, slot_idx);
1429 if (unlikely(tstat->stat != SAS_QUEUE_FULL))
1430 mvs_slot_free(mvi, rx_desc);
1431
1432 spin_unlock(&mvi->lock);
1433 task->task_done(task);
1434 spin_lock(&mvi->lock);
1435 return tstat->stat;
1436}
1437
1438static void mvs_release_task(struct mvs_info *mvi, int phy_no)
1439{
1440 struct list_head *pos, *n;
1441 struct mvs_slot_info *slot;
1442 struct mvs_phy *phy = &mvi->phy[phy_no];
1443 struct mvs_port *port = phy->port;
1444 u32 rx_desc;
1445
1446 if (!port)
1447 return;
1448
1449 list_for_each_safe(pos, n, &port->list) {
1450 slot = container_of(pos, struct mvs_slot_info, list);
1451 rx_desc = (u32) (slot - mvi->slot_info);
1452 mvs_slot_complete(mvi, rx_desc, 1);
1453 }
1454}
1455
1456static void mvs_int_full(struct mvs_info *mvi)
1457{
1458 void __iomem *regs = mvi->regs;
1459 u32 tmp, stat;
1460 int i;
1461
1462 stat = mr32(INT_STAT);
1463
1464 mvs_int_rx(mvi, false);
1465
1466 for (i = 0; i < MVS_MAX_PORTS; i++) {
1467 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1468 if (tmp)
1469 mvs_int_port(mvi, i, tmp);
1470 }
1471
1472 if (stat & CINT_SRS)
1473 mvs_int_sata(mvi);
1474
1475 mw32(INT_STAT, stat);
1476}
1477
1478static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
1479{
1480 void __iomem *regs = mvi->regs;
1481 u32 rx_prod_idx, rx_desc;
1482 bool attn = false;
1483 struct pci_dev *pdev = mvi->pdev;
1484
1485 /* the first dword in the RX ring is special: it contains
1486 * a mirror of the hardware's RX producer index, so that
1487 * we don't have to stall the CPU reading that register.
1488 * The actual RX ring is offset by one dword, due to this.
1489 */
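	/*
	 * In other words (a sketch): rx[0] is the copy of the producer
	 * index that the hardware writes back, and descriptor i lives at
	 * rx[i + 1] -- hence the "+ 1" when indexing below.
	 */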
1490 rx_prod_idx = mvi->rx_cons;
1491 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
1492 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
1493 return 0;
1494
1495	/* The CMPL_Q update may arrive late; re-read the consumer index
1496	 * from the register and try again.  Note: if interrupt coalescing
1497	 * is enabled, the register has to be read every time.
1498	 */
1499 if (mvi->rx_cons == rx_prod_idx)
1500 mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
1501
1502 if (mvi->rx_cons == rx_prod_idx)
1503 return 0;
1504
1505 while (mvi->rx_cons != rx_prod_idx) {
1506
1507 /* increment our internal RX consumer pointer */
1508 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
1509
1510 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
1511
1512 if (likely(rx_desc & RXQ_DONE))
1513 mvs_slot_complete(mvi, rx_desc, 0);
1514 if (rx_desc & RXQ_ATTN) {
1515 attn = true;
1516 dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
1517 rx_desc);
1518 } else if (rx_desc & RXQ_ERR) {
1519 if (!(rx_desc & RXQ_DONE))
1520 mvs_slot_complete(mvi, rx_desc, 0);
1521 dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
1522 rx_desc);
1523 } else if (rx_desc & RXQ_SLOT_RESET) {
1524 dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
1525 rx_desc);
1526 mvs_slot_free(mvi, rx_desc);
1527 }
1528 }
1529
1530 if (attn && self_clear)
1531 mvs_int_full(mvi);
1532
1533 return 0;
1534}
1535
1536#ifdef MVS_USE_TASKLET
1537static void mvs_tasklet(unsigned long data)
1538{
1539 struct mvs_info *mvi = (struct mvs_info *) data;
1540 unsigned long flags;
1541
1542 spin_lock_irqsave(&mvi->lock, flags);
1543
1544#ifdef MVS_DISABLE_MSI
1545 mvs_int_full(mvi);
1546#else
1547 mvs_int_rx(mvi, true);
1548#endif
1549 spin_unlock_irqrestore(&mvi->lock, flags);
1550}
1551#endif
1552
1553static irqreturn_t mvs_interrupt(int irq, void *opaque)
1554{
1555 struct mvs_info *mvi = opaque;
1556 void __iomem *regs = mvi->regs;
1557 u32 stat;
1558
1559 stat = mr32(GBL_INT_STAT);
1560
1561 if (stat == 0 || stat == 0xffffffff)
1562 return IRQ_NONE;
1563
1564 /* clear CMD_CMPLT ASAP */
1565 mw32_f(INT_STAT, CINT_DONE);
1566
1567#ifndef MVS_USE_TASKLET
1568 spin_lock(&mvi->lock);
1569
1570 mvs_int_full(mvi);
1571
1572 spin_unlock(&mvi->lock);
1573#else
1574 tasklet_schedule(&mvi->tasklet);
1575#endif
1576 return IRQ_HANDLED;
1577}
1578
1579#ifndef MVS_DISABLE_MSI
1580static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
1581{
1582 struct mvs_info *mvi = opaque;
1583
1584#ifndef MVS_USE_TASKLET
1585 spin_lock(&mvi->lock);
1586
1587 mvs_int_rx(mvi, true);
1588
1589 spin_unlock(&mvi->lock);
1590#else
1591 tasklet_schedule(&mvi->tasklet);
1592#endif
1593 return IRQ_HANDLED;
1594}
1595#endif
1596
1597struct mvs_task_exec_info {
1598 struct sas_task *task;
1599 struct mvs_cmd_hdr *hdr;
1600 struct mvs_port *port;
1601 u32 tag;
1602 int n_elem;
1603};
1604
1605static int mvs_task_prep_smp(struct mvs_info *mvi,
1606 struct mvs_task_exec_info *tei)
1607{
1608 int elem, rc, i;
1609 struct sas_task *task = tei->task;
1610 struct mvs_cmd_hdr *hdr = tei->hdr;
1611 struct scatterlist *sg_req, *sg_resp;
1612 u32 req_len, resp_len, tag = tei->tag;
1613 void *buf_tmp;
1614 u8 *buf_oaf;
1615 dma_addr_t buf_tmp_dma;
1616 struct mvs_prd *buf_prd;
1617 struct scatterlist *sg;
1618 struct mvs_slot_info *slot = &mvi->slot_info[tag];
1619 struct asd_sas_port *sas_port = task->dev->port;
1620 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1621#if _MV_DUMP
1622 u8 *buf_cmd;
1623 void *from;
1624#endif
1625 /*
1626 * DMA-map SMP request, response buffers
1627 */
1628 sg_req = &task->smp_task.smp_req;
1629 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
1630 if (!elem)
1631 return -ENOMEM;
1632 req_len = sg_dma_len(sg_req);
1633
1634 sg_resp = &task->smp_task.smp_resp;
1635 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
1636 if (!elem) {
1637 rc = -ENOMEM;
1638 goto err_out;
1639 }
1640 resp_len = sg_dma_len(sg_resp);
1641
1642 /* must be in dwords */
1643 if ((req_len & 0x3) || (resp_len & 0x3)) {
1644 rc = -EINVAL;
1645 goto err_out_2;
1646 }
1647
1648 /*
1649 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1650 */
1651
1652 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1653 buf_tmp = slot->buf;
1654 buf_tmp_dma = slot->buf_dma;
1655
1656#if _MV_DUMP
1657 buf_cmd = buf_tmp;
1658 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1659 buf_tmp += req_len;
1660 buf_tmp_dma += req_len;
1661 slot->cmd_size = req_len;
1662#else
1663 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
1664#endif
1665
1666 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1667 buf_oaf = buf_tmp;
1668 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1669
1670 buf_tmp += MVS_OAF_SZ;
1671 buf_tmp_dma += MVS_OAF_SZ;
1672
1673 /* region 3: PRD table ********************************************* */
1674 buf_prd = buf_tmp;
1675 if (tei->n_elem)
1676 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1677 else
1678 hdr->prd_tbl = 0;
1679
1680 i = sizeof(struct mvs_prd) * tei->n_elem;
1681 buf_tmp += i;
1682 buf_tmp_dma += i;
1683
1684	/* region 4: status buffer (the larger the PRD table, the smaller this buf) */
1685 slot->response = buf_tmp;
1686 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1687
1688 /*
1689 * Fill in TX ring and command slot header
1690 */
1691 slot->tx = mvi->tx_prod;
1692 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
1693 TXQ_MODE_I | tag |
1694 (sas_port->phy_mask << TXQ_PHY_SHIFT));
1695
1696 hdr->flags |= flags;
1697 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
1698 hdr->tags = cpu_to_le32(tag);
1699 hdr->data_len = 0;
1700
1701 /* generate open address frame hdr (first 12 bytes) */
1702 buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
1703 buf_oaf[1] = task->dev->linkrate & 0xf;
1704 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
1705 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1706
1707 /* fill in PRD (scatter/gather) table, if any */
1708 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1709 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1710 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1711 buf_prd++;
1712 }
1713
1714#if _MV_DUMP
1715 /* copy cmd table */
1716 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
1717 memcpy(buf_cmd, from + sg_req->offset, req_len);
1718 kunmap_atomic(from, KM_IRQ0);
1719#endif
1720 return 0;
1721
1722err_out_2:
1723 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
1724 PCI_DMA_FROMDEVICE);
1725err_out:
1726 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
1727 PCI_DMA_TODEVICE);
1728 return rc;
1729}
1730
1731static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1732{
1733 void __iomem *regs = mvi->regs;
1734 u32 tmp, offs;
1735 u8 *tfs = &port->taskfileset;
1736
1737 if (*tfs == MVS_ID_NOT_MAPPED)
1738 return;
1739
1740 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1741 if (*tfs < 16) {
1742 tmp = mr32(PCS);
1743 mw32(PCS, tmp & ~offs);
1744 } else {
1745 tmp = mr32(CTL);
1746 mw32(CTL, tmp & ~offs);
1747 }
1748
1749 tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
1750 if (tmp)
1751 mw32(INT_STAT_SRS, tmp);
1752
1753 *tfs = MVS_ID_NOT_MAPPED;
1754}
1755
1756static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1757{
1758 int i;
1759 u32 tmp, offs;
1760 void __iomem *regs = mvi->regs;
1761
1762 if (port->taskfileset != MVS_ID_NOT_MAPPED)
1763 return 0;
1764
1765 tmp = mr32(PCS);
1766
1767 for (i = 0; i < mvi->chip->srs_sz; i++) {
1768 if (i == 16)
1769 tmp = mr32(CTL);
1770 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1771 if (!(tmp & offs)) {
1772 port->taskfileset = i;
1773
1774 if (i < 16)
1775 mw32(PCS, tmp | offs);
1776 else
1777 mw32(CTL, tmp | offs);
1778 tmp = mr32(INT_STAT_SRS) & (1U << i);
1779 if (tmp)
1780 mw32(INT_STAT_SRS, tmp);
1781 return 0;
1782 }
1783 }
1784 return MVS_ID_NOT_MAPPED;
1785}
1786
1787static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
1788{
1789 struct ata_queued_cmd *qc = task->uldd_task;
1790
1791 if (qc) {
1792 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
1793 qc->tf.command == ATA_CMD_FPDMA_READ) {
1794 *tag = qc->tag;
1795 return 1;
1796 }
1797 }
1798
1799 return 0;
1800}
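/*
 * For NCQ (FPDMA) commands the hardware tag doubles as the SATA NCQ
 * tag: mvs_task_prep_ata() below ORs it into the FIS sector_count
 * field shifted left by 3, i.e. into bits 7:3, which is where the NCQ
 * tag is carried in an FPDMA command FIS.
 */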
1801
1802static int mvs_task_prep_ata(struct mvs_info *mvi,
1803 struct mvs_task_exec_info *tei)
1804{
1805 struct sas_task *task = tei->task;
1806 struct domain_device *dev = task->dev;
1807 struct mvs_cmd_hdr *hdr = tei->hdr;
1808 struct asd_sas_port *sas_port = dev->port;
1809 struct mvs_slot_info *slot;
1810 struct scatterlist *sg;
1811 struct mvs_prd *buf_prd;
1812 struct mvs_port *port = tei->port;
1813 u32 tag = tei->tag;
1814 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1815 void *buf_tmp;
1816 u8 *buf_cmd, *buf_oaf;
1817 dma_addr_t buf_tmp_dma;
1818 u32 i, req_len, resp_len;
1819 const u32 max_resp_len = SB_RFB_MAX;
1820
1821 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
1822 return -EBUSY;
1823
1824 slot = &mvi->slot_info[tag];
1825 slot->tx = mvi->tx_prod;
1826 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1827 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
1828 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
1829 (port->taskfileset << TXQ_SRS_SHIFT));
1830
1831 if (task->ata_task.use_ncq)
1832 flags |= MCH_FPDMA;
1833 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
1834 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
1835 flags |= MCH_ATAPI;
1836 }
1837
1838 /* FIXME: fill in port multiplier number */
1839
1840 hdr->flags = cpu_to_le32(flags);
1841
1842	/* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
1843 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
1844 task->ata_task.fis.sector_count |= hdr->tags << 3;
1845 else
1846 hdr->tags = cpu_to_le32(tag);
1847 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1848
1849 /*
1850 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1851 */
1852
1853 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
1854 buf_cmd = buf_tmp = slot->buf;
1855 buf_tmp_dma = slot->buf_dma;
1856
1857 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1858
1859 buf_tmp += MVS_ATA_CMD_SZ;
1860 buf_tmp_dma += MVS_ATA_CMD_SZ;
1861#if _MV_DUMP
1862 slot->cmd_size = MVS_ATA_CMD_SZ;
1863#endif
1864
1865 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1866 /* used for STP. unused for SATA? */
1867 buf_oaf = buf_tmp;
1868 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1869
1870 buf_tmp += MVS_OAF_SZ;
1871 buf_tmp_dma += MVS_OAF_SZ;
1872
1873 /* region 3: PRD table ********************************************* */
1874 buf_prd = buf_tmp;
1875 if (tei->n_elem)
1876 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1877 else
1878 hdr->prd_tbl = 0;
1879
1880 i = sizeof(struct mvs_prd) * tei->n_elem;
1881 buf_tmp += i;
1882 buf_tmp_dma += i;
1883
1884	/* region 4: status buffer (the larger the PRD table, the smaller this buf) */
1885 /* FIXME: probably unused, for SATA. kept here just in case
1886 * we get a STP/SATA error information record
1887 */
1888 slot->response = buf_tmp;
1889 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1890
1891 req_len = sizeof(struct host_to_dev_fis);
1892 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1893 sizeof(struct mvs_err_info) - i;
1894
1895 /* request, response lengths */
1896 resp_len = min(resp_len, max_resp_len);
1897 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1898
1899 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1900 /* fill in command FIS and ATAPI CDB */
1901 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1902 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
1903 memcpy(buf_cmd + STP_ATAPI_CMD,
1904 task->ata_task.atapi_packet, 16);
1905
1906 /* generate open address frame hdr (first 12 bytes) */
1907 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
1908 buf_oaf[1] = task->dev->linkrate & 0xf;
1909 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1910 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1911
1912 /* fill in PRD (scatter/gather) table, if any */
1913 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1914 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1915 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1916 buf_prd++;
1917 }
1918
1919 return 0;
1920}
1921
1922static int mvs_task_prep_ssp(struct mvs_info *mvi,
1923 struct mvs_task_exec_info *tei)
1924{
1925 struct sas_task *task = tei->task;
1926 struct mvs_cmd_hdr *hdr = tei->hdr;
1927 struct mvs_port *port = tei->port;
1928 struct mvs_slot_info *slot;
1929 struct scatterlist *sg;
1930 struct mvs_prd *buf_prd;
1931 struct ssp_frame_hdr *ssp_hdr;
1932 void *buf_tmp;
1933 u8 *buf_cmd, *buf_oaf, fburst = 0;
1934 dma_addr_t buf_tmp_dma;
1935 u32 flags;
1936 u32 resp_len, req_len, i, tag = tei->tag;
1937 const u32 max_resp_len = SB_RFB_MAX;
1938 u8 phy_mask;
1939
1940 slot = &mvi->slot_info[tag];
1941
1942 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1943 task->dev->port->phy_mask;
1944 slot->tx = mvi->tx_prod;
1945 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1946 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
1947 (phy_mask << TXQ_PHY_SHIFT));
1948
1949 flags = MCH_RETRY;
1950 if (task->ssp_task.enable_first_burst) {
1951 flags |= MCH_FBURST;
1952 fburst = (1 << 7);
1953 }
1954 hdr->flags = cpu_to_le32(flags |
1955 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
1956 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
1957
1958 hdr->tags = cpu_to_le32(tag);
1959 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1960
1961 /*
1962 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1963 */
1964
1965 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1966 buf_cmd = buf_tmp = slot->buf;
1967 buf_tmp_dma = slot->buf_dma;
1968
1969 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1970
1971 buf_tmp += MVS_SSP_CMD_SZ;
1972 buf_tmp_dma += MVS_SSP_CMD_SZ;
1973#if _MV_DUMP
1974 slot->cmd_size = MVS_SSP_CMD_SZ;
1975#endif
1976
1977 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1978 buf_oaf = buf_tmp;
1979 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1980
1981 buf_tmp += MVS_OAF_SZ;
1982 buf_tmp_dma += MVS_OAF_SZ;
1983
1984 /* region 3: PRD table ********************************************* */
1985 buf_prd = buf_tmp;
1986 if (tei->n_elem)
1987 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1988 else
1989 hdr->prd_tbl = 0;
1990
1991 i = sizeof(struct mvs_prd) * tei->n_elem;
1992 buf_tmp += i;
1993 buf_tmp_dma += i;
1994
1995	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) ****** */
1996 slot->response = buf_tmp;
1997 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1998
1999 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
2000 sizeof(struct mvs_err_info) - i;
2001 resp_len = min(resp_len, max_resp_len);
2002
2003 req_len = sizeof(struct ssp_frame_hdr) + 28;
2004
2005 /* request, response lengths */
2006 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
2007
2008 /* generate open address frame hdr (first 12 bytes) */
2009 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
2010 buf_oaf[1] = task->dev->linkrate & 0xf;
2011 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
2012 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
2013
2014 /* fill in SSP frame header (Command Table.SSP frame header) */
2015 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
2016 ssp_hdr->frame_type = SSP_COMMAND;
2017 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
2018 HASHED_SAS_ADDR_SIZE);
2019 memcpy(ssp_hdr->hashed_src_addr,
2020 task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
2021 ssp_hdr->tag = cpu_to_be16(tag);
2022
2023 /* fill in command frame IU */
2024 buf_cmd += sizeof(*ssp_hdr);
2025 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
2026 buf_cmd[9] = fburst | task->ssp_task.task_attr |
2027 (task->ssp_task.task_prio << 3);
2028 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
2029
2030 /* fill in PRD (scatter/gather) table, if any */
2031 for_each_sg(task->scatter, sg, tei->n_elem, i) {
2032 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
2033 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
2034 buf_prd++;
2035 }
2036
2037 return 0;
2038}
2039
2040static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
2041{
2042 struct domain_device *dev = task->dev;
2043 struct mvs_info *mvi = dev->port->ha->lldd_ha;
2044 struct pci_dev *pdev = mvi->pdev;
2045 void __iomem *regs = mvi->regs;
2046 struct mvs_task_exec_info tei;
2047 struct sas_task *t = task;
2048 struct mvs_slot_info *slot;
2049 u32 tag = 0xdeadbeef, rc, n_elem = 0;
2050 unsigned long flags;
2051 u32 n = num, pass = 0;
2052
2053 spin_lock_irqsave(&mvi->lock, flags);
2054 do {
2055 dev = t->dev;
2056 tei.port = &mvi->port[dev->port->id];
2057
2058 if (!tei.port->port_attached) {
2059 if (sas_protocol_ata(t->task_proto)) {
2060 rc = SAS_PHY_DOWN;
2061 goto out_done;
2062 } else {
2063 struct task_status_struct *ts = &t->task_status;
2064 ts->resp = SAS_TASK_UNDELIVERED;
2065 ts->stat = SAS_PHY_DOWN;
2066 t->task_done(t);
2067 if (n > 1)
2068 t = list_entry(t->list.next,
2069 struct sas_task, list);
2070 continue;
2071 }
2072 }
2073
2074 if (!sas_protocol_ata(t->task_proto)) {
2075 if (t->num_scatter) {
2076 n_elem = pci_map_sg(mvi->pdev, t->scatter,
2077 t->num_scatter,
2078 t->data_dir);
2079 if (!n_elem) {
2080 rc = -ENOMEM;
2081 goto err_out;
2082 }
2083 }
2084 } else {
2085 n_elem = t->num_scatter;
2086 }
2087
2088 rc = mvs_tag_alloc(mvi, &tag);
2089 if (rc)
2090 goto err_out;
2091
2092 slot = &mvi->slot_info[tag];
2093 t->lldd_task = NULL;
2094 slot->n_elem = n_elem;
2095 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2096 tei.task = t;
2097 tei.hdr = &mvi->slot[tag];
2098 tei.tag = tag;
2099 tei.n_elem = n_elem;
2100
2101 switch (t->task_proto) {
2102 case SAS_PROTOCOL_SMP:
2103 rc = mvs_task_prep_smp(mvi, &tei);
2104 break;
2105 case SAS_PROTOCOL_SSP:
2106 rc = mvs_task_prep_ssp(mvi, &tei);
2107 break;
2108 case SAS_PROTOCOL_SATA:
2109 case SAS_PROTOCOL_STP:
2110 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2111 rc = mvs_task_prep_ata(mvi, &tei);
2112 break;
2113 default:
2114 dev_printk(KERN_ERR, &pdev->dev,
2115 "unknown sas_task proto: 0x%x\n",
2116 t->task_proto);
2117 rc = -EINVAL;
2118 break;
2119 }
2120
2121 if (rc)
2122 goto err_out_tag;
2123
2124 slot->task = t;
2125 slot->port = tei.port;
2126 t->lldd_task = (void *) slot;
2127 list_add_tail(&slot->list, &slot->port->list);
2128 /* TODO: select normal or high priority */
2129
2130 spin_lock(&t->task_state_lock);
2131 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
2132 spin_unlock(&t->task_state_lock);
2133
2134 mvs_hba_memory_dump(mvi, tag, t->task_proto);
2135
2136 ++pass;
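		/* advance the TX producer index, wrapping at the power-of-two ring size */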
2137 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
2138 if (n > 1)
2139 t = list_entry(t->list.next, struct sas_task, list);
2140 } while (--n);
2141
2142 rc = 0;
2143 goto out_done;
2144
2145err_out_tag:
2146 mvs_tag_free(mvi, tag);
2147err_out:
2148 dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
2149 if (!sas_protocol_ata(t->task_proto))
2150 if (n_elem)
2151 pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
2152 t->data_dir);
2153out_done:
2154 if (pass)
2155 mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
2156 spin_unlock_irqrestore(&mvi->lock, flags);
2157 return rc;
2158}
2159
2160static int mvs_task_abort(struct sas_task *task)
2161{
2162 int rc;
2163 unsigned long flags;
2164 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
2165 struct pci_dev *pdev = mvi->pdev;
2166 int tag;
2167
2168 spin_lock_irqsave(&task->task_state_lock, flags);
2169 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
2170 rc = TMF_RESP_FUNC_COMPLETE;
2171 spin_unlock_irqrestore(&task->task_state_lock, flags);
2172 goto out_done;
2173 }
2174 spin_unlock_irqrestore(&task->task_state_lock, flags);
2175
2176 switch (task->task_proto) {
2177 case SAS_PROTOCOL_SMP:
2178 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
2179 break;
2180 case SAS_PROTOCOL_SSP:
2181 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
2182 break;
2183 case SAS_PROTOCOL_SATA:
2184 case SAS_PROTOCOL_STP:
2185 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
2186 dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
2187#if _MV_DUMP
2188		dev_printk(KERN_DEBUG, &pdev->dev, "Dump H2D FIS:\n");
2189 mvs_hexdump(sizeof(struct host_to_dev_fis),
2190 (void *)&task->ata_task.fis, 0);
2191 dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
2192 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
2193#endif
2194 spin_lock_irqsave(&task->task_state_lock, flags);
2195 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
2196 /* TODO */
2197 ;
2198 }
2199 spin_unlock_irqrestore(&task->task_state_lock, flags);
2200 break;
2201 }
2202 default:
2203 break;
2204 }
2205
2206 if (mvs_find_tag(mvi, task, &tag)) {
2207 spin_lock_irqsave(&mvi->lock, flags);
2208 mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
2209 spin_unlock_irqrestore(&mvi->lock, flags);
2210 }
2211 if (!mvs_task_exec(task, 1, GFP_ATOMIC))
2212 rc = TMF_RESP_FUNC_COMPLETE;
2213 else
2214 rc = TMF_RESP_FUNC_FAILED;
2215out_done:
2216 return rc;
2217}
2218
2219static void mvs_free(struct mvs_info *mvi)
2220{
2221 int i;
2222
2223 if (!mvi)
2224 return;
2225
2226 for (i = 0; i < MVS_SLOTS; i++) {
2227 struct mvs_slot_info *slot = &mvi->slot_info[i];
2228
2229 if (slot->buf)
2230 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
2231 slot->buf, slot->buf_dma);
2232 }
2233
2234 if (mvi->tx)
2235 dma_free_coherent(&mvi->pdev->dev,
2236 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2237 mvi->tx, mvi->tx_dma);
2238 if (mvi->rx_fis)
2239 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
2240 mvi->rx_fis, mvi->rx_fis_dma);
2241 if (mvi->rx)
2242 dma_free_coherent(&mvi->pdev->dev,
2243 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2244 mvi->rx, mvi->rx_dma);
2245 if (mvi->slot)
2246 dma_free_coherent(&mvi->pdev->dev,
2247 sizeof(*mvi->slot) * MVS_SLOTS,
2248 mvi->slot, mvi->slot_dma);
2249#ifdef MVS_ENABLE_PERI
2250 if (mvi->peri_regs)
2251 iounmap(mvi->peri_regs);
2252#endif
2253 if (mvi->regs)
2254 iounmap(mvi->regs);
2255 if (mvi->shost)
2256 scsi_host_put(mvi->shost);
2257 kfree(mvi->sas.sas_port);
2258 kfree(mvi->sas.sas_phy);
2259 kfree(mvi);
2260}
2261
2262/* FIXME: locking? */
2263static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
2264 void *funcdata)
2265{
2266 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
2267 int rc = 0, phy_id = sas_phy->id;
2268 u32 tmp;
2269
2270 tmp = mvs_read_phy_ctl(mvi, phy_id);
2271
2272 switch (func) {
2273 case PHY_FUNC_SET_LINK_RATE:{
2274 struct sas_phy_linkrates *rates = funcdata;
2275 u32 lrmin = 0, lrmax = 0;
2276
2277 lrmin = (rates->minimum_linkrate << 8);
2278 lrmax = (rates->maximum_linkrate << 12);
2279
2280 if (lrmin) {
2281 tmp &= ~(0xf << 8);
2282 tmp |= lrmin;
2283 }
2284 if (lrmax) {
2285 tmp &= ~(0xf << 12);
2286 tmp |= lrmax;
2287 }
2288 mvs_write_phy_ctl(mvi, phy_id, tmp);
2289 break;
2290 }
2291
2292 case PHY_FUNC_HARD_RESET:
2293 if (tmp & PHY_RST_HARD)
2294 break;
2295 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
2296 break;
2297
2298 case PHY_FUNC_LINK_RESET:
2299 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
2300 break;
2301
2302 case PHY_FUNC_DISABLE:
2303 case PHY_FUNC_RELEASE_SPINUP_HOLD:
2304 default:
2305 rc = -EOPNOTSUPP;
2306 }
2307
2308 return rc;
2309}
2310
2311static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
2312{
2313 struct mvs_phy *phy = &mvi->phy[phy_id];
2314 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2315
2316 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
2317 sas_phy->class = SAS;
2318 sas_phy->iproto = SAS_PROTOCOL_ALL;
2319 sas_phy->tproto = 0;
2320 sas_phy->type = PHY_TYPE_PHYSICAL;
2321 sas_phy->role = PHY_ROLE_INITIATOR;
2322 sas_phy->oob_mode = OOB_NOT_CONNECTED;
2323 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
2324
2325 sas_phy->id = phy_id;
2326 sas_phy->sas_addr = &mvi->sas_addr[0];
2327 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
2328 sas_phy->ha = &mvi->sas;
2329 sas_phy->lldd_phy = phy;
2330}
2331
2332static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
2333 const struct pci_device_id *ent)
2334{
2335 struct mvs_info *mvi;
2336 unsigned long res_start, res_len, res_flag;
2337 struct asd_sas_phy **arr_phy;
2338 struct asd_sas_port **arr_port;
2339 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
2340 int i;
2341
2342 /*
2343 * alloc and init our per-HBA mvs_info struct
2344 */
2345
2346 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
2347 if (!mvi)
2348 return NULL;
2349
2350 spin_lock_init(&mvi->lock);
2351#ifdef MVS_USE_TASKLET
2352 tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
2353#endif
2354 mvi->pdev = pdev;
2355 mvi->chip = chip;
2356
2357 if (pdev->device == 0x6440 && pdev->revision == 0)
2358 mvi->flags |= MVF_PHY_PWR_FIX;
2359
2360 /*
2361 * alloc and init SCSI, SAS glue
2362 */
2363
2364 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
2365 if (!mvi->shost)
2366 goto err_out;
2367
2368 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2369 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2370 if (!arr_phy || !arr_port)
2371 goto err_out;
2372
2373 for (i = 0; i < MVS_MAX_PHYS; i++) {
2374 mvs_phy_init(mvi, i);
2375 arr_phy[i] = &mvi->phy[i].sas_phy;
2376 arr_port[i] = &mvi->port[i].sas_port;
2377 mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
2378 mvi->port[i].wide_port_phymap = 0;
2379 mvi->port[i].port_attached = 0;
2380 INIT_LIST_HEAD(&mvi->port[i].list);
2381 }
2382
2383 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
2384 mvi->shost->transportt = mvs_stt;
2385 mvi->shost->max_id = 21;
2386 mvi->shost->max_lun = ~0;
2387 mvi->shost->max_channel = 0;
2388 mvi->shost->max_cmd_len = 16;
2389
2390 mvi->sas.sas_ha_name = DRV_NAME;
2391 mvi->sas.dev = &pdev->dev;
2392 mvi->sas.lldd_module = THIS_MODULE;
2393 mvi->sas.sas_addr = &mvi->sas_addr[0];
2394 mvi->sas.sas_phy = arr_phy;
2395 mvi->sas.sas_port = arr_port;
2396 mvi->sas.num_phys = chip->n_phy;
2397 mvi->sas.lldd_max_execute_num = 1;
2398 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
2399 mvi->shost->can_queue = MVS_CAN_QUEUE;
2400 mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
2401 mvi->sas.lldd_ha = mvi;
2402 mvi->sas.core.shost = mvi->shost;
2403
2404 mvs_tag_init(mvi);
2405
2406 /*
2407 * ioremap main and peripheral registers
2408 */
2409
2410#ifdef MVS_ENABLE_PERI
2411 res_start = pci_resource_start(pdev, 2);
2412 res_len = pci_resource_len(pdev, 2);
2413 if (!res_start || !res_len)
2414 goto err_out;
2415
2416 mvi->peri_regs = ioremap_nocache(res_start, res_len);
2417 if (!mvi->peri_regs)
2418 goto err_out;
2419#endif
2420
2421 res_start = pci_resource_start(pdev, 4);
2422 res_len = pci_resource_len(pdev, 4);
2423 if (!res_start || !res_len)
2424 goto err_out;
2425
2426 res_flag = pci_resource_flags(pdev, 4);
2427 if (res_flag & IORESOURCE_CACHEABLE)
2428 mvi->regs = ioremap(res_start, res_len);
2429 else
2430 mvi->regs = ioremap_nocache(res_start, res_len);
2431
2432 if (!mvi->regs)
2433 goto err_out;
2434
2435 /*
2436 * alloc and init our DMA areas
2437 */
2438
2439 mvi->tx = dma_alloc_coherent(&pdev->dev,
2440 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2441 &mvi->tx_dma, GFP_KERNEL);
2442 if (!mvi->tx)
2443 goto err_out;
2444 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
2445
2446 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
2447 &mvi->rx_fis_dma, GFP_KERNEL);
2448 if (!mvi->rx_fis)
2449 goto err_out;
2450 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
2451
2452 mvi->rx = dma_alloc_coherent(&pdev->dev,
2453 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2454 &mvi->rx_dma, GFP_KERNEL);
2455 if (!mvi->rx)
2456 goto err_out;
2457 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
2458
2459 mvi->rx[0] = cpu_to_le32(0xfff);
2460 mvi->rx_cons = 0xfff;
2461
2462 mvi->slot = dma_alloc_coherent(&pdev->dev,
2463 sizeof(*mvi->slot) * MVS_SLOTS,
2464 &mvi->slot_dma, GFP_KERNEL);
2465 if (!mvi->slot)
2466 goto err_out;
2467 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
2468
2469 for (i = 0; i < MVS_SLOTS; i++) {
2470 struct mvs_slot_info *slot = &mvi->slot_info[i];
2471
2472 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
2473 &slot->buf_dma, GFP_KERNEL);
2474 if (!slot->buf)
2475 goto err_out;
2476 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2477 }
2478
2479 /* finally, read NVRAM to get our SAS address */
2480 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
2481 goto err_out;
2482 return mvi;
2483
2484err_out:
2485 mvs_free(mvi);
2486 return NULL;
2487}
2488
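/* indirect register access helpers: latch the target address into CMD_ADDR,
 * then read or write the value through CMD_DATA
 */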
2489static u32 mvs_cr32(void __iomem *regs, u32 addr)
2490{
2491 mw32(CMD_ADDR, addr);
2492 return mr32(CMD_DATA);
2493}
2494
2495static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
2496{
2497 mw32(CMD_ADDR, addr);
2498 mw32(CMD_DATA, val);
2499}
2500
2501static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
2502{
2503 void __iomem *regs = mvi->regs;
2504 return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4):
2505 mr32(P4_SER_CTLSTAT + (port - 4) * 4);
2506}
2507
2508static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
2509{
2510 void __iomem *regs = mvi->regs;
2511 if (port < 4)
2512 mw32(P0_SER_CTLSTAT + port * 4, val);
2513 else
2514 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
2515}
2516
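/* per-phy register access: phys 0-3 sit in one register bank and phys 4-7 in
 * a second bank, with an 8-byte stride per phy
 */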
2517static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
2518{
2519 void __iomem *regs = mvi->regs + off;
2520 void __iomem *regs2 = mvi->regs + off2;
2521 return (port < 4)?readl(regs + port * 8):
2522 readl(regs2 + (port - 4) * 8);
2523}
2524
2525static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
2526 u32 port, u32 val)
2527{
2528 void __iomem *regs = mvi->regs + off;
2529 void __iomem *regs2 = mvi->regs + off2;
2530 if (port < 4)
2531 writel(val, regs + port * 8);
2532 else
2533 writel(val, regs2 + (port - 4) * 8);
2534}
2535
2536static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
2537{
2538 return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
2539}
2540
2541static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
2542{
2543 mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
2544}
2545
2546static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
2547{
2548 mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
2549}
2550
2551static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
2552{
2553 return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
2554}
2555
2556static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
2557{
2558 mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
2559}
2560
2561static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
2562{
2563 mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
2564}
2565
2566static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
2567{
2568 return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
2569}
2570
2571static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
2572{
2573 mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
2574}
2575
2576static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
2577{
2578 return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
2579}
2580
2581static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
2582{
2583 mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
2584}
2585
2586static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
2587{
2588 void __iomem *regs = mvi->regs;
2589 u32 tmp;
2590
2591 /* workaround for SATA R-ERR, to ignore phy glitch */
2592 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2593 tmp &= ~(1 << 9);
2594 tmp |= (1 << 10);
2595 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2596
2597 /* enable retry 127 times */
2598 mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
2599
2600 /* extend open frame timeout to max */
2601 tmp = mvs_cr32(regs, CMD_SAS_CTL0);
2602 tmp &= ~0xffff;
2603 tmp |= 0x3fff;
2604 mvs_cw32(regs, CMD_SAS_CTL0, tmp);
2605
2606	/* workaround for WDTIMEOUT, set to 550 ms */
2607 mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
2608
2609 /* not to halt for different port op during wideport link change */
2610 mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
2611
2612 /* workaround for Seagate disk not-found OOB sequence, recv
2613 * COMINIT before sending out COMWAKE */
2614 tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
2615 tmp &= 0x0000ffff;
2616 tmp |= 0x00fa0000;
2617 mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
2618
2619 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2620 tmp &= 0x1fffffff;
2621 tmp |= (2U << 29); /* 8 ms retry */
2622 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2623
2624 /* TEST - for phy decoding error, adjust voltage levels */
2625 mw32(P0_VSR_ADDR + 0, 0x8);
2626 mw32(P0_VSR_DATA + 0, 0x2F0);
2627
2628 mw32(P0_VSR_ADDR + 8, 0x8);
2629 mw32(P0_VSR_DATA + 8, 0x2F0);
2630
2631 mw32(P0_VSR_ADDR + 16, 0x8);
2632 mw32(P0_VSR_DATA + 16, 0x2F0);
2633
2634 mw32(P0_VSR_ADDR + 24, 0x8);
2635 mw32(P0_VSR_DATA + 24, 0x2F0);
2636
2637}
2638
2639static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
2640{
2641 void __iomem *regs = mvi->regs;
2642 u32 tmp;
2643
2644 tmp = mr32(PCS);
2645 if (mvi->chip->n_phy <= 4)
2646 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
2647 else
2648 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
2649 mw32(PCS, tmp);
2650}
2651
2652static void mvs_detect_porttype(struct mvs_info *mvi, int i)
2653{
2654 void __iomem *regs = mvi->regs;
2655 u32 reg;
2656 struct mvs_phy *phy = &mvi->phy[i];
2657
2658 /* TODO check & save device type */
2659 reg = mr32(GBL_PORT_TYPE);
2660
2661 if (reg & MODE_SAS_SATA & (1 << i))
2662 phy->phy_type |= PORT_TYPE_SAS;
2663 else
2664 phy->phy_type |= PORT_TYPE_SATA;
2665}
2666
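/* read back the SATA signature (device-to-host register FIS) captured by the
 * port, one dword at a time, from the port configuration registers
 */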
2667static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
2668{
2669 u32 *s = (u32 *) buf;
2670
2671 if (!s)
2672 return NULL;
2673
2674 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
2675 s[3] = mvs_read_port_cfg_data(mvi, i);
2676
2677 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
2678 s[2] = mvs_read_port_cfg_data(mvi, i);
2679
2680 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
2681 s[1] = mvs_read_port_cfg_data(mvi, i);
2682
2683 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
2684 s[0] = mvs_read_port_cfg_data(mvi, i);
2685
2686 return (void *)s;
2687}
2688
2689static u32 mvs_is_sig_fis_received(u32 irq_status)
2690{
2691 return irq_status & PHYEV_SIG_FIS;
2692}
2693
2694static void mvs_update_wideport(struct mvs_info *mvi, int i)
2695{
2696 struct mvs_phy *phy = &mvi->phy[i];
2697 struct mvs_port *port = phy->port;
2698 int j, no;
2699
2700 for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
2701 if (no & 1) {
2702 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2703 mvs_write_port_cfg_data(mvi, no,
2704 port->wide_port_phymap);
2705 } else {
2706 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2707 mvs_write_port_cfg_data(mvi, no, 0);
2708 }
2709}
2710
2711static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
2712{
2713 u32 tmp;
2714 struct mvs_phy *phy = &mvi->phy[i];
2715	struct mvs_port *port = phy->port;
2716
2717 tmp = mvs_read_phy_ctl(mvi, i);
2718
2719 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
2720 if (!port)
2721 phy->phy_attached = 1;
2722 return tmp;
2723 }
2724
2725 if (port) {
2726 if (phy->phy_type & PORT_TYPE_SAS) {
2727 port->wide_port_phymap &= ~(1U << i);
2728 if (!port->wide_port_phymap)
2729 port->port_attached = 0;
2730 mvs_update_wideport(mvi, i);
2731 } else if (phy->phy_type & PORT_TYPE_SATA)
2732 port->port_attached = 0;
2733 mvs_free_reg_set(mvi, phy->port);
2734 phy->port = NULL;
2735 phy->phy_attached = 0;
2736 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
2737 }
2738 return 0;
2739}
2740
2741static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
2742 int get_st)
2743{
2744 struct mvs_phy *phy = &mvi->phy[i];
2745 struct pci_dev *pdev = mvi->pdev;
2746 u32 tmp;
2747 u64 tmp64;
2748
2749 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
2750 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
2751
2752 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2753 phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2754
2755 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2756 phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2757
2758 if (get_st) {
2759 phy->irq_status = mvs_read_port_irq_stat(mvi, i);
2760 phy->phy_status = mvs_is_phy_ready(mvi, i);
2761 }
2762
2763 if (phy->phy_status) {
2764 u32 phy_st;
2765 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
2766
2767 mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
2768 phy_st = mvs_read_port_cfg_data(mvi, i);
2769
2770 sas_phy->linkrate =
2771 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2772 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
2773 phy->minimum_linkrate =
2774 (phy->phy_status &
2775 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
2776 phy->maximum_linkrate =
2777 (phy->phy_status &
2778 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
2779
2780 if (phy->phy_type & PORT_TYPE_SAS) {
2781 /* Updated attached_sas_addr */
2782 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
2783 phy->att_dev_sas_addr =
2784 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2785 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
2786 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2787 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
2788 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
2789 phy->identify.device_type =
2790 phy->att_dev_info & PORT_DEV_TYPE_MASK;
2791
2792 if (phy->identify.device_type == SAS_END_DEV)
2793 phy->identify.target_port_protocols =
2794 SAS_PROTOCOL_SSP;
2795 else if (phy->identify.device_type != NO_DEVICE)
2796 phy->identify.target_port_protocols =
2797 SAS_PROTOCOL_SMP;
2798 if (phy_st & PHY_OOB_DTCTD)
2799 sas_phy->oob_mode = SAS_OOB_MODE;
2800 phy->frame_rcvd_size =
2801 sizeof(struct sas_identify_frame);
2802 } else if (phy->phy_type & PORT_TYPE_SATA) {
2803 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
2804 if (mvs_is_sig_fis_received(phy->irq_status)) {
2805 phy->att_dev_sas_addr = i; /* temp */
2806 if (phy_st & PHY_OOB_DTCTD)
2807 sas_phy->oob_mode = SATA_OOB_MODE;
2808 phy->frame_rcvd_size =
2809 sizeof(struct dev_to_host_fis);
2810 mvs_get_d2h_reg(mvi, i,
2811 (void *)sas_phy->frame_rcvd);
2812 } else {
2813 dev_printk(KERN_DEBUG, &pdev->dev,
2814 "No sig fis\n");
2815 phy->phy_type &= ~(PORT_TYPE_SATA);
2816 goto out_done;
2817 }
2818 }
2819 tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
2820 memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
2821
2822 dev_printk(KERN_DEBUG, &pdev->dev,
2823 "phy[%d] Get Attached Address 0x%llX ,"
2824 " SAS Address 0x%llX\n",
2825 i,
2826 (unsigned long long)phy->att_dev_sas_addr,
2827 (unsigned long long)phy->dev_sas_addr);
2828 dev_printk(KERN_DEBUG, &pdev->dev,
2829 "Rate = %x , type = %d\n",
2830 sas_phy->linkrate, phy->phy_type);
2831
2832 /* workaround for HW phy decoding error on 1.5g disk drive */
2833 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
2834 tmp = mvs_read_port_vsr_data(mvi, i);
2835 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2836 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
2837 SAS_LINK_RATE_1_5_GBPS)
2838 tmp &= ~PHY_MODE6_LATECLK;
2839 else
2840 tmp |= PHY_MODE6_LATECLK;
2841 mvs_write_port_vsr_data(mvi, i, tmp);
2842
2843 }
2844out_done:
2845 if (get_st)
2846 mvs_write_port_irq_stat(mvi, i, phy->irq_status);
2847}
2848
2849static void mvs_port_formed(struct asd_sas_phy *sas_phy)
2850{
2851 struct sas_ha_struct *sas_ha = sas_phy->ha;
2852 struct mvs_info *mvi = sas_ha->lldd_ha;
2853 struct asd_sas_port *sas_port = sas_phy->port;
2854 struct mvs_phy *phy = sas_phy->lldd_phy;
2855 struct mvs_port *port = &mvi->port[sas_port->id];
2856 unsigned long flags;
2857
2858 spin_lock_irqsave(&mvi->lock, flags);
2859 port->port_attached = 1;
2860 phy->port = port;
2861 port->taskfileset = MVS_ID_NOT_MAPPED;
2862 if (phy->phy_type & PORT_TYPE_SAS) {
2863 port->wide_port_phymap = sas_port->phy_mask;
2864 mvs_update_wideport(mvi, sas_phy->id);
2865 }
2866 spin_unlock_irqrestore(&mvi->lock, flags);
2867}
2868
2869static int mvs_I_T_nexus_reset(struct domain_device *dev)
2870{
2871 return TMF_RESP_FUNC_FAILED;
2872}
2873
2874static int __devinit mvs_hw_init(struct mvs_info *mvi)
2875{
2876 void __iomem *regs = mvi->regs;
2877 int i;
2878 u32 tmp, cctl;
2879
2880 /* make sure interrupts are masked immediately (paranoia) */
2881 mw32(GBL_CTL, 0);
2882 tmp = mr32(GBL_CTL);
2883
2884 /* Reset Controller */
2885 if (!(tmp & HBA_RST)) {
2886 if (mvi->flags & MVF_PHY_PWR_FIX) {
2887 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2888 tmp &= ~PCTL_PWR_ON;
2889 tmp |= PCTL_OFF;
2890 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2891
2892 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2893 tmp &= ~PCTL_PWR_ON;
2894 tmp |= PCTL_OFF;
2895 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2896 }
2897
2898 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
2899 mw32_f(GBL_CTL, HBA_RST);
2900 }
2901
2902 /* wait for reset to finish; timeout is just a guess */
2903 i = 1000;
2904 while (i-- > 0) {
2905 msleep(10);
2906
2907 if (!(mr32(GBL_CTL) & HBA_RST))
2908 break;
2909 }
2910 if (mr32(GBL_CTL) & HBA_RST) {
2911 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
2912 return -EBUSY;
2913 }
2914
2915 /* Init Chip */
2916 /* make sure RST is set; HBA_RST /should/ have done that for us */
2917 cctl = mr32(CTL);
2918 if (cctl & CCTL_RST)
2919 cctl &= ~CCTL_RST;
2920 else
2921 mw32_f(CTL, cctl | CCTL_RST);
2922
2923 /* write to device control _AND_ device status register? - A.C. */
2924 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
2925 tmp &= ~PRD_REQ_MASK;
2926 tmp |= PRD_REQ_SIZE;
2927 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
2928
2929 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2930 tmp |= PCTL_PWR_ON;
2931 tmp &= ~PCTL_OFF;
2932 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2933
2934 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2935 tmp |= PCTL_PWR_ON;
2936 tmp &= ~PCTL_OFF;
2937 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2938
2939 mw32_f(CTL, cctl);
2940
2941 /* reset control */
2942 mw32(PCS, 0); /*MVS_PCS */
2943
2944 mvs_phy_hacks(mvi);
2945
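	/* program the DMA ring base addresses; the (x >> 16) >> 16 idiom
	 * extracts the high 32 bits without a shift-by-32 warning when
	 * dma_addr_t is only 32 bits wide
	 */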
2946 mw32(CMD_LIST_LO, mvi->slot_dma);
2947 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
2948
2949 mw32(RX_FIS_LO, mvi->rx_fis_dma);
2950 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
2951
2952 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
2953 mw32(TX_LO, mvi->tx_dma);
2954 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
2955
2956 mw32(RX_CFG, MVS_RX_RING_SZ);
2957 mw32(RX_LO, mvi->rx_dma);
2958 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
2959
2960 /* enable auto port detection */
2961 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
2962 msleep(1100);
2963 /* init and reset phys */
2964 for (i = 0; i < mvi->chip->n_phy; i++) {
2965 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
2966 u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
2967
2968 mvs_detect_porttype(mvi, i);
2969
2970 /* set phy local SAS address */
2971 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2972 mvs_write_port_cfg_data(mvi, i, lo);
2973 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2974 mvs_write_port_cfg_data(mvi, i, hi);
2975
2976 /* reset phy */
2977 tmp = mvs_read_phy_ctl(mvi, i);
2978 tmp |= PHY_RST;
2979 mvs_write_phy_ctl(mvi, i, tmp);
2980 }
2981
2982 msleep(100);
2983
2984 for (i = 0; i < mvi->chip->n_phy; i++) {
2985 /* clear phy int status */
2986 tmp = mvs_read_port_irq_stat(mvi, i);
2987 tmp &= ~PHYEV_SIG_FIS;
2988 mvs_write_port_irq_stat(mvi, i, tmp);
2989
2990 /* set phy int mask */
2991 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
2992 PHYEV_ID_DONE | PHYEV_DEC_ERR;
2993 mvs_write_port_irq_mask(mvi, i, tmp);
2994
2995 msleep(100);
2996 mvs_update_phyinfo(mvi, i, 1);
2997 mvs_enable_xmt(mvi, i);
2998 }
2999
3000 /* FIXME: update wide port bitmaps */
3001
3002 /* little endian for open address and command table, etc. */
3003 /* A.C.
3004	 * it seems that (from the spec) turning on big-endian won't
3005	 * do us any good on big-endian machines; needs further confirmation
3006 */
3007 cctl = mr32(CTL);
3008 cctl |= CCTL_ENDIAN_CMD;
3009 cctl |= CCTL_ENDIAN_DATA;
3010 cctl &= ~CCTL_ENDIAN_OPEN;
3011 cctl |= CCTL_ENDIAN_RSP;
3012 mw32_f(CTL, cctl);
3013
3014 /* reset CMD queue */
3015 tmp = mr32(PCS);
3016 tmp |= PCS_CMD_RST;
3017 mw32(PCS, tmp);
3018	/* interrupt coalescing may cause missed HW interrupts in some cases,
3019	 * and the max count is 0x1ff while our max slot count is 0x200,
3020	 * which would truncate the count to 0.
3021 */
3022 tmp = 0;
3023 mw32(INT_COAL, tmp);
3024
3025 tmp = 0x100;
3026 mw32(INT_COAL_TMOUT, tmp);
3027
3028 /* ladies and gentlemen, start your engines */
3029 mw32(TX_CFG, 0);
3030 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
3031 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
3032 /* enable CMD/CMPL_Q/RESP mode */
3033 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
3034
3035 /* enable completion queue interrupt */
3036 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
3037 mw32(INT_MASK, tmp);
3038
3039 /* Enable SRS interrupt */
3040 mw32(INT_MASK_SRS, 0xFF);
3041 return 0;
3042}
3043
3044static void __devinit mvs_print_info(struct mvs_info *mvi)
3045{
3046 struct pci_dev *pdev = mvi->pdev;
3047 static int printed_version;
3048
3049 if (!printed_version++)
3050 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3051
3052 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
3053 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
3054}
3055
3056static int __devinit mvs_pci_init(struct pci_dev *pdev,
3057 const struct pci_device_id *ent)
3058{
3059 int rc;
3060 struct mvs_info *mvi;
3061 irq_handler_t irq_handler = mvs_interrupt;
3062
3063 rc = pci_enable_device(pdev);
3064 if (rc)
3065 return rc;
3066
3067 pci_set_master(pdev);
3068
3069 rc = pci_request_regions(pdev, DRV_NAME);
3070 if (rc)
3071 goto err_out_disable;
3072
3073 rc = pci_go_64(pdev);
3074 if (rc)
3075 goto err_out_regions;
3076
3077 mvi = mvs_alloc(pdev, ent);
3078 if (!mvi) {
3079 rc = -ENOMEM;
3080 goto err_out_regions;
3081 }
3082
3083 rc = mvs_hw_init(mvi);
3084 if (rc)
3085 goto err_out_mvi;
3086
3087#ifndef MVS_DISABLE_MSI
3088 if (!pci_enable_msi(pdev)) {
3089 u32 tmp;
3090 void __iomem *regs = mvi->regs;
3091 mvi->flags |= MVF_MSI;
3092 irq_handler = mvs_msi_interrupt;
3093 tmp = mr32(PCS);
3094 mw32(PCS, tmp | PCS_SELF_CLEAR);
3095 }
3096#endif
3097
3098 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
3099 if (rc)
3100 goto err_out_msi;
3101
3102 rc = scsi_add_host(mvi->shost, &pdev->dev);
3103 if (rc)
3104 goto err_out_irq;
3105
3106 rc = sas_register_ha(&mvi->sas);
3107 if (rc)
3108 goto err_out_shost;
3109
3110 pci_set_drvdata(pdev, mvi);
3111
3112 mvs_print_info(mvi);
3113
3114 mvs_hba_interrupt_enable(mvi);
3115
3116 scsi_scan_host(mvi->shost);
3117
3118 return 0;
3119
3120err_out_shost:
3121 scsi_remove_host(mvi->shost);
3122err_out_irq:
3123 free_irq(pdev->irq, mvi);
3124err_out_msi:
3125	if (mvi->flags & MVF_MSI)
3126 pci_disable_msi(pdev);
3127err_out_mvi:
3128 mvs_free(mvi);
3129err_out_regions:
3130 pci_release_regions(pdev);
3131err_out_disable:
3132 pci_disable_device(pdev);
3133 return rc;
3134}
3135
3136static void __devexit mvs_pci_remove(struct pci_dev *pdev)
3137{
3138 struct mvs_info *mvi = pci_get_drvdata(pdev);
3139
3140 pci_set_drvdata(pdev, NULL);
3141
3142 if (mvi) {
3143 sas_unregister_ha(&mvi->sas);
3144 mvs_hba_interrupt_disable(mvi);
3145 sas_remove_host(mvi->shost);
3146 scsi_remove_host(mvi->shost);
3147
3148 free_irq(pdev->irq, mvi);
3149 if (mvi->flags & MVF_MSI)
3150 pci_disable_msi(pdev);
3151 mvs_free(mvi);
3152 pci_release_regions(pdev);
3153 }
3154 pci_disable_device(pdev);
3155}
3156
3157static struct sas_domain_function_template mvs_transport_ops = {
3158 .lldd_execute_task = mvs_task_exec,
3159 .lldd_control_phy = mvs_phy_control,
3160 .lldd_abort_task = mvs_task_abort,
3161 .lldd_port_formed = mvs_port_formed,
3162 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
3163};
3164
3165static struct pci_device_id __devinitdata mvs_pci_table[] = {
3166 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
3167 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
3168 {
3169 .vendor = PCI_VENDOR_ID_MARVELL,
3170 .device = 0x6440,
3171 .subvendor = PCI_ANY_ID,
3172 .subdevice = 0x6480,
3173 .class = 0,
3174 .class_mask = 0,
3175 .driver_data = chip_6480,
3176 },
3177 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
3178 { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
3179
3180 { } /* terminate list */
3181};
3182
3183static struct pci_driver mvs_pci_driver = {
3184 .name = DRV_NAME,
3185 .id_table = mvs_pci_table,
3186 .probe = mvs_pci_init,
3187 .remove = __devexit_p(mvs_pci_remove),
3188};
3189
3190static int __init mvs_init(void)
3191{
3192 int rc;
3193
3194 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
3195 if (!mvs_stt)
3196 return -ENOMEM;
3197
3198 rc = pci_register_driver(&mvs_pci_driver);
3199 if (rc)
3200 goto err_out;
3201
3202 return 0;
3203
3204err_out:
3205 sas_release_transport(mvs_stt);
3206 return rc;
3207}
3208
3209static void __exit mvs_exit(void)
3210{
3211 pci_unregister_driver(&mvs_pci_driver);
3212 sas_release_transport(mvs_stt);
3213}
3214
3215module_init(mvs_init);
3216module_exit(mvs_exit);
3217
3218MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
3219MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
3220MODULE_VERSION(DRV_VERSION);
3221MODULE_LICENSE("GPL");
3222MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
new file mode 100644
index 000000000000..6de7af27e507
--- /dev/null
+++ b/drivers/scsi/mvsas/Kconfig
@@ -0,0 +1,42 @@
1#
2# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the 88SE64XX/88SE94XX driver.
10#
11# The 88SE64XX/88SE94XX driver is free software; you can redistribute
12# it and/or modify it under the terms of the GNU General Public License
13# as published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
17# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24#
25#
26
27config SCSI_MVSAS
28 tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
29 depends on PCI
30 select SCSI_SAS_LIBSAS
31 select FW_LOADER
32 help
33 This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
34	  PCI-E 88SE94XX chip-based host adapters.
35
36config SCSI_MVSAS_DEBUG
37 bool "Compile in debug mode"
38 default y
39 depends on SCSI_MVSAS
40 help
41 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
42 the driver prints some messages to the console.
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
new file mode 100644
index 000000000000..52ac4264677d
--- /dev/null
+++ b/drivers/scsi/mvsas/Makefile
@@ -0,0 +1,32 @@
1#
2# Makefile for Marvell 88SE64xx/88SE84xx SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23
24ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
25 EXTRA_CFLAGS += -DMV_DEBUG
26endif
27
28obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
29mvsas-y += mv_init.o \
30 mv_sas.o \
31 mv_64xx.o \
32 mv_94xx.o
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
new file mode 100644
index 000000000000..10a5077b6aed
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -0,0 +1,793 @@
1/*
2 * Marvell 88SE64xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_64xx.h"
27#include "mv_chips.h"
28
29static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 void __iomem *regs = mvi->regs;
32 u32 reg;
33 struct mvs_phy *phy = &mvi->phy[i];
34
35 /* TODO check & save device type */
36 reg = mr32(MVS_GBL_PORT_TYPE);
37 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
38 if (reg & MODE_SAS_SATA & (1 << i))
39 phy->phy_type |= PORT_TYPE_SAS;
40 else
41 phy->phy_type |= PORT_TYPE_SATA;
42}
43
44static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
45{
46 void __iomem *regs = mvi->regs;
47 u32 tmp;
48
49 tmp = mr32(MVS_PCS);
50 if (mvi->chip->n_phy <= 4)
51 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
52 else
53 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
54 mw32(MVS_PCS, tmp);
55}
56
57static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
58{
59 void __iomem *regs = mvi->regs;
60
61 mvs_phy_hacks(mvi);
62
63 if (!(mvi->flags & MVF_FLAG_SOC)) {
64 /* TEST - for phy decoding error, adjust voltage levels */
65 mw32(MVS_P0_VSR_ADDR + 0, 0x8);
66 mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
67
68 mw32(MVS_P0_VSR_ADDR + 8, 0x8);
69 mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
70
71 mw32(MVS_P0_VSR_ADDR + 16, 0x8);
72 mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
73
74 mw32(MVS_P0_VSR_ADDR + 24, 0x8);
75 mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
76 } else {
77 int i;
78 /* disable auto port detection */
79 mw32(MVS_GBL_PORT_TYPE, 0);
80 for (i = 0; i < mvi->chip->n_phy; i++) {
81 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
82 mvs_write_port_vsr_data(mvi, i, 0x90000000);
83 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
84 mvs_write_port_vsr_data(mvi, i, 0x50f2);
85 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
86 mvs_write_port_vsr_data(mvi, i, 0x0e);
87 }
88 }
89}
90
91static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
92{
93 void __iomem *regs = mvi->regs;
94 u32 reg, tmp;
95
96 if (!(mvi->flags & MVF_FLAG_SOC)) {
97 if (phy_id < 4)
98 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
99 else
100 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
101
102 } else
103 reg = mr32(MVS_PHY_CTL);
104
105 tmp = reg;
106 if (phy_id < 4)
107 tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
108 else
109 tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
110
111 if (!(mvi->flags & MVF_FLAG_SOC)) {
112 if (phy_id < 4) {
113 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
114 mdelay(10);
115 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
116 } else {
117 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
118 mdelay(10);
119 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
120 }
121 } else {
122 mw32(MVS_PHY_CTL, tmp);
123 mdelay(10);
124 mw32(MVS_PHY_CTL, reg);
125 }
126}
127
128static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
129{
130 u32 tmp;
131 tmp = mvs_read_port_irq_stat(mvi, phy_id);
132 tmp &= ~PHYEV_RDY_CH;
133 mvs_write_port_irq_stat(mvi, phy_id, tmp);
134 tmp = mvs_read_phy_ctl(mvi, phy_id);
135 if (hard)
136 tmp |= PHY_RST_HARD;
137 else
138 tmp |= PHY_RST;
139 mvs_write_phy_ctl(mvi, phy_id, tmp);
140 if (hard) {
141 do {
142 tmp = mvs_read_phy_ctl(mvi, phy_id);
143 } while (tmp & PHY_RST_HARD);
144 }
145}
146
147static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
148{
149 void __iomem *regs = mvi->regs;
150 u32 tmp;
151 int i;
152
153 /* make sure interrupts are masked immediately (paranoia) */
154 mw32(MVS_GBL_CTL, 0);
155 tmp = mr32(MVS_GBL_CTL);
156
157 /* Reset Controller */
158 if (!(tmp & HBA_RST)) {
159 if (mvi->flags & MVF_PHY_PWR_FIX) {
160 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
161 tmp &= ~PCTL_PWR_OFF;
162 tmp |= PCTL_PHY_DSBL;
163 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
164
165 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
166 tmp &= ~PCTL_PWR_OFF;
167 tmp |= PCTL_PHY_DSBL;
168 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
169 }
170 }
171
172 /* make sure interrupts are masked immediately (paranoia) */
173 mw32(MVS_GBL_CTL, 0);
174 tmp = mr32(MVS_GBL_CTL);
175
176 /* Reset Controller */
177 if (!(tmp & HBA_RST)) {
178 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
179 mw32_f(MVS_GBL_CTL, HBA_RST);
180 }
181
182 /* wait for reset to finish; timeout is just a guess */
183 i = 1000;
184 while (i-- > 0) {
185 msleep(10);
186
187 if (!(mr32(MVS_GBL_CTL) & HBA_RST))
188 break;
189 }
190 if (mr32(MVS_GBL_CTL) & HBA_RST) {
191 dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
192 return -EBUSY;
193 }
194 return 0;
195}
196
197static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
198{
199 void __iomem *regs = mvi->regs;
200 u32 tmp;
201 if (!(mvi->flags & MVF_FLAG_SOC)) {
202 u32 offs;
203 if (phy_id < 4)
204 offs = PCR_PHY_CTL;
205 else {
206 offs = PCR_PHY_CTL2;
207 phy_id -= 4;
208 }
209 pci_read_config_dword(mvi->pdev, offs, &tmp);
210 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
211 pci_write_config_dword(mvi->pdev, offs, tmp);
212 } else {
213 tmp = mr32(MVS_PHY_CTL);
214 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
215 mw32(MVS_PHY_CTL, tmp);
216 }
217}
218
219static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
220{
221 void __iomem *regs = mvi->regs;
222 u32 tmp;
223 if (!(mvi->flags & MVF_FLAG_SOC)) {
224 u32 offs;
225 if (phy_id < 4)
226 offs = PCR_PHY_CTL;
227 else {
228 offs = PCR_PHY_CTL2;
229 phy_id -= 4;
230 }
231 pci_read_config_dword(mvi->pdev, offs, &tmp);
232 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
233 pci_write_config_dword(mvi->pdev, offs, tmp);
234 } else {
235 tmp = mr32(MVS_PHY_CTL);
236 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
237 mw32(MVS_PHY_CTL, tmp);
238 }
239}
240
241static int __devinit mvs_64xx_init(struct mvs_info *mvi)
242{
243 void __iomem *regs = mvi->regs;
244 int i;
245 u32 tmp, cctl;
246
247 if (mvi->pdev && mvi->pdev->revision == 0)
248 mvi->flags |= MVF_PHY_PWR_FIX;
249 if (!(mvi->flags & MVF_FLAG_SOC)) {
250 mvs_show_pcie_usage(mvi);
251 tmp = mvs_64xx_chip_reset(mvi);
252 if (tmp)
253 return tmp;
254 } else {
255 tmp = mr32(MVS_PHY_CTL);
256 tmp &= ~PCTL_PWR_OFF;
257 tmp |= PCTL_PHY_DSBL;
258 mw32(MVS_PHY_CTL, tmp);
259 }
260
261 /* Init Chip */
262 /* make sure RST is set; HBA_RST /should/ have done that for us */
263 cctl = mr32(MVS_CTL) & 0xFFFF;
264 if (cctl & CCTL_RST)
265 cctl &= ~CCTL_RST;
266 else
267 mw32_f(MVS_CTL, cctl | CCTL_RST);
268
269 if (!(mvi->flags & MVF_FLAG_SOC)) {
270 /* write to device control _AND_ device status register */
271 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
272 tmp &= ~PRD_REQ_MASK;
273 tmp |= PRD_REQ_SIZE;
274 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
275
276 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
277 tmp &= ~PCTL_PWR_OFF;
278 tmp &= ~PCTL_PHY_DSBL;
279 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
280
281 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
282		tmp &= ~PCTL_PWR_OFF;
283 tmp &= ~PCTL_PHY_DSBL;
284 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
285 } else {
286 tmp = mr32(MVS_PHY_CTL);
287 tmp &= ~PCTL_PWR_OFF;
288 tmp |= PCTL_COM_ON;
289 tmp &= ~PCTL_PHY_DSBL;
290 tmp |= PCTL_LINK_RST;
291 mw32(MVS_PHY_CTL, tmp);
292 msleep(100);
293 tmp &= ~PCTL_LINK_RST;
294 mw32(MVS_PHY_CTL, tmp);
295 msleep(100);
296 }
297
298 /* reset control */
299 mw32(MVS_PCS, 0); /* MVS_PCS */
300 /* init phys */
301 mvs_64xx_phy_hacks(mvi);
302
303 /* enable auto port detection */
304 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
305
306 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
307 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
308
309 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
310 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
311
312 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
313 mw32(MVS_TX_LO, mvi->tx_dma);
314 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
315
316 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
317 mw32(MVS_RX_LO, mvi->rx_dma);
318 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
319
320 for (i = 0; i < mvi->chip->n_phy; i++) {
321 /* set phy local SAS address */
322 /* should set little endian SAS address to 64xx chip */
323 mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
324 cpu_to_be64(mvi->phy[i].dev_sas_addr));
325
326 mvs_64xx_enable_xmt(mvi, i);
327
328 mvs_64xx_phy_reset(mvi, i, 1);
329 msleep(500);
330 mvs_64xx_detect_porttype(mvi, i);
331 }
332 if (mvi->flags & MVF_FLAG_SOC) {
333 /* set select registers */
334 writel(0x0E008000, regs + 0x000);
335 writel(0x59000008, regs + 0x004);
336 writel(0x20, regs + 0x008);
337 writel(0x20, regs + 0x00c);
338 writel(0x20, regs + 0x010);
339 writel(0x20, regs + 0x014);
340 writel(0x20, regs + 0x018);
341 writel(0x20, regs + 0x01c);
342 }
343 for (i = 0; i < mvi->chip->n_phy; i++) {
344 /* clear phy int status */
345 tmp = mvs_read_port_irq_stat(mvi, i);
346 tmp &= ~PHYEV_SIG_FIS;
347 mvs_write_port_irq_stat(mvi, i, tmp);
348
349 /* set phy int mask */
350 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
351 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
352 PHYEV_DEC_ERR;
353 mvs_write_port_irq_mask(mvi, i, tmp);
354
355 msleep(100);
356 mvs_update_phyinfo(mvi, i, 1);
357 }
358
359 /* FIXME: update wide port bitmaps */
360
361 /* little endian for open address and command table, etc. */
362 /*
363	 * it seems that (from the spec) turning on big-endian won't
364	 * do us any good on big-endian machines; needs further confirmation
365 */
366 cctl = mr32(MVS_CTL);
367 cctl |= CCTL_ENDIAN_CMD;
368 cctl |= CCTL_ENDIAN_DATA;
369 cctl &= ~CCTL_ENDIAN_OPEN;
370 cctl |= CCTL_ENDIAN_RSP;
371 mw32_f(MVS_CTL, cctl);
372
373 /* reset CMD queue */
374 tmp = mr32(MVS_PCS);
375 tmp |= PCS_CMD_RST;
376 mw32(MVS_PCS, tmp);
377	/* interrupt coalescing may cause missed HW interrupts in some cases,
378	 * and the max count is 0x1ff while our max slot count is 0x200,
379	 * which would truncate the count to 0.
380 */
381 tmp = 0;
382 mw32(MVS_INT_COAL, tmp);
383
384 tmp = 0x100;
385 mw32(MVS_INT_COAL_TMOUT, tmp);
386
387 /* ladies and gentlemen, start your engines */
388 mw32(MVS_TX_CFG, 0);
389 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
390 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
391 /* enable CMD/CMPL_Q/RESP mode */
392 mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
393 PCS_CMD_EN | PCS_CMD_STOP_ERR);
394
395 /* enable completion queue interrupt */
396 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
397 CINT_DMA_PCIE);
398
399 mw32(MVS_INT_MASK, tmp);
400
401 /* Enable SRS interrupt */
402 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
403
404 return 0;
405}
406
407static int mvs_64xx_ioremap(struct mvs_info *mvi)
408{
409 if (!mvs_ioremap(mvi, 4, 2))
410 return 0;
411 return -1;
412}
413
414static void mvs_64xx_iounmap(struct mvs_info *mvi)
415{
416 mvs_iounmap(mvi->regs);
417 mvs_iounmap(mvi->regs_ex);
418}
419
420static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
421{
422 void __iomem *regs = mvi->regs;
423 u32 tmp;
424
425 tmp = mr32(MVS_GBL_CTL);
426 mw32(MVS_GBL_CTL, tmp | INT_EN);
427}
428
429static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
430{
431 void __iomem *regs = mvi->regs;
432 u32 tmp;
433
434 tmp = mr32(MVS_GBL_CTL);
435 mw32(MVS_GBL_CTL, tmp & ~INT_EN);
436}
437
438static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
439{
440 void __iomem *regs = mvi->regs;
441 u32 stat;
442
443 if (!(mvi->flags & MVF_FLAG_SOC)) {
444 stat = mr32(MVS_GBL_INT_STAT);
445
446 if (stat == 0 || stat == 0xffffffff)
447 return 0;
448 } else
449 stat = 1;
450 return stat;
451}
452
453static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
454{
455 void __iomem *regs = mvi->regs;
456
457 /* clear CMD_CMPLT ASAP */
458 mw32_f(MVS_INT_STAT, CINT_DONE);
459#ifndef MVS_USE_TASKLET
460 spin_lock(&mvi->lock);
461#endif
462 mvs_int_full(mvi);
463#ifndef MVS_USE_TASKLET
464 spin_unlock(&mvi->lock);
465#endif
466 return IRQ_HANDLED;
467}
468
469static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
470{
471 u32 tmp;
472 mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
473 mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
474 do {
475 tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
476 } while (tmp & 1 << (slot_idx % 32));
477 do {
478 tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
479 } while (tmp & 1 << (slot_idx % 32));
480}
481
482static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
483 u32 tfs)
484{
485 void __iomem *regs = mvi->regs;
486 u32 tmp;
487
488 if (type == PORT_TYPE_SATA) {
489 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
490 mw32(MVS_INT_STAT_SRS_0, tmp);
491 }
492 mw32(MVS_INT_STAT, CINT_CI_STOP);
493 tmp = mr32(MVS_PCS) | 0xFF00;
494 mw32(MVS_PCS, tmp);
495}
496
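/* SATA register sets are tracked as a bitmap: sets 0-15 live in MVS_PCS and
 * sets 16 and up in MVS_CTL
 */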
497static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
498{
499 void __iomem *regs = mvi->regs;
500 u32 tmp, offs;
501
502 if (*tfs == MVS_ID_NOT_MAPPED)
503 return;
504
505 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
506 if (*tfs < 16) {
507 tmp = mr32(MVS_PCS);
508 mw32(MVS_PCS, tmp & ~offs);
509 } else {
510 tmp = mr32(MVS_CTL);
511 mw32(MVS_CTL, tmp & ~offs);
512 }
513
514 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
515 if (tmp)
516 mw32(MVS_INT_STAT_SRS_0, tmp);
517
518 *tfs = MVS_ID_NOT_MAPPED;
519 return;
520}
521
522static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
523{
524 int i;
525 u32 tmp, offs;
526 void __iomem *regs = mvi->regs;
527
528 if (*tfs != MVS_ID_NOT_MAPPED)
529 return 0;
530
531 tmp = mr32(MVS_PCS);
532
533 for (i = 0; i < mvi->chip->srs_sz; i++) {
534 if (i == 16)
535 tmp = mr32(MVS_CTL);
536 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
537 if (!(tmp & offs)) {
538 *tfs = i;
539
540 if (i < 16)
541 mw32(MVS_PCS, tmp | offs);
542 else
543 mw32(MVS_CTL, tmp | offs);
544 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
545 if (tmp)
546 mw32(MVS_INT_STAT_SRS_0, tmp);
547 return 0;
548 }
549 }
550 return MVS_ID_NOT_MAPPED;
551}
552
553void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
554{
555 int i;
556 struct scatterlist *sg;
557 struct mvs_prd *buf_prd = prd;
558 for_each_sg(scatter, sg, nr, i) {
559 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
560 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
561 buf_prd++;
562 }
563}
564
565static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
566{
567 u32 phy_st;
568 mvs_write_port_cfg_addr(mvi, i,
569 PHYR_PHY_STAT);
570 phy_st = mvs_read_port_cfg_data(mvi, i);
571 if (phy_st & PHY_OOB_DTCTD)
572 return 1;
573 return 0;
574}
575
576static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
577 struct sas_identify_frame *id)
578
579{
580 struct mvs_phy *phy = &mvi->phy[i];
581 struct asd_sas_phy *sas_phy = &phy->sas_phy;
582
583 sas_phy->linkrate =
584 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
585 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
586
587 phy->minimum_linkrate =
588 (phy->phy_status &
589 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
590 phy->maximum_linkrate =
591 (phy->phy_status &
592 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
593
594 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
595 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
596
597 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
598 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
599
600 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
601 phy->att_dev_sas_addr =
602 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
603 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
604 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
605 phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
606}
607
608static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
609{
610 u32 tmp;
611 struct mvs_phy *phy = &mvi->phy[i];
612 /* workaround for HW phy decoding error on 1.5g disk drive */
613 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
614 tmp = mvs_read_port_vsr_data(mvi, i);
615 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
616 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
617 SAS_LINK_RATE_1_5_GBPS)
618 tmp &= ~PHY_MODE6_LATECLK;
619 else
620 tmp |= PHY_MODE6_LATECLK;
621 mvs_write_port_vsr_data(mvi, i, tmp);
622}
623
624void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
625 struct sas_phy_linkrates *rates)
626{
627 u32 lrmin = 0, lrmax = 0;
628 u32 tmp;
629
630 tmp = mvs_read_phy_ctl(mvi, phy_id);
631 lrmin = (rates->minimum_linkrate << 8);
632 lrmax = (rates->maximum_linkrate << 12);
633
634 if (lrmin) {
635 tmp &= ~(0xf << 8);
636 tmp |= lrmin;
637 }
638 if (lrmax) {
639 tmp &= ~(0xf << 12);
640 tmp |= lrmax;
641 }
642 mvs_write_phy_ctl(mvi, phy_id, tmp);
643 mvs_64xx_phy_reset(mvi, phy_id, 1);
644}
645
646static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
647{
648 u32 tmp;
649 void __iomem *regs = mvi->regs;
650 tmp = mr32(MVS_PCS);
651 mw32(MVS_PCS, tmp & 0xFFFF);
652 mw32(MVS_PCS, tmp);
653 tmp = mr32(MVS_CTL);
654 mw32(MVS_CTL, tmp & 0xFFFF);
655 mw32(MVS_CTL, tmp);
656}
657
658
659u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
660{
661 void __iomem *regs = mvi->regs_ex;
662 return ior32(SPI_DATA_REG_64XX);
663}
664
665void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
666{
667 void __iomem *regs = mvi->regs_ex;
668 iow32(SPI_DATA_REG_64XX, data);
669}
670
671
672int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
673 u32 *dwCmd,
674 u8 cmd,
675 u8 read,
676 u8 length,
677 u32 addr
678 )
679{
680 u32 dwTmp;
681
682 dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
683 if (read)
684 dwTmp |= 1U<<23;
685
686 if (addr != MV_MAX_U32) {
687 dwTmp |= 1U<<22;
688 dwTmp |= (addr & 0x0003FFFF);
689 }
690
691 *dwCmd = dwTmp;
692 return 0;
693}
694
695
696int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
697{
698 void __iomem *regs = mvi->regs_ex;
699 int retry;
700
701 for (retry = 0; retry < 1; retry++) {
702 iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
703 iow32(SPI_CMD_REG_64XX, cmd);
704 iow32(SPI_CTRL_REG_64XX,
705 SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
706 }
707
708 return 0;
709}
710
711int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
712{
713 void __iomem *regs = mvi->regs_ex;
714 u32 i, dwTmp;
715
716 for (i = 0; i < timeout; i++) {
717 dwTmp = ior32(SPI_CTRL_REG_64XX);
718 if (!(dwTmp & SPI_CTRL_SPISTART))
719 return 0;
720 msleep(10);
721 }
722
723 return -1;
724}
725
726#ifndef DISABLE_HOTPLUG_DMA_FIX
727void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
728{
729 int i;
730 struct mvs_prd *buf_prd = prd;
731 buf_prd += from;
732 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
733 buf_prd->addr = cpu_to_le64(buf_dma);
734 buf_prd->len = cpu_to_le32(buf_len);
735 ++buf_prd;
736 }
737}
738#endif
739
740const struct mvs_dispatch mvs_64xx_dispatch = {
741 "mv64xx",
742 mvs_64xx_init,
743 NULL,
744 mvs_64xx_ioremap,
745 mvs_64xx_iounmap,
746 mvs_64xx_isr,
747 mvs_64xx_isr_status,
748 mvs_64xx_interrupt_enable,
749 mvs_64xx_interrupt_disable,
750 mvs_read_phy_ctl,
751 mvs_write_phy_ctl,
752 mvs_read_port_cfg_data,
753 mvs_write_port_cfg_data,
754 mvs_write_port_cfg_addr,
755 mvs_read_port_vsr_data,
756 mvs_write_port_vsr_data,
757 mvs_write_port_vsr_addr,
758 mvs_read_port_irq_stat,
759 mvs_write_port_irq_stat,
760 mvs_read_port_irq_mask,
761 mvs_write_port_irq_mask,
762 mvs_get_sas_addr,
763 mvs_64xx_command_active,
764 mvs_64xx_issue_stop,
765 mvs_start_delivery,
766 mvs_rx_update,
767 mvs_int_full,
768 mvs_64xx_assign_reg_set,
769 mvs_64xx_free_reg_set,
770 mvs_get_prd_size,
771 mvs_get_prd_count,
772 mvs_64xx_make_prd,
773 mvs_64xx_detect_porttype,
774 mvs_64xx_oob_done,
775 mvs_64xx_fix_phy_info,
776 mvs_64xx_phy_work_around,
777 mvs_64xx_phy_set_link_rate,
778 mvs_hw_max_link_rate,
779 mvs_64xx_phy_disable,
780 mvs_64xx_phy_enable,
781 mvs_64xx_phy_reset,
782 mvs_64xx_stp_reset,
783 mvs_64xx_clear_active_cmds,
784 mvs_64xx_spi_read_data,
785 mvs_64xx_spi_write_data,
786 mvs_64xx_spi_buildcmd,
787 mvs_64xx_spi_issuecmd,
788 mvs_64xx_spi_waitdataready,
789#ifndef DISABLE_HOTPLUG_DMA_FIX
790 mvs_64xx_fix_dma,
791#endif
792};
793
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644
index 000000000000..42e947d9795e
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -0,0 +1,151 @@
1/*
2 * Marvell 88SE64xx hardware specific header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS64XX_REG_H_
26#define _MVS64XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS
31
32/* enhanced mode registers (BAR4) */
33enum hw_registers {
34 MVS_GBL_CTL = 0x04, /* global control */
35 MVS_GBL_INT_STAT = 0x08, /* global irq status */
36 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
37
38 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
39 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
40
41 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
42
43 MVS_CTL = 0x100, /* SAS/SATA port configuration */
44 MVS_PCS = 0x104, /* SAS/SATA port control/status */
45 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
46 MVS_CMD_LIST_HI = 0x10C,
47 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
48 MVS_RX_FIS_HI = 0x114,
49
50 MVS_TX_CFG = 0x120, /* TX configuration */
51 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
52 MVS_TX_HI = 0x128,
53
54 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
55 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
56 MVS_RX_CFG = 0x134, /* RX configuration */
57 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
58 MVS_RX_HI = 0x13C,
59 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
60
61 MVS_INT_COAL = 0x148, /* Int coalescing config */
62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
63 MVS_INT_STAT = 0x150, /* Central int status */
64 MVS_INT_MASK = 0x154, /* Central int enable */
65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
66 MVS_INT_MASK_SRS_0 = 0x15C,
67
68 /* ports 1-3 follow after this */
69 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
70 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
71 /* ports 5-7 follow after this */
72 MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */
73 MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */
74
75 /* ports 1-3 follow after this */
76 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
77 /* ports 5-7 follow after this */
78 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
79
80 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
81 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
82
83 /* ports 1-3 follow after this */
84 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
85 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
86 /* ports 5-7 follow after this */
87 MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */
88 MVS_P4_CFG_DATA = 0x234, /* Port4 config data */
89
90 /* ports 1-3 follow after this */
91 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
92 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
93 /* ports 5-7 follow after this */
94 MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */
95 MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */
96};
97
98enum pci_cfg_registers {
99 PCR_PHY_CTL = 0x40,
100 PCR_PHY_CTL2 = 0x90,
101 PCR_DEV_CTRL = 0xE8,
102 PCR_LINK_STAT = 0xF2,
103};
104
105/* SAS/SATA Vendor Specific Port Registers */
106enum sas_sata_vsp_regs {
107 VSR_PHY_STAT = 0x00, /* Phy Status */
108 VSR_PHY_MODE1 = 0x01, /* phy tx */
109 VSR_PHY_MODE2 = 0x02, /* tx scc */
110 VSR_PHY_MODE3 = 0x03, /* pll */
111 VSR_PHY_MODE4 = 0x04, /* VCO */
112 VSR_PHY_MODE5 = 0x05, /* Rx */
113 VSR_PHY_MODE6 = 0x06, /* CDR */
114 VSR_PHY_MODE7 = 0x07, /* Impedance */
115 VSR_PHY_MODE8 = 0x08, /* Voltage */
116 VSR_PHY_MODE9 = 0x09, /* Test */
117 VSR_PHY_MODE10 = 0x0A, /* Power */
118 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
119	VSR_PHY_VS0		= 0x0C,	/* Vendor Specific 0 */
120	VSR_PHY_VS1		= 0x0D,	/* Vendor Specific 1 */
121};
122
123enum chip_register_bits {
124 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
125 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
126 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
127 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
128 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
129};
130
131#define MAX_SG_ENTRY 64
132
133struct mvs_prd {
134 __le64 addr; /* 64-bit buffer address */
135 __le32 reserved;
136 __le32 len; /* 16-bit length */
137};
138
139#define SPI_CTRL_REG 0xc0
140#define SPI_CTRL_VENDOR_ENABLE (1U<<29)
141#define SPI_CTRL_SPIRDY (1U<<22)
142#define SPI_CTRL_SPISTART (1U<<20)
143
144#define SPI_CMD_REG 0xc4
145#define SPI_DATA_REG 0xc8
146
147#define SPI_CTRL_REG_64XX 0x10
148#define SPI_CMD_REG_64XX 0x14
149#define SPI_DATA_REG_64XX 0x18
150
151#endif
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644
index 000000000000..0940fae19d20
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -0,0 +1,672 @@
1/*
2 * Marvell 88SE94xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_94xx.h"
27#include "mv_chips.h"
28
29static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 u32 reg;
32 struct mvs_phy *phy = &mvi->phy[i];
33 u32 phy_status;
34
35 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
36 reg = mvs_read_port_vsr_data(mvi, i);
37 phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
38 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
39 switch (phy_status) {
40 case 0x10:
41 phy->phy_type |= PORT_TYPE_SAS;
42 break;
43 case 0x1d:
44 default:
45 phy->phy_type |= PORT_TYPE_SATA;
46 break;
47 }
48}
49
50static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
51{
52 void __iomem *regs = mvi->regs;
53 u32 tmp;
54
55 tmp = mr32(MVS_PCS);
56 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
57 mw32(MVS_PCS, tmp);
58}
59
60static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
61{
62 u32 tmp;
63
64 tmp = mvs_read_port_irq_stat(mvi, phy_id);
65 tmp &= ~PHYEV_RDY_CH;
66 mvs_write_port_irq_stat(mvi, phy_id, tmp);
67 if (hard) {
68 tmp = mvs_read_phy_ctl(mvi, phy_id);
69 tmp |= PHY_RST_HARD;
70 mvs_write_phy_ctl(mvi, phy_id, tmp);
71 do {
72 tmp = mvs_read_phy_ctl(mvi, phy_id);
73 } while (tmp & PHY_RST_HARD);
74 } else {
75 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
76 tmp = mvs_read_port_vsr_data(mvi, phy_id);
77 tmp |= PHY_RST;
78 mvs_write_port_vsr_data(mvi, phy_id, tmp);
79 }
80}
81
82static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
83{
84 u32 tmp;
85 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
86 tmp = mvs_read_port_vsr_data(mvi, phy_id);
87 mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
88}
89
90static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
91{
92 mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
93 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
94 mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
95 mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
96 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
97 mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
98}
99
100static int __devinit mvs_94xx_init(struct mvs_info *mvi)
101{
102 void __iomem *regs = mvi->regs;
103 int i;
104 u32 tmp, cctl;
105
106 mvs_show_pcie_usage(mvi);
107 if (mvi->flags & MVF_FLAG_SOC) {
108 tmp = mr32(MVS_PHY_CTL);
109 tmp &= ~PCTL_PWR_OFF;
110 tmp |= PCTL_PHY_DSBL;
111 mw32(MVS_PHY_CTL, tmp);
112 }
113
114 /* Init Chip */
115 /* make sure RST is set; HBA_RST /should/ have done that for us */
116 cctl = mr32(MVS_CTL) & 0xFFFF;
117 if (cctl & CCTL_RST)
118 cctl &= ~CCTL_RST;
119 else
120 mw32_f(MVS_CTL, cctl | CCTL_RST);
121
122 if (mvi->flags & MVF_FLAG_SOC) {
123 tmp = mr32(MVS_PHY_CTL);
124 tmp &= ~PCTL_PWR_OFF;
125 tmp |= PCTL_COM_ON;
126 tmp &= ~PCTL_PHY_DSBL;
127 tmp |= PCTL_LINK_RST;
128 mw32(MVS_PHY_CTL, tmp);
129 msleep(100);
130 tmp &= ~PCTL_LINK_RST;
131 mw32(MVS_PHY_CTL, tmp);
132 msleep(100);
133 }
134
135 /* reset control */
136 mw32(MVS_PCS, 0); /* MVS_PCS */
137 mw32(MVS_STP_REG_SET_0, 0);
138 mw32(MVS_STP_REG_SET_1, 0);
139
140 /* init phys */
141 mvs_phy_hacks(mvi);
142
143 /* disable Multiplexing, enable phy implemented */
144 mw32(MVS_PORTS_IMP, 0xFF);
145
146
147 mw32(MVS_PA_VSR_ADDR, 0x00000104);
148 mw32(MVS_PA_VSR_PORT, 0x00018080);
149 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
150 mw32(MVS_PA_VSR_PORT, 0x0084ffff);
151
152	/* set LED blink when IO */
153 mw32(MVS_PA_VSR_ADDR, 0x00000030);
154 tmp = mr32(MVS_PA_VSR_PORT);
155 tmp &= 0xFFFF00FF;
156 tmp |= 0x00003300;
157 mw32(MVS_PA_VSR_PORT, tmp);
158
159 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
160 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
161
162 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
163 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
164
165 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
166 mw32(MVS_TX_LO, mvi->tx_dma);
167 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
168
169 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
170 mw32(MVS_RX_LO, mvi->rx_dma);
171 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
172
173 for (i = 0; i < mvi->chip->n_phy; i++) {
174 mvs_94xx_phy_disable(mvi, i);
175 /* set phy local SAS address */
176 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
177 (mvi->phy[i].dev_sas_addr));
178
179 mvs_94xx_enable_xmt(mvi, i);
180 mvs_94xx_phy_enable(mvi, i);
181
182 mvs_94xx_phy_reset(mvi, i, 1);
183 msleep(500);
184 mvs_94xx_detect_porttype(mvi, i);
185 }
186
187 if (mvi->flags & MVF_FLAG_SOC) {
188 /* set select registers */
189 writel(0x0E008000, regs + 0x000);
190 writel(0x59000008, regs + 0x004);
191 writel(0x20, regs + 0x008);
192 writel(0x20, regs + 0x00c);
193 writel(0x20, regs + 0x010);
194 writel(0x20, regs + 0x014);
195 writel(0x20, regs + 0x018);
196 writel(0x20, regs + 0x01c);
197 }
198 for (i = 0; i < mvi->chip->n_phy; i++) {
199 /* clear phy int status */
200 tmp = mvs_read_port_irq_stat(mvi, i);
201 tmp &= ~PHYEV_SIG_FIS;
202 mvs_write_port_irq_stat(mvi, i, tmp);
203
204 /* set phy int mask */
205 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
206 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
207 mvs_write_port_irq_mask(mvi, i, tmp);
208
209 msleep(100);
210 mvs_update_phyinfo(mvi, i, 1);
211 }
212
213 /* FIXME: update wide port bitmaps */
214
215 /* little endian for open address and command table, etc. */
216	/*
217	 * it seems that (from the spec) turning on big-endian won't
218	 * do us any good on big-endian machines; needs further confirmation
219	 */
220 cctl = mr32(MVS_CTL);
221 cctl |= CCTL_ENDIAN_CMD;
222 cctl |= CCTL_ENDIAN_DATA;
223 cctl &= ~CCTL_ENDIAN_OPEN;
224 cctl |= CCTL_ENDIAN_RSP;
225 mw32_f(MVS_CTL, cctl);
226
227 /* reset CMD queue */
228 tmp = mr32(MVS_PCS);
229 tmp |= PCS_CMD_RST;
230 mw32(MVS_PCS, tmp);
231	/* interrupt coalescing may cause a missed HW interrupt in some cases,
232	 * and the max count is 0x1ff, while our max slot is 0x200,
233	 * which would make the count 0.
234	 */
235 tmp = 0;
236 mw32(MVS_INT_COAL, tmp);
237
238 tmp = 0x100;
239 mw32(MVS_INT_COAL_TMOUT, tmp);
240
241 /* ladies and gentlemen, start your engines */
242 mw32(MVS_TX_CFG, 0);
243 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
244 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
245 /* enable CMD/CMPL_Q/RESP mode */
246 mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
247 PCS_CMD_EN | PCS_CMD_STOP_ERR);
248
249 /* enable completion queue interrupt */
250 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
251 CINT_DMA_PCIE);
252 tmp |= CINT_PHY_MASK;
253 mw32(MVS_INT_MASK, tmp);
254
255 /* Enable SRS interrupt */
256 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
257
258 return 0;
259}
260
261static int mvs_94xx_ioremap(struct mvs_info *mvi)
262{
263 if (!mvs_ioremap(mvi, 2, -1)) {
264 mvi->regs_ex = mvi->regs + 0x10200;
265 mvi->regs += 0x20000;
266 if (mvi->id == 1)
267 mvi->regs += 0x4000;
268 return 0;
269 }
270 return -1;
271}
272
273static void mvs_94xx_iounmap(struct mvs_info *mvi)
274{
275 if (mvi->regs) {
276 mvi->regs -= 0x20000;
277 if (mvi->id == 1)
278 mvi->regs -= 0x4000;
279 mvs_iounmap(mvi->regs);
280 }
281}
282
283static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
284{
285 void __iomem *regs = mvi->regs_ex;
286 u32 tmp;
287
288 tmp = mr32(MVS_GBL_CTL);
289 tmp |= (IRQ_SAS_A | IRQ_SAS_B);
290 mw32(MVS_GBL_INT_STAT, tmp);
291 writel(tmp, regs + 0x0C);
292 writel(tmp, regs + 0x10);
293 writel(tmp, regs + 0x14);
294 writel(tmp, regs + 0x18);
295 mw32(MVS_GBL_CTL, tmp);
296}
297
298static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
299{
300 void __iomem *regs = mvi->regs_ex;
301 u32 tmp;
302
303 tmp = mr32(MVS_GBL_CTL);
304
305 tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
306 mw32(MVS_GBL_INT_STAT, tmp);
307 writel(tmp, regs + 0x0C);
308 writel(tmp, regs + 0x10);
309 writel(tmp, regs + 0x14);
310 writel(tmp, regs + 0x18);
311 mw32(MVS_GBL_CTL, tmp);
312}
313
314static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
315{
316 void __iomem *regs = mvi->regs_ex;
317 u32 stat = 0;
318 if (!(mvi->flags & MVF_FLAG_SOC)) {
319 stat = mr32(MVS_GBL_INT_STAT);
320
321 if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
322 return 0;
323 }
324 return stat;
325}
326
327static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
328{
329 void __iomem *regs = mvi->regs;
330
331 if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
332 ((stat & IRQ_SAS_B) && mvi->id == 1)) {
333 mw32_f(MVS_INT_STAT, CINT_DONE);
334 #ifndef MVS_USE_TASKLET
335 spin_lock(&mvi->lock);
336 #endif
337 mvs_int_full(mvi);
338 #ifndef MVS_USE_TASKLET
339 spin_unlock(&mvi->lock);
340 #endif
341 }
342 return IRQ_HANDLED;
343}
344
345static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
346{
347 u32 tmp;
348 mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
349 do {
350 tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
351 } while (tmp & 1 << (slot_idx % 32));
352}
353
354static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
355 u32 tfs)
356{
357 void __iomem *regs = mvi->regs;
358 u32 tmp;
359
360 if (type == PORT_TYPE_SATA) {
361 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
362 mw32(MVS_INT_STAT_SRS_0, tmp);
363 }
364 mw32(MVS_INT_STAT, CINT_CI_STOP);
365 tmp = mr32(MVS_PCS) | 0xFF00;
366 mw32(MVS_PCS, tmp);
367}
368
369static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
370{
371 void __iomem *regs = mvi->regs;
372 u32 tmp;
373 u8 reg_set = *tfs;
374
375 if (*tfs == MVS_ID_NOT_MAPPED)
376 return;
377
378 mvi->sata_reg_set &= ~bit(reg_set);
379 if (reg_set < 32) {
380 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
381 tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
382 if (tmp)
383 mw32(MVS_INT_STAT_SRS_0, tmp);
384 } else {
385 w_reg_set_enable(reg_set, mvi->sata_reg_set);
386 tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
387 if (tmp)
388 mw32(MVS_INT_STAT_SRS_1, tmp);
389 }
390
391 *tfs = MVS_ID_NOT_MAPPED;
392
393 return;
394}
395
396static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
397{
398 int i;
399 void __iomem *regs = mvi->regs;
400
401 if (*tfs != MVS_ID_NOT_MAPPED)
402 return 0;
403
404 i = mv_ffc64(mvi->sata_reg_set);
405 if (i > 32) {
406 mvi->sata_reg_set |= bit(i);
407 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
408 *tfs = i;
409 return 0;
410 } else if (i >= 0) {
411 mvi->sata_reg_set |= bit(i);
412 w_reg_set_enable(i, (u32)mvi->sata_reg_set);
413 *tfs = i;
414 return 0;
415 }
416 return MVS_ID_NOT_MAPPED;
417}
418
419static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
420{
421 int i;
422 struct scatterlist *sg;
423 struct mvs_prd *buf_prd = prd;
424 for_each_sg(scatter, sg, nr, i) {
425 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
426 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
427 buf_prd++;
428 }
429}
430
431static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
432{
433 u32 phy_st;
434 phy_st = mvs_read_phy_ctl(mvi, i);
435 if (phy_st & PHY_READY_MASK) /* phy ready */
436 return 1;
437 return 0;
438}
439
440static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
441 struct sas_identify_frame *id)
442{
443 int i;
444 u32 id_frame[7];
445
446 for (i = 0; i < 7; i++) {
447 mvs_write_port_cfg_addr(mvi, port_id,
448 CONFIG_ID_FRAME0 + i * 4);
449 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
450 }
451 memcpy(id, id_frame, 28);
452}
453
454static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
455 struct sas_identify_frame *id)
456{
457 int i;
458 u32 id_frame[7];
459
460 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
461 for (i = 0; i < 7; i++) {
462 mvs_write_port_cfg_addr(mvi, port_id,
463 CONFIG_ATT_ID_FRAME0 + i * 4);
464 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
465 mv_dprintk("94xx phy %d atta frame %d %x.\n",
466 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
467 }
468 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
469 memcpy(id, id_frame, 28);
470}
471
472static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
473{
474 u32 att_dev_info = 0;
475
476 att_dev_info |= id->dev_type;
477 if (id->stp_iport)
478 att_dev_info |= PORT_DEV_STP_INIT;
479 if (id->smp_iport)
480 att_dev_info |= PORT_DEV_SMP_INIT;
481 if (id->ssp_iport)
482 att_dev_info |= PORT_DEV_SSP_INIT;
483 if (id->stp_tport)
484 att_dev_info |= PORT_DEV_STP_TRGT;
485 if (id->smp_tport)
486 att_dev_info |= PORT_DEV_SMP_TRGT;
487 if (id->ssp_tport)
488 att_dev_info |= PORT_DEV_SSP_TRGT;
489
490 att_dev_info |= (u32)id->phy_id<<24;
491 return att_dev_info;
492}
493
494static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
495{
496 return mvs_94xx_make_dev_info(id);
497}
498
499static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
500 struct sas_identify_frame *id)
501{
502 struct mvs_phy *phy = &mvi->phy[i];
503 struct asd_sas_phy *sas_phy = &phy->sas_phy;
504 mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
505 sas_phy->linkrate =
506 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
507 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
508 sas_phy->linkrate += 0x8;
509 mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
510 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
511 phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
512 mvs_94xx_get_dev_identify_frame(mvi, i, id);
513 phy->dev_info = mvs_94xx_make_dev_info(id);
514
515 if (phy->phy_type & PORT_TYPE_SAS) {
516 mvs_94xx_get_att_identify_frame(mvi, i, id);
517 phy->att_dev_info = mvs_94xx_make_att_info(id);
518 phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
519 } else {
520 phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
521 }
522
523}
524
525void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
526 struct sas_phy_linkrates *rates)
527{
528 /* TODO */
529}
530
531static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
532{
533 u32 tmp;
534 void __iomem *regs = mvi->regs;
535 tmp = mr32(MVS_STP_REG_SET_0);
536 mw32(MVS_STP_REG_SET_0, 0);
537 mw32(MVS_STP_REG_SET_0, tmp);
538 tmp = mr32(MVS_STP_REG_SET_1);
539 mw32(MVS_STP_REG_SET_1, 0);
540 mw32(MVS_STP_REG_SET_1, tmp);
541}
542
543
544u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
545{
546 void __iomem *regs = mvi->regs_ex - 0x10200;
547 return mr32(SPI_RD_DATA_REG_94XX);
548}
549
550void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
551{
552 void __iomem *regs = mvi->regs_ex - 0x10200;
553 mw32(SPI_RD_DATA_REG_94XX, data);
554}
555
556
557int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
558 u32 *dwCmd,
559 u8 cmd,
560 u8 read,
561 u8 length,
562 u32 addr
563 )
564{
565 void __iomem *regs = mvi->regs_ex - 0x10200;
566 u32 dwTmp;
567
568 dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
569 if (read)
570 dwTmp |= SPI_CTRL_READ_94XX;
571
572 if (addr != MV_MAX_U32) {
573 mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
574 dwTmp |= SPI_ADDR_VLD_94XX;
575 }
576
577 *dwCmd = dwTmp;
578 return 0;
579}
580
581
582int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
583{
584 void __iomem *regs = mvi->regs_ex - 0x10200;
585 mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
586
587 return 0;
588}
589
590int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
591{
592 void __iomem *regs = mvi->regs_ex - 0x10200;
593 u32 i, dwTmp;
594
595 for (i = 0; i < timeout; i++) {
596 dwTmp = mr32(SPI_CTRL_REG_94XX);
597 if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
598 return 0;
599 msleep(10);
600 }
601
602 return -1;
603}
604
605#ifndef DISABLE_HOTPLUG_DMA_FIX
606void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
607{
608 int i;
609 struct mvs_prd *buf_prd = prd;
610 buf_prd += from;
611 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
612 buf_prd->addr = cpu_to_le64(buf_dma);
613 buf_prd->im_len.len = cpu_to_le32(buf_len);
614 ++buf_prd;
615 }
616}
617#endif
618
619const struct mvs_dispatch mvs_94xx_dispatch = {
620 "mv94xx",
621 mvs_94xx_init,
622 NULL,
623 mvs_94xx_ioremap,
624 mvs_94xx_iounmap,
625 mvs_94xx_isr,
626 mvs_94xx_isr_status,
627 mvs_94xx_interrupt_enable,
628 mvs_94xx_interrupt_disable,
629 mvs_read_phy_ctl,
630 mvs_write_phy_ctl,
631 mvs_read_port_cfg_data,
632 mvs_write_port_cfg_data,
633 mvs_write_port_cfg_addr,
634 mvs_read_port_vsr_data,
635 mvs_write_port_vsr_data,
636 mvs_write_port_vsr_addr,
637 mvs_read_port_irq_stat,
638 mvs_write_port_irq_stat,
639 mvs_read_port_irq_mask,
640 mvs_write_port_irq_mask,
641 mvs_get_sas_addr,
642 mvs_94xx_command_active,
643 mvs_94xx_issue_stop,
644 mvs_start_delivery,
645 mvs_rx_update,
646 mvs_int_full,
647 mvs_94xx_assign_reg_set,
648 mvs_94xx_free_reg_set,
649 mvs_get_prd_size,
650 mvs_get_prd_count,
651 mvs_94xx_make_prd,
652 mvs_94xx_detect_porttype,
653 mvs_94xx_oob_done,
654 mvs_94xx_fix_phy_info,
655 NULL,
656 mvs_94xx_phy_set_link_rate,
657 mvs_hw_max_link_rate,
658 mvs_94xx_phy_disable,
659 mvs_94xx_phy_enable,
660 mvs_94xx_phy_reset,
661 NULL,
662 mvs_94xx_clear_active_cmds,
663 mvs_94xx_spi_read_data,
664 mvs_94xx_spi_write_data,
665 mvs_94xx_spi_buildcmd,
666 mvs_94xx_spi_issuecmd,
667 mvs_94xx_spi_waitdataready,
668#ifndef DISABLE_HOTPLUG_DMA_FIX
669 mvs_94xx_fix_dma,
670#endif
671};
672
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644
index 000000000000..23ed9b164669
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -0,0 +1,222 @@
1/*
2 * Marvell 88SE94xx hardware specific header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS94XX_REG_H_
26#define _MVS94XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
31
32enum hw_registers {
33 MVS_GBL_CTL = 0x04, /* global control */
34 MVS_GBL_INT_STAT = 0x00, /* global irq status */
35 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
36
37 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
38 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
39
40 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
41
42 MVS_CTL = 0x100, /* SAS/SATA port configuration */
43 MVS_PCS = 0x104, /* SAS/SATA port control/status */
44 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
45 MVS_CMD_LIST_HI = 0x10C,
46 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
47 MVS_RX_FIS_HI = 0x114,
48 MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */
49 MVS_STP_REG_SET_1 = 0x11C,
50 MVS_TX_CFG = 0x120, /* TX configuration */
51 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
52 MVS_TX_HI = 0x128,
53
54 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
55 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
56 MVS_RX_CFG = 0x134, /* RX configuration */
57 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
58 MVS_RX_HI = 0x13C,
59 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
60
61 MVS_INT_COAL = 0x148, /* Int coalescing config */
62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
63 MVS_INT_STAT = 0x150, /* Central int status */
64 MVS_INT_MASK = 0x154, /* Central int enable */
65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
66 MVS_INT_MASK_SRS_0 = 0x15C,
67 MVS_INT_STAT_SRS_1 = 0x160,
68 MVS_INT_MASK_SRS_1 = 0x164,
69 MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */
70 MVS_NON_NCQ_ERR_1 = 0x16C,
71 MVS_CMD_ADDR = 0x170, /* Command register port (addr) */
72 MVS_CMD_DATA = 0x174, /* Command register port (data) */
73 MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */
74
75 /* ports 1-3 follow after this */
76 MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */
77 MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */
78 /* ports 5-7 follow after this */
79 MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */
80 MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */
81
82 /* ports 1-3 follow after this */
83 MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */
84 /* ports 5-7 follow after this */
85 MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */
86
87 /* ports 1-3 follow after this */
88 MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */
89 MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */
90 /* ports 5-7 follow after this */
91 MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */
92 MVS_P4_CFG_DATA = 0x224, /* Port4 config data */
93
94 /* phys 1-3 follow after this */
95 MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */
96 MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */
97 /* phys 1-3 follow after this */
98 /* multiplexing */
99 MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */
100 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
101 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
102 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
103};
104
105enum pci_cfg_registers {
106 PCR_PHY_CTL = 0x40,
107 PCR_PHY_CTL2 = 0x90,
108 PCR_DEV_CTRL = 0x78,
109 PCR_LINK_STAT = 0x82,
110};
111
112/* SAS/SATA Vendor Specific Port Registers */
113enum sas_sata_vsp_regs {
114 VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
115 VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
116 VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
117 VSR_PHY_MODE3 = 0x03 * 4, /* pll */
118 VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
119 VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
120 VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
121 VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
122 VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
123 VSR_PHY_MODE9 = 0x09 * 4, /* Test */
124 VSR_PHY_MODE10 = 0x0A * 4, /* Power */
125 VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
126	VSR_PHY_VS0		= 0x0C * 4,	/* Vendor Specific 0 */
127	VSR_PHY_VS1		= 0x0D * 4,	/* Vendor Specific 1 */
128};
129
130enum chip_register_bits {
131 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
132 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
133 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
134 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
135 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
136};
137
138enum pci_interrupt_cause {
139 /* MAIN_IRQ_CAUSE (R10200) Bits*/
140 IRQ_COM_IN_I2O_IOP0 = (1 << 0),
141 IRQ_COM_IN_I2O_IOP1 = (1 << 1),
142 IRQ_COM_IN_I2O_IOP2 = (1 << 2),
143 IRQ_COM_IN_I2O_IOP3 = (1 << 3),
144 IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
145 IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
146 IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
147 IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
148 IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
149 IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
150 IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
151 IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
152 IRQ_PCIF_DRBL0 = (1 << 12),
153 IRQ_PCIF_DRBL1 = (1 << 13),
154 IRQ_PCIF_DRBL2 = (1 << 14),
155 IRQ_PCIF_DRBL3 = (1 << 15),
156 IRQ_XOR_A = (1 << 16),
157 IRQ_XOR_B = (1 << 17),
158 IRQ_SAS_A = (1 << 18),
159 IRQ_SAS_B = (1 << 19),
160 IRQ_CPU_CNTRL = (1 << 20),
161 IRQ_GPIO = (1 << 21),
162 IRQ_UART = (1 << 22),
163 IRQ_SPI = (1 << 23),
164 IRQ_I2C = (1 << 24),
165 IRQ_SGPIO = (1 << 25),
166 IRQ_COM_ERR = (1 << 29),
167 IRQ_I2O_ERR = (1 << 30),
168 IRQ_PCIE_ERR = (1 << 31),
169};
170
171#define MAX_SG_ENTRY 255
172
173struct mvs_prd_imt {
174 __le32 len:22;
175 u8 _r_a:2;
176 u8 misc_ctl:4;
177 u8 inter_sel:4;
178};
179
180struct mvs_prd {
181 /* 64-bit buffer address */
182 __le64 addr;
183 /* 22-bit length */
184 struct mvs_prd_imt im_len;
185} __attribute__ ((packed));
186
187#define SPI_CTRL_REG_94XX 0xc800
188#define SPI_ADDR_REG_94XX 0xc804
189#define SPI_WR_DATA_REG_94XX 0xc808
190#define SPI_RD_DATA_REG_94XX 0xc80c
191#define SPI_CTRL_READ_94XX (1U << 2)
192#define SPI_ADDR_VLD_94XX (1U << 1)
193#define SPI_CTRL_SpiStart_94XX (1U << 0)
194
195#define mv_ffc(x) ffz(x)
196
197static inline int
198mv_ffc64(u64 v)
199{
200 int i;
201 i = mv_ffc((u32)v);
202 if (i >= 0)
203 return i;
204 i = mv_ffc((u32)(v>>32));
205
206 if (i != 0)
207 return 32 + i;
208
209 return -1;
210}
211
212#define r_reg_set_enable(i) \
213 (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
214 mr32(MVS_STP_REG_SET_0))
215
216#define w_reg_set_enable(i, tmp) \
217 (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
218 mw32(MVS_STP_REG_SET_0, tmp))
219
220extern const struct mvs_dispatch mvs_94xx_dispatch;
221#endif
222
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644
index 000000000000..a67e1c4172f9
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -0,0 +1,280 @@
1/*
2 * Marvell 88SE64xx/88SE94xx register IO interface
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#ifndef _MV_CHIPS_H_
27#define _MV_CHIPS_H_
28
29#define mr32(reg) readl(regs + reg)
30#define mw32(reg, val) writel((val), regs + reg)
31#define mw32_f(reg, val) do { \
32 mw32(reg, val); \
33 mr32(reg); \
34 } while (0)
35
36#define iow32(reg, val) outl(val, (unsigned long)(regs + reg))
37#define ior32(reg) inl((unsigned long)(regs + reg))
38#define iow16(reg, val)		outw(val, (unsigned long)(regs + reg))
39#define ior16(reg) inw((unsigned long)(regs + reg))
40#define iow8(reg, val)		outb(val, (unsigned long)(regs + reg))
41#define ior8(reg) inb((unsigned long)(regs + reg))
42
43static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
44{
45 void __iomem *regs = mvi->regs;
46 mw32(MVS_CMD_ADDR, addr);
47 return mr32(MVS_CMD_DATA);
48}
49
50static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
51{
52 void __iomem *regs = mvi->regs;
53 mw32(MVS_CMD_ADDR, addr);
54 mw32(MVS_CMD_DATA, val);
55}
56
57static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
58{
59 void __iomem *regs = mvi->regs;
60 return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
61 mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
62}
63
64static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
65{
66 void __iomem *regs = mvi->regs;
67 if (port < 4)
68 mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
69 else
70 mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
71}
72
73static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
74 u32 off2, u32 port)
75{
76 void __iomem *regs = mvi->regs + off;
77 void __iomem *regs2 = mvi->regs + off2;
78 return (port < 4) ? readl(regs + port * 8) :
79 readl(regs2 + (port - 4) * 8);
80}
81
82static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
83 u32 port, u32 val)
84{
85 void __iomem *regs = mvi->regs + off;
86 void __iomem *regs2 = mvi->regs + off2;
87 if (port < 4)
88 writel(val, regs + port * 8);
89 else
90 writel(val, regs2 + (port - 4) * 8);
91}
92
93static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
94{
95 return mvs_read_port(mvi, MVS_P0_CFG_DATA,
96 MVS_P4_CFG_DATA, port);
97}
98
99static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
100 u32 port, u32 val)
101{
102 mvs_write_port(mvi, MVS_P0_CFG_DATA,
103 MVS_P4_CFG_DATA, port, val);
104}
105
106static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
107 u32 port, u32 addr)
108{
109 mvs_write_port(mvi, MVS_P0_CFG_ADDR,
110 MVS_P4_CFG_ADDR, port, addr);
111 mdelay(10);
112}
113
114static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
115{
116 return mvs_read_port(mvi, MVS_P0_VSR_DATA,
117 MVS_P4_VSR_DATA, port);
118}
119
120static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
121 u32 port, u32 val)
122{
123 mvs_write_port(mvi, MVS_P0_VSR_DATA,
124 MVS_P4_VSR_DATA, port, val);
125}
126
127static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
128 u32 port, u32 addr)
129{
130 mvs_write_port(mvi, MVS_P0_VSR_ADDR,
131 MVS_P4_VSR_ADDR, port, addr);
132 mdelay(10);
133}
134
135static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
136{
137 return mvs_read_port(mvi, MVS_P0_INT_STAT,
138 MVS_P4_INT_STAT, port);
139}
140
141static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
142 u32 port, u32 val)
143{
144 mvs_write_port(mvi, MVS_P0_INT_STAT,
145 MVS_P4_INT_STAT, port, val);
146}
147
148static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
149{
150 return mvs_read_port(mvi, MVS_P0_INT_MASK,
151 MVS_P4_INT_MASK, port);
152
153}
154
155static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
156 u32 port, u32 val)
157{
158 mvs_write_port(mvi, MVS_P0_INT_MASK,
159 MVS_P4_INT_MASK, port, val);
160}
161
162static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
163{
164 u32 tmp;
165
166 /* workaround for SATA R-ERR, to ignore phy glitch */
167 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
168 tmp &= ~(1 << 9);
169 tmp |= (1 << 10);
170 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
171
172 /* enable retry 127 times */
173 mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
174
175 /* extend open frame timeout to max */
176 tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
177 tmp &= ~0xffff;
178 tmp |= 0x3fff;
179 mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
180
181	/* workaround for WDTIMEOUT, set to 550 ms */
182 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
183
184	/* do not halt on a different port op during wideport link change */
185 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
186
187 /* workaround for Seagate disk not-found OOB sequence, recv
188 * COMINIT before sending out COMWAKE */
189 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
190 tmp &= 0x0000ffff;
191 tmp |= 0x00fa0000;
192 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
193
194 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
195 tmp &= 0x1fffffff;
196 tmp |= (2U << 29); /* 8 ms retry */
197 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
198}
199
200static inline void mvs_int_sata(struct mvs_info *mvi)
201{
202 u32 tmp;
203 void __iomem *regs = mvi->regs;
204 tmp = mr32(MVS_INT_STAT_SRS_0);
205 if (tmp)
206 mw32(MVS_INT_STAT_SRS_0, tmp);
207 MVS_CHIP_DISP->clear_active_cmds(mvi);
208}
209
210static inline void mvs_int_full(struct mvs_info *mvi)
211{
212 void __iomem *regs = mvi->regs;
213 u32 tmp, stat;
214 int i;
215
216 stat = mr32(MVS_INT_STAT);
217 mvs_int_rx(mvi, false);
218
219 for (i = 0; i < mvi->chip->n_phy; i++) {
220 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
221 if (tmp)
222 mvs_int_port(mvi, i, tmp);
223 }
224
225 if (stat & CINT_SRS)
226 mvs_int_sata(mvi);
227
228 mw32(MVS_INT_STAT, stat);
229}
230
231static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
232{
233 void __iomem *regs = mvi->regs;
234 mw32(MVS_TX_PROD_IDX, tx);
235}
236
237static inline u32 mvs_rx_update(struct mvs_info *mvi)
238{
239 void __iomem *regs = mvi->regs;
240 return mr32(MVS_RX_CONS_IDX);
241}
242
243static inline u32 mvs_get_prd_size(void)
244{
245 return sizeof(struct mvs_prd);
246}
247
248static inline u32 mvs_get_prd_count(void)
249{
250 return MAX_SG_ENTRY;
251}
252
253static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
254{
255 u16 link_stat, link_spd;
256 const char *spd[] = {
257 "UnKnown",
258 "2.5",
259 "5.0",
260 };
261 if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
262 return;
263
264 pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
265 link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
266 if (link_spd >= 3)
267 link_spd = 0;
268 dev_printk(KERN_INFO, mvi->dev,
269 "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
270 (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
271 spd[link_spd]);
272}
273
274static inline u32 mvs_hw_max_link_rate(void)
275{
276 return MAX_LINK_RATE;
277}
278
279#endif /* _MV_CHIPS_H_ */
280
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
new file mode 100644
index 000000000000..f8cb9defb961
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -0,0 +1,502 @@
1/*
2 * Marvell 88SE64xx/88SE94xx constants header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_DEFS_H_
26#define _MV_DEFS_H_
27
28
29enum chip_flavors {
30 chip_6320,
31 chip_6440,
32 chip_6485,
33 chip_9480,
34 chip_9180,
35};
36
37/* driver compile-time configuration */
38enum driver_configuration {
39 MVS_SLOTS = 512, /* command slots */
40 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
41 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
42 /* software requires power-of-2
43 ring size */
44 MVS_SOC_SLOTS = 64,
45 MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2,
46 MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2,
47
48 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
49 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
50 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
51 MVS_OAF_SZ = 64, /* Open address frame buffer size */
52 MVS_QUEUE_SIZE = 32, /* Support Queue depth */
53 MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
54 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
55};
56
57/* unchangeable hardware details */
58enum hardware_details {
59 MVS_MAX_PHYS = 8, /* max. possible phys */
60 MVS_MAX_PORTS = 8, /* max. possible ports */
61 MVS_SOC_PHYS = 4, /* soc phys */
62	MVS_SOC_PORTS		= 4,	/* soc ports */
63	MVS_MAX_DEVICES		= 1024,	/* max supported devices */
64};
65
66/* peripheral registers (BAR2) */
67enum peripheral_registers {
68 SPI_CTL = 0x10, /* EEPROM control */
69 SPI_CMD = 0x14, /* EEPROM command */
70 SPI_DATA = 0x18, /* EEPROM data */
71};
72
73enum peripheral_register_bits {
74 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
75 TWSI_RD = (1U << 4), /* EEPROM read access */
76
77 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
78};
79
80enum hw_register_bits {
81 /* MVS_GBL_CTL */
82 INT_EN = (1U << 1), /* Global int enable */
83 HBA_RST = (1U << 0), /* HBA reset */
84
85 /* MVS_GBL_INT_STAT */
86 INT_XOR = (1U << 4), /* XOR engine event */
87 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
88
89 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
90 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
91 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
92 MODE_AUTO_DET_PORT6 = (1U << 14),
93 MODE_AUTO_DET_PORT5 = (1U << 13),
94 MODE_AUTO_DET_PORT4 = (1U << 12),
95 MODE_AUTO_DET_PORT3 = (1U << 11),
96 MODE_AUTO_DET_PORT2 = (1U << 10),
97 MODE_AUTO_DET_PORT1 = (1U << 9),
98 MODE_AUTO_DET_PORT0 = (1U << 8),
99 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
100 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
101 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
102 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
103 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
104 MODE_SAS_PORT6_MASK = (1U << 6),
105 MODE_SAS_PORT5_MASK = (1U << 5),
106 MODE_SAS_PORT4_MASK = (1U << 4),
107 MODE_SAS_PORT3_MASK = (1U << 3),
108 MODE_SAS_PORT2_MASK = (1U << 2),
109 MODE_SAS_PORT1_MASK = (1U << 1),
110 MODE_SAS_PORT0_MASK = (1U << 0),
111 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
112 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
113 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
114 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
115
116 /* SAS_MODE value may be
117 * dictated (in hw) by values
118 * of SATA_TARGET & AUTO_DET
119 */
120
121 /* MVS_TX_CFG */
122 TX_EN = (1U << 16), /* Enable TX */
123 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
124
125 /* MVS_RX_CFG */
126 RX_EN = (1U << 16), /* Enable RX */
127 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
128
129 /* MVS_INT_COAL */
130 COAL_EN = (1U << 16), /* Enable int coalescing */
131
132 /* MVS_INT_STAT, MVS_INT_MASK */
133 CINT_I2C = (1U << 31), /* I2C event */
134 CINT_SW0 = (1U << 30), /* software event 0 */
135 CINT_SW1 = (1U << 29), /* software event 1 */
136 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
137 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
138 CINT_MEM = (1U << 26), /* int mem parity err */
139 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
140 CINT_SRS = (1U << 3), /* SRS event */
141 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
142 CINT_DONE = (1U << 0), /* cmd completion */
143
144 /* shl for ports 1-3 */
145 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
146 CINT_PORT = (1U << 8), /* port0 event */
147 CINT_PORT_MASK_OFFSET = 8,
148 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
149 CINT_PHY_MASK_OFFSET = 4,
150 CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET),
151
152 /* TX (delivery) ring bits */
153 TXQ_CMD_SHIFT = 29,
154 TXQ_CMD_SSP = 1, /* SSP protocol */
155 TXQ_CMD_SMP = 2, /* SMP protocol */
156 TXQ_CMD_STP = 3, /* STP/SATA protocol */
157 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
158 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
159 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
160 TXQ_MODE_TARGET = 0,
161 TXQ_MODE_INITIATOR = 1,
162 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
163 TXQ_PRI_NORMAL = 0,
164 TXQ_PRI_HIGH = 1,
165 TXQ_SRS_SHIFT = 20, /* SATA register set */
166 TXQ_SRS_MASK = 0x7f,
167 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
168 TXQ_PHY_MASK = 0xff,
169 TXQ_SLOT_MASK = 0xfff, /* slot number */
170
171 /* RX (completion) ring bits */
172 RXQ_GOOD = (1U << 23), /* Response good */
173 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
174 RXQ_CMD_RX = (1U << 20), /* target cmd received */
175 RXQ_ATTN = (1U << 19), /* attention */
176 RXQ_RSP = (1U << 18), /* response frame xfer'd */
177 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
178 RXQ_DONE = (1U << 16), /* cmd complete */
179 RXQ_SLOT_MASK = 0xfff, /* slot number */
180
181 /* mvs_cmd_hdr bits */
182 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
183 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
184
185 /* SSP initiator only */
186 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
187
188 /* SSP initiator or target */
189 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
190
191 /* SSP target only */
192 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
193 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
194 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
195 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
196
197 MCH_SSP_MODE_PASSTHRU = 1,
198 MCH_SSP_MODE_NORMAL = 0,
199 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
200 MCH_FBURST = (1U << 11), /* first burst (SSP) */
201 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
202 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
203 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
204 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
205 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
206 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
207 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
208 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
209
210 CCTL_RST = (1U << 5), /* port logic reset */
211
212 /* 0(LSB first), 1(MSB first) */
213 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
214 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
215 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
216 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
217
218 /* MVS_Px_SER_CTLSTAT (per-phy control) */
219 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
220 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
221 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
222 PHY_RST = (1U << 0), /* phy reset */
223 PHY_READY_MASK = (1U << 20),
224
225 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
226 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
227	PHYEV_DCDR_ERR		= (1U << 23),	/* STP Decoder Error */
228 PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
229 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
230 PHYEV_AN = (1U << 18), /* SATA async notification */
231 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
232 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
233 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
234 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
235 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
236 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
237 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
238 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
239 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
240 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
241 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
242 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
243 PHYEV_ID_DONE = (1U << 2), /* identify done */
244 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
245 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
246
247 /* MVS_PCS */
248 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
249 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
250 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */
251 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
252 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
253 PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */
254 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
255 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
256 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
257 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
258 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
259
260 /* Port n Attached Device Info */
261 PORT_DEV_SSP_TRGT = (1U << 19),
262 PORT_DEV_SMP_TRGT = (1U << 18),
263 PORT_DEV_STP_TRGT = (1U << 17),
264 PORT_DEV_SSP_INIT = (1U << 11),
265 PORT_DEV_SMP_INIT = (1U << 10),
266 PORT_DEV_STP_INIT = (1U << 9),
267 PORT_PHY_ID_MASK = (0xFFU << 24),
268 PORT_SSP_TRGT_MASK = (0x1U << 19),
269 PORT_SSP_INIT_MASK = (0x1U << 11),
270 PORT_DEV_TRGT_MASK = (0x7U << 17),
271 PORT_DEV_INIT_MASK = (0x7U << 9),
272 PORT_DEV_TYPE_MASK = (0x7U << 0),
273
274 /* Port n PHY Status */
275 PHY_RDY = (1U << 2),
276 PHY_DW_SYNC = (1U << 1),
277 PHY_OOB_DTCTD = (1U << 0),
278
279 /* VSR */
280 /* PHYMODE 6 (CDB) */
281 PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
282 PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
283 PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
284 PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
285 PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
286 PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
287 PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
288 PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
289 PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
290 PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
291 PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
292 PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
293 PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
294 PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
295};
296
297/* SAS/SATA configuration port registers, aka phy registers */
298enum sas_sata_config_port_regs {
299 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
300 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
301 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
302 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
303 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
304 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
305 PHYR_SATA_CTL = 0x18, /* SATA control */
306 PHYR_PHY_STAT = 0x1C, /* PHY status */
307 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
308 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
309 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
310 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
311 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
312 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
313 PHYR_WIDE_PORT = 0x38, /* wide port participating */
314 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
315 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
316 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
317 CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */
318 CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */
319 CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */
320 CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */
321 CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */
322 CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */
323 CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */
324 CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */
325 CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */
326 CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */
327 CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */
328 CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */
329 CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */
330 CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */
331};
332
333enum sas_cmd_port_registers {
334 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
335 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
336 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
337 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
338 CMD_OOB_SPACE = 0x110, /* OOB space control register */
339 CMD_OOB_BURST = 0x114, /* OOB burst control register */
340 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
341 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
342 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
343 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
344 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
345 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
346 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
347 CMD_ID_TEST = 0x134, /* ID test register */
348 CMD_PL_TIMER = 0x138, /* PL timer register */
349 CMD_WD_TIMER = 0x13c, /* WD timer register */
350 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
351 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
352 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
353 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
354 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
355 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
356 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
357 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
358 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
359 CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
360 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
361 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
362 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
363 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
364 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
365 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
366 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
367 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
368 CMD_RESET_COUNT = 0x188, /* Reset Count */
369 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
370 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
371 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
372 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
373 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
374 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
375 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
376 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
377 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
378 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
379 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
380 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
381 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
382 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
383 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
384};
385
386enum mvs_info_flags {
387 MVF_MSI = (1U << 0), /* MSI is enabled */
388 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
389 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
390};
391
392enum mvs_event_flags {
393 PHY_PLUG_EVENT = (3U),
394 PHY_PLUG_IN = (1U << 0), /* phy plug in */
395 PHY_PLUG_OUT = (1U << 1), /* phy plug out */
396};
397
398enum mvs_port_type {
399 PORT_TGT_MASK = (1U << 5),
400 PORT_INIT_PORT = (1U << 4),
401 PORT_TGT_PORT = (1U << 3),
402 PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
403 PORT_TYPE_SAS = (1U << 1),
404 PORT_TYPE_SATA = (1U << 0),
405};
406
407/* Command Table Format */
408enum ct_format {
409 /* SSP */
410 SSP_F_H = 0x00,
411 SSP_F_IU = 0x18,
412 SSP_F_MAX = 0x4D,
413 /* STP */
414 STP_CMD_FIS = 0x00,
415 STP_ATAPI_CMD = 0x40,
416 STP_F_MAX = 0x10,
417 /* SMP */
418 SMP_F_T = 0x00,
419 SMP_F_DEP = 0x01,
420 SMP_F_MAX = 0x101,
421};
422
423enum status_buffer {
424 SB_EIR_OFF = 0x00, /* Error Information Record */
425 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
426 SB_RFB_MAX = 0x400, /* RFB size */
427};
428
429enum error_info_rec {
430 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
431 CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
432 RSP_OVER = (1U << 29), /* rsp buffer overflow */
433 RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
434 UNK_FIS = (1U << 27), /* unknown FIS */
435 DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
436 SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
437 TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
438 R_ERR = (1U << 23), /* SATA returned R_ERR prim */
439 RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
440 XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
441 UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
442 DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
443 INTERLOCK = (1U << 15), /* interlock error */
444 NAK = (1U << 14), /* NAK rx'd */
445 ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
446 CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
447 OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
448 PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
449 NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
450 STP_RES_BSY = (1U << 8), /* STP resources busy */
451 BREAK = (1U << 7), /* break received */
452 BAD_DEST = (1U << 6), /* bad destination */
453 BAD_PROTO = (1U << 5), /* protocol not supported */
454 BAD_RATE = (1U << 4), /* cxn rate not supported */
455 WRONG_DEST = (1U << 3), /* wrong destination error */
456 CREDIT_TO = (1U << 2), /* credit timeout */
457 WDOG_TO = (1U << 1), /* watchdog timeout */
458 BUF_PAR = (1U << 0), /* buffer parity error */
459};
460
461enum error_info_rec_2 {
462 SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
463 GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
464 APP_CHK_ERR = (1U << 13), /* Application Check error */
465 REF_CHK_ERR = (1U << 12), /* Reference Check Error */
466 USR_BLK_NM = (1U << 0), /* User Block Number */
467};
468
469enum pci_cfg_register_bits {
470 PCTL_PWR_OFF = (0xFU << 24),
471 PCTL_COM_ON = (0xFU << 20),
472 PCTL_LINK_RST = (0xFU << 16),
473 PCTL_LINK_OFFS = (16),
474 PCTL_PHY_DSBL = (0xFU << 12),
475 PCTL_PHY_DSBL_OFFS = (12),
476 PRD_REQ_SIZE = (0x4000),
477 PRD_REQ_MASK = (0x00007000),
478 PLS_NEG_LINK_WD = (0x3FU << 4),
479 PLS_NEG_LINK_WD_OFFS = 4,
480 PLS_LINK_SPD = (0x0FU << 0),
481 PLS_LINK_SPD_OFFS = 0,
482};
483
484enum open_frame_protocol {
485 PROTOCOL_SMP = 0x0,
486 PROTOCOL_SSP = 0x1,
487 PROTOCOL_STP = 0x2,
488};
489
490/* define for response frame datapres field */
491enum datapres_field {
492 NO_DATA = 0,
493 RESPONSE_DATA = 1,
494 SENSE_DATA = 2,
495};
496
497/* define task management IU */
498struct mvs_tmf_task{
499 u8 tmf;
500 u16 tag_of_task_to_be_managed;
501};
502#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
new file mode 100644
index 000000000000..8646a19f999d
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -0,0 +1,703 @@
1/*
2 * Marvell 88SE64xx/88SE94xx pci init
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#include "mv_sas.h"
27
28static struct scsi_transport_template *mvs_stt;
29static const struct mvs_chip_info mvs_chips[] = {
30 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
31 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
32 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
33 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
34 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
35};
36
37#define SOC_SAS_NUM 2
38
39static struct scsi_host_template mvs_sht = {
40 .module = THIS_MODULE,
41 .name = DRV_NAME,
42 .queuecommand = sas_queuecommand,
43 .target_alloc = sas_target_alloc,
44 .slave_configure = mvs_slave_configure,
45 .slave_destroy = sas_slave_destroy,
46 .scan_finished = mvs_scan_finished,
47 .scan_start = mvs_scan_start,
48 .change_queue_depth = sas_change_queue_depth,
49 .change_queue_type = sas_change_queue_type,
50 .bios_param = sas_bios_param,
51 .can_queue = 1,
52 .cmd_per_lun = 1,
53 .this_id = -1,
54 .sg_tablesize = SG_ALL,
55 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
56 .use_clustering = ENABLE_CLUSTERING,
57 .eh_device_reset_handler = sas_eh_device_reset_handler,
58 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
59 .slave_alloc = mvs_slave_alloc,
60 .target_destroy = sas_target_destroy,
61 .ioctl = sas_ioctl,
62};
63
64static struct sas_domain_function_template mvs_transport_ops = {
65 .lldd_dev_found = mvs_dev_found,
66 .lldd_dev_gone = mvs_dev_gone,
67
68 .lldd_execute_task = mvs_queue_command,
69 .lldd_control_phy = mvs_phy_control,
70
71 .lldd_abort_task = mvs_abort_task,
72 .lldd_abort_task_set = mvs_abort_task_set,
73 .lldd_clear_aca = mvs_clear_aca,
74 .lldd_clear_task_set = mvs_clear_task_set,
75 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
76 .lldd_lu_reset = mvs_lu_reset,
77 .lldd_query_task = mvs_query_task,
78
79 .lldd_port_formed = mvs_port_formed,
80 .lldd_port_deformed = mvs_port_deformed,
81
82};
83
84static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
85{
86 struct mvs_phy *phy = &mvi->phy[phy_id];
87 struct asd_sas_phy *sas_phy = &phy->sas_phy;
88
89 phy->mvi = mvi;
90 init_timer(&phy->timer);
91 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
92 sas_phy->class = SAS;
93 sas_phy->iproto = SAS_PROTOCOL_ALL;
94 sas_phy->tproto = 0;
95 sas_phy->type = PHY_TYPE_PHYSICAL;
96 sas_phy->role = PHY_ROLE_INITIATOR;
97 sas_phy->oob_mode = OOB_NOT_CONNECTED;
98 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
99
100 sas_phy->id = phy_id;
101 sas_phy->sas_addr = &mvi->sas_addr[0];
102 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
103 sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
104 sas_phy->lldd_phy = phy;
105}
106
107static void mvs_free(struct mvs_info *mvi)
108{
109 int i;
110 struct mvs_wq *mwq;
111 int slot_nr;
112
113 if (!mvi)
114 return;
115
116 if (mvi->flags & MVF_FLAG_SOC)
117 slot_nr = MVS_SOC_SLOTS;
118 else
119 slot_nr = MVS_SLOTS;
120
121 for (i = 0; i < mvi->tags_num; i++) {
122 struct mvs_slot_info *slot = &mvi->slot_info[i];
123 if (slot->buf)
124 dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
125 slot->buf, slot->buf_dma);
126 }
127
128 if (mvi->tx)
129 dma_free_coherent(mvi->dev,
130 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
131 mvi->tx, mvi->tx_dma);
132 if (mvi->rx_fis)
133 dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
134 mvi->rx_fis, mvi->rx_fis_dma);
135 if (mvi->rx)
136 dma_free_coherent(mvi->dev,
137 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
138 mvi->rx, mvi->rx_dma);
139 if (mvi->slot)
140 dma_free_coherent(mvi->dev,
141 sizeof(*mvi->slot) * slot_nr,
142 mvi->slot, mvi->slot_dma);
143#ifndef DISABLE_HOTPLUG_DMA_FIX
144 if (mvi->bulk_buffer)
145 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
146 mvi->bulk_buffer, mvi->bulk_buffer_dma);
147#endif
148
149 MVS_CHIP_DISP->chip_iounmap(mvi);
150 if (mvi->shost)
151 scsi_host_put(mvi->shost);
152 list_for_each_entry(mwq, &mvi->wq_list, entry)
153 cancel_delayed_work(&mwq->work_q);
154 kfree(mvi);
155}
156
157#ifdef MVS_USE_TASKLET
158struct tasklet_struct mv_tasklet;
159static void mvs_tasklet(unsigned long opaque)
160{
161 unsigned long flags;
162 u32 stat;
163 u16 core_nr, i = 0;
164
165 struct mvs_info *mvi;
166 struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
167
168 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
169 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
170
171 if (unlikely(!mvi))
172 BUG();
173
174 for (i = 0; i < core_nr; i++) {
175 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
176 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
177 if (stat)
178 MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
179 }
180
181}
182#endif
183
184static irqreturn_t mvs_interrupt(int irq, void *opaque)
185{
186 u32 core_nr, i = 0;
187 u32 stat;
188 struct mvs_info *mvi;
189 struct sas_ha_struct *sha = opaque;
190
191 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
192 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
193
194 if (unlikely(!mvi))
195 return IRQ_NONE;
196
197 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
198 if (!stat)
199 return IRQ_NONE;
200
201#ifdef MVS_USE_TASKLET
202 tasklet_schedule(&mv_tasklet);
203#else
204 for (i = 0; i < core_nr; i++) {
205 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
206 MVS_CHIP_DISP->isr(mvi, irq, stat);
207 }
208#endif
209 return IRQ_HANDLED;
210}
211
212static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
213{
214 int i, slot_nr;
215
216 if (mvi->flags & MVF_FLAG_SOC)
217 slot_nr = MVS_SOC_SLOTS;
218 else
219 slot_nr = MVS_SLOTS;
220
221 spin_lock_init(&mvi->lock);
222 for (i = 0; i < mvi->chip->n_phy; i++) {
223 mvs_phy_init(mvi, i);
224 mvi->port[i].wide_port_phymap = 0;
225 mvi->port[i].port_attached = 0;
226 INIT_LIST_HEAD(&mvi->port[i].list);
227 }
228 for (i = 0; i < MVS_MAX_DEVICES; i++) {
229 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
230 mvi->devices[i].dev_type = NO_DEVICE;
231 mvi->devices[i].device_id = i;
232 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
233 }
234
235 /*
236 * alloc and init our DMA areas
237 */
238 mvi->tx = dma_alloc_coherent(mvi->dev,
239 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
240 &mvi->tx_dma, GFP_KERNEL);
241 if (!mvi->tx)
242 goto err_out;
243 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
244 mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
245 &mvi->rx_fis_dma, GFP_KERNEL);
246 if (!mvi->rx_fis)
247 goto err_out;
248 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
249
250 mvi->rx = dma_alloc_coherent(mvi->dev,
251 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
252 &mvi->rx_dma, GFP_KERNEL);
253 if (!mvi->rx)
254 goto err_out;
255 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
256 mvi->rx[0] = cpu_to_le32(0xfff);
257 mvi->rx_cons = 0xfff;
258
259 mvi->slot = dma_alloc_coherent(mvi->dev,
260 sizeof(*mvi->slot) * slot_nr,
261 &mvi->slot_dma, GFP_KERNEL);
262 if (!mvi->slot)
263 goto err_out;
264 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
265
266#ifndef DISABLE_HOTPLUG_DMA_FIX
267 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
268 TRASH_BUCKET_SIZE,
269 &mvi->bulk_buffer_dma, GFP_KERNEL);
270 if (!mvi->bulk_buffer)
271 goto err_out;
272#endif
273 for (i = 0; i < slot_nr; i++) {
274 struct mvs_slot_info *slot = &mvi->slot_info[i];
275
276 slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
277 &slot->buf_dma, GFP_KERNEL);
278 if (!slot->buf) {
279 printk(KERN_DEBUG"failed to allocate slot->buf.\n");
280 goto err_out;
281 }
282 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
283 ++mvi->tags_num;
284 }
285 /* Initialize tags */
286 mvs_tag_init(mvi);
287 return 0;
288err_out:
289 return 1;
290}
291
292
293int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
294{
295 unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
296 struct pci_dev *pdev = mvi->pdev;
297 if (bar_ex != -1) {
298 /*
299 * ioremap main and peripheral registers
300 */
301 res_start = pci_resource_start(pdev, bar_ex);
302 res_len = pci_resource_len(pdev, bar_ex);
303 if (!res_start || !res_len)
304 goto err_out;
305
306 res_flag_ex = pci_resource_flags(pdev, bar_ex);
307 if (res_flag_ex & IORESOURCE_MEM) {
308 if (res_flag_ex & IORESOURCE_CACHEABLE)
309 mvi->regs_ex = ioremap(res_start, res_len);
310 else
311 mvi->regs_ex = ioremap_nocache(res_start,
312 res_len);
313 } else
314 mvi->regs_ex = (void *)res_start;
315 if (!mvi->regs_ex)
316 goto err_out;
317 }
318
319 res_start = pci_resource_start(pdev, bar);
320 res_len = pci_resource_len(pdev, bar);
321 if (!res_start || !res_len)
322 goto err_out;
323
324 res_flag = pci_resource_flags(pdev, bar);
325 if (res_flag & IORESOURCE_CACHEABLE)
326 mvi->regs = ioremap(res_start, res_len);
327 else
328 mvi->regs = ioremap_nocache(res_start, res_len);
329
330 if (!mvi->regs) {
331 if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
332 iounmap(mvi->regs_ex);
333 mvi->regs_ex = NULL;
334 goto err_out;
335 }
336
337 return 0;
338err_out:
339 return -1;
340}
341
342void mvs_iounmap(void __iomem *regs)
343{
344 iounmap(regs);
345}
346
347static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
348 const struct pci_device_id *ent,
349 struct Scsi_Host *shost, unsigned int id)
350{
351 struct mvs_info *mvi;
352 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
353
354 mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
355 GFP_KERNEL);
356 if (!mvi)
357 return NULL;
358
359 mvi->pdev = pdev;
360 mvi->dev = &pdev->dev;
361 mvi->chip_id = ent->driver_data;
362 mvi->chip = &mvs_chips[mvi->chip_id];
363 INIT_LIST_HEAD(&mvi->wq_list);
364 mvi->irq = pdev->irq;
365
366 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
367 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
368
369 mvi->id = id;
370 mvi->sas = sha;
371 mvi->shost = shost;
372#ifdef MVS_USE_TASKLET
373 tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
374#endif
375
376 if (MVS_CHIP_DISP->chip_ioremap(mvi))
377 goto err_out;
378 if (!mvs_alloc(mvi, shost))
379 return mvi;
380err_out:
381 mvs_free(mvi);
382 return NULL;
383}
384
385/* move to PCI layer or libata core? */
386static int pci_go_64(struct pci_dev *pdev)
387{
388 int rc;
389
390 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
391 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
392 if (rc) {
393 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
394 if (rc) {
395 dev_printk(KERN_ERR, &pdev->dev,
396 "64-bit DMA enable failed\n");
397 return rc;
398 }
399 }
400 } else {
401 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
402 if (rc) {
403 dev_printk(KERN_ERR, &pdev->dev,
404 "32-bit DMA enable failed\n");
405 return rc;
406 }
407 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
408 if (rc) {
409 dev_printk(KERN_ERR, &pdev->dev,
410 "32-bit consistent DMA enable failed\n");
411 return rc;
412 }
413 }
414
415 return rc;
416}
417
418static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
419 const struct mvs_chip_info *chip_info)
420{
421 int phy_nr, port_nr; unsigned short core_nr;
422 struct asd_sas_phy **arr_phy;
423 struct asd_sas_port **arr_port;
424 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
425
426 core_nr = chip_info->n_host;
427 phy_nr = core_nr * chip_info->n_phy;
428 port_nr = phy_nr;
429
430 memset(sha, 0x00, sizeof(struct sas_ha_struct));
431 arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
432 arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
433 if (!arr_phy || !arr_port)
434 goto exit_free;
435
436 sha->sas_phy = arr_phy;
437 sha->sas_port = arr_port;
438
439 sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
440 if (!sha->lldd_ha)
441 goto exit_free;
442
443 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
444
445 shost->transportt = mvs_stt;
446 shost->max_id = 128;
447 shost->max_lun = ~0;
448 shost->max_channel = 1;
449 shost->max_cmd_len = 16;
450
451 return 0;
452exit_free:
453 kfree(arr_phy);
454 kfree(arr_port);
455 return -1;
456
457}
458
459static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
460 const struct mvs_chip_info *chip_info)
461{
462 int can_queue, i = 0, j = 0;
463 struct mvs_info *mvi = NULL;
464 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
465 unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
466
467 for (j = 0; j < nr_core; j++) {
468 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
469 for (i = 0; i < chip_info->n_phy; i++) {
470 sha->sas_phy[j * chip_info->n_phy + i] =
471 &mvi->phy[i].sas_phy;
472 sha->sas_port[j * chip_info->n_phy + i] =
473 &mvi->port[i].sas_port;
474 }
475 }
476
477 sha->sas_ha_name = DRV_NAME;
478 sha->dev = mvi->dev;
479 sha->lldd_module = THIS_MODULE;
480 sha->sas_addr = &mvi->sas_addr[0];
481
482 sha->num_phys = nr_core * chip_info->n_phy;
483
484 sha->lldd_max_execute_num = 1;
485
486 if (mvi->flags & MVF_FLAG_SOC)
487 can_queue = MVS_SOC_CAN_QUEUE;
488 else
489 can_queue = MVS_CAN_QUEUE;
490
491 sha->lldd_queue_size = can_queue;
492 shost->can_queue = can_queue;
493 mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
494 sha->core.shost = mvi->shost;
495}
496
497static void mvs_init_sas_add(struct mvs_info *mvi)
498{
499 u8 i;
500 for (i = 0; i < mvi->chip->n_phy; i++) {
501 mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
502 mvi->phy[i].dev_sas_addr =
503 cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
504 }
505
506 memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
507}
508
509static int __devinit mvs_pci_init(struct pci_dev *pdev,
510 const struct pci_device_id *ent)
511{
512 unsigned int rc, nhost = 0;
513 struct mvs_info *mvi;
514 irq_handler_t irq_handler = mvs_interrupt;
515 struct Scsi_Host *shost = NULL;
516 const struct mvs_chip_info *chip;
517
518 dev_printk(KERN_INFO, &pdev->dev,
519 "mvsas: driver version %s\n", DRV_VERSION);
520 rc = pci_enable_device(pdev);
521 if (rc)
522 goto err_out_enable;
523
524 pci_set_master(pdev);
525
526 rc = pci_request_regions(pdev, DRV_NAME);
527 if (rc)
528 goto err_out_disable;
529
530 rc = pci_go_64(pdev);
531 if (rc)
532 goto err_out_regions;
533
534 shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
535 if (!shost) {
536 rc = -ENOMEM;
537 goto err_out_regions;
538 }
539
540 chip = &mvs_chips[ent->driver_data];
541 SHOST_TO_SAS_HA(shost) =
542 kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
543 if (!SHOST_TO_SAS_HA(shost)) {
544 kfree(shost);
545 rc = -ENOMEM;
546 goto err_out_regions;
547 }
548
549 rc = mvs_prep_sas_ha_init(shost, chip);
550 if (rc) {
551 kfree(shost);
552 rc = -ENOMEM;
553 goto err_out_regions;
554 }
555
556 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
557
558 do {
559 mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
560 if (!mvi) {
561 rc = -ENOMEM;
562 goto err_out_regions;
563 }
564
565 mvs_init_sas_add(mvi);
566
567 mvi->instance = nhost;
568 rc = MVS_CHIP_DISP->chip_init(mvi);
569 if (rc) {
570 mvs_free(mvi);
571 goto err_out_regions;
572 }
573 nhost++;
574 } while (nhost < chip->n_host);
575
576 mvs_post_sas_ha_init(shost, chip);
577
578 rc = scsi_add_host(shost, &pdev->dev);
579 if (rc)
580 goto err_out_shost;
581
582 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
583 if (rc)
584 goto err_out_shost;
585 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
586 DRV_NAME, SHOST_TO_SAS_HA(shost));
587 if (rc)
588 goto err_not_sas;
589
590 MVS_CHIP_DISP->interrupt_enable(mvi);
591
592 scsi_scan_host(mvi->shost);
593
594 return 0;
595
596err_not_sas:
597 sas_unregister_ha(SHOST_TO_SAS_HA(shost));
598err_out_shost:
599 scsi_remove_host(mvi->shost);
600err_out_regions:
601 pci_release_regions(pdev);
602err_out_disable:
603 pci_disable_device(pdev);
604err_out_enable:
605 return rc;
606}
607
608static void __devexit mvs_pci_remove(struct pci_dev *pdev)
609{
610 unsigned short core_nr, i = 0;
611 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
612 struct mvs_info *mvi = NULL;
613
614 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
615 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
616
617#ifdef MVS_USE_TASKLET
618 tasklet_kill(&mv_tasklet);
619#endif
620
621 pci_set_drvdata(pdev, NULL);
622 sas_unregister_ha(sha);
623 sas_remove_host(mvi->shost);
624 scsi_remove_host(mvi->shost);
625
626 MVS_CHIP_DISP->interrupt_disable(mvi);
627 free_irq(mvi->irq, sha);
628 for (i = 0; i < core_nr; i++) {
629 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
630 mvs_free(mvi);
631 }
632 kfree(sha->sas_phy);
633 kfree(sha->sas_port);
634 kfree(sha);
635 pci_release_regions(pdev);
636 pci_disable_device(pdev);
637 return;
638}
639
640static struct pci_device_id __devinitdata mvs_pci_table[] = {
641 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
642 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
643 {
644 .vendor = PCI_VENDOR_ID_MARVELL,
645 .device = 0x6440,
646 .subvendor = PCI_ANY_ID,
647 .subdevice = 0x6480,
648 .class = 0,
649 .class_mask = 0,
650 .driver_data = chip_6485,
651 },
652 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
653 { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
654 { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
655 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
656
657 { } /* terminate list */
658};
659
660static struct pci_driver mvs_pci_driver = {
661 .name = DRV_NAME,
662 .id_table = mvs_pci_table,
663 .probe = mvs_pci_init,
664 .remove = __devexit_p(mvs_pci_remove),
665};
666
667/* task handler */
668struct task_struct *mvs_th;
669static int __init mvs_init(void)
670{
671 int rc;
672 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
673 if (!mvs_stt)
674 return -ENOMEM;
675
676 rc = pci_register_driver(&mvs_pci_driver);
677
678 if (rc)
679 goto err_out;
680
681 return 0;
682
683err_out:
684 sas_release_transport(mvs_stt);
685 return rc;
686}
687
688static void __exit mvs_exit(void)
689{
690 pci_unregister_driver(&mvs_pci_driver);
691 sas_release_transport(mvs_stt);
692}
693
694module_init(mvs_init);
695module_exit(mvs_exit);
696
697MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
698MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
699MODULE_VERSION(DRV_VERSION);
700MODULE_LICENSE("GPL");
701#ifdef CONFIG_PCI
702MODULE_DEVICE_TABLE(pci, mvs_pci_table);
703#endif
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
new file mode 100644
index 000000000000..0d2138641214
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -0,0 +1,2154 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26
27static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
28{
29 if (task->lldd_task) {
30 struct mvs_slot_info *slot;
31 slot = task->lldd_task;
32 *tag = slot->slot_tag;
33 return 1;
34 }
35 return 0;
36}
37
38void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
39{
40 void *bitmap = &mvi->tags;
41 clear_bit(tag, bitmap);
42}
43
44void mvs_tag_free(struct mvs_info *mvi, u32 tag)
45{
46 mvs_tag_clear(mvi, tag);
47}
48
49void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
50{
51 void *bitmap = &mvi->tags;
52 set_bit(tag, bitmap);
53}
54
55inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
56{
57 unsigned int index, tag;
58 void *bitmap = &mvi->tags;
59
60 index = find_first_zero_bit(bitmap, mvi->tags_num);
61 tag = index;
62 if (tag >= mvi->tags_num)
63 return -SAS_QUEUE_FULL;
64 mvs_tag_set(mvi, tag);
65 *tag_out = tag;
66 return 0;
67}
68
69void mvs_tag_init(struct mvs_info *mvi)
70{
71 int i;
72 for (i = 0; i < mvi->tags_num; ++i)
73 mvs_tag_clear(mvi, i);
74}
75
76void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
77{
78 u32 i;
79 u32 run;
80 u32 offset;
81
82 offset = 0;
83 while (size) {
84 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
85 if (size >= 16)
86 run = 16;
87 else
88 run = size;
89 size -= run;
90 for (i = 0; i < 16; i++) {
91 if (i < run)
92 printk(KERN_DEBUG"%02X ", (u32)data[i]);
93 else
94 printk(KERN_DEBUG" ");
95 }
96 printk(KERN_DEBUG": ");
97 for (i = 0; i < run; i++)
98 printk(KERN_DEBUG"%c",
99 isalnum(data[i]) ? data[i] : '.');
100 printk(KERN_DEBUG"\n");
101 data = &data[16];
102 offset += run;
103 }
104 printk(KERN_DEBUG"\n");
105}
106
107#if (_MV_DUMP > 1)
108static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
109 enum sas_protocol proto)
110{
111 u32 offset;
112 struct mvs_slot_info *slot = &mvi->slot_info[tag];
113
114 offset = slot->cmd_size + MVS_OAF_SZ +
115 MVS_CHIP_DISP->prd_size() * slot->n_elem;
116 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
117 tag);
118 mvs_hexdump(32, (u8 *) slot->response,
119 (u32) slot->buf_dma + offset);
120}
121#endif
122
123static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
124 enum sas_protocol proto)
125{
126#if (_MV_DUMP > 1)
127 u32 sz, w_ptr;
128 u64 addr;
129 struct mvs_slot_info *slot = &mvi->slot_info[tag];
130
131 /*Delivery Queue */
132 sz = MVS_CHIP_SLOT_SZ;
133 w_ptr = slot->tx;
134 addr = mvi->tx_dma;
135 dev_printk(KERN_DEBUG, mvi->dev,
136 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
137 dev_printk(KERN_DEBUG, mvi->dev,
138 "Delivery Queue Base Address=0x%llX (PA)"
139 "(tx_dma=0x%llX), Entry=%04d\n",
140 addr, (unsigned long long)mvi->tx_dma, w_ptr);
141 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
142 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
143 /*Command List */
144 addr = mvi->slot_dma;
145 dev_printk(KERN_DEBUG, mvi->dev,
146 "Command List Base Address=0x%llX (PA)"
147 "(slot_dma=0x%llX), Header=%03d\n",
148 addr, (unsigned long long)slot->buf_dma, tag);
149 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
150 /*mvs_cmd_hdr */
151 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
152 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
153 /*1.command table area */
154 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
155 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
156 /*2.open address frame area */
157 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
158 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
159 (u32) slot->buf_dma + slot->cmd_size);
160 /*3.status buffer */
161 mvs_hba_sb_dump(mvi, tag, proto);
162 /*4.PRD table */
163 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
164 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
165 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
166 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
167#endif
168}
169
170static void mvs_hba_cq_dump(struct mvs_info *mvi)
171{
172#if (_MV_DUMP > 2)
173 u64 addr;
174 void __iomem *regs = mvi->regs;
175 u32 entry = mvi->rx_cons + 1;
176 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
177
178 /*Completion Queue */
179 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
180 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
181 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
182 dev_printk(KERN_DEBUG, mvi->dev,
183 "Completion List Base Address=0x%llX (PA), "
184 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
185 addr, entry - 1, mvi->rx[0]);
186 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
187 mvi->rx_dma + sizeof(u32) * entry);
188#endif
189}
190
191void mvs_get_sas_addr(void *buf, u32 buflen)
192{
193 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
194}
195
196struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
197{
198 unsigned long i = 0, j = 0, hi = 0;
199 struct sas_ha_struct *sha = dev->port->ha;
200 struct mvs_info *mvi = NULL;
201 struct asd_sas_phy *phy;
202
203 while (sha->sas_port[i]) {
204 if (sha->sas_port[i] == dev->port) {
205 phy = container_of(sha->sas_port[i]->phy_list.next,
206 struct asd_sas_phy, port_phy_el);
207 j = 0;
208 while (sha->sas_phy[j]) {
209 if (sha->sas_phy[j] == phy)
210 break;
211 j++;
212 }
213 break;
214 }
215 i++;
216 }
217 hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
218 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
219
220 return mvi;
221
222}
223
224/* FIXME */
225int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
226{
227 unsigned long i = 0, j = 0, n = 0, num = 0;
228 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
229 struct mvs_info *mvi = mvi_dev->mvi_info;
230 struct sas_ha_struct *sha = dev->port->ha;
231
232 while (sha->sas_port[i]) {
233 if (sha->sas_port[i] == dev->port) {
234 struct asd_sas_phy *phy;
235 list_for_each_entry(phy,
236 &sha->sas_port[i]->phy_list, port_phy_el) {
237 j = 0;
238 while (sha->sas_phy[j]) {
239 if (sha->sas_phy[j] == phy)
240 break;
241 j++;
242 }
243 phyno[n] = (j >= mvi->chip->n_phy) ?
244 (j - mvi->chip->n_phy) : j;
245 num++;
246 n++;
247 }
248 break;
249 }
250 i++;
251 }
252 return num;
253}
254
255static inline void mvs_free_reg_set(struct mvs_info *mvi,
256 struct mvs_device *dev)
257{
258 if (!dev) {
259 mv_printk("device has been free.\n");
260 return;
261 }
262 if (dev->runing_req != 0)
263 return;
264 if (dev->taskfileset == MVS_ID_NOT_MAPPED)
265 return;
266 MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
267}
268
269static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
270 struct mvs_device *dev)
271{
272 if (dev->taskfileset != MVS_ID_NOT_MAPPED)
273 return 0;
274 return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
275}
276
277void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
278{
279 u32 no;
280 for_each_phy(phy_mask, phy_mask, no) {
281 if (!(phy_mask & 1))
282 continue;
283 MVS_CHIP_DISP->phy_reset(mvi, no, hard);
284 }
285}
286
287/* FIXME: locking? */
288int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
289 void *funcdata)
290{
291 int rc = 0, phy_id = sas_phy->id;
292 u32 tmp, i = 0, hi;
293 struct sas_ha_struct *sha = sas_phy->ha;
294 struct mvs_info *mvi = NULL;
295
296 while (sha->sas_phy[i]) {
297 if (sha->sas_phy[i] == sas_phy)
298 break;
299 i++;
300 }
301 hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
302 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
303
304 switch (func) {
305 case PHY_FUNC_SET_LINK_RATE:
306 MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
307 break;
308
309 case PHY_FUNC_HARD_RESET:
310 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
311 if (tmp & PHY_RST_HARD)
312 break;
313 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
314 break;
315
316 case PHY_FUNC_LINK_RESET:
317 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
318 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
319 break;
320
321 case PHY_FUNC_DISABLE:
322 MVS_CHIP_DISP->phy_disable(mvi, phy_id);
323 break;
324 case PHY_FUNC_RELEASE_SPINUP_HOLD:
325 default:
326 rc = -EOPNOTSUPP;
327 }
328 msleep(200);
329 return rc;
330}
331
332void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
333 u32 off_lo, u32 off_hi, u64 sas_addr)
334{
335 u32 lo = (u32)sas_addr;
336 u32 hi = (u32)(sas_addr>>32);
337
338 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
339 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
340 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
341 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
342}
343
344static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
345{
346 struct mvs_phy *phy = &mvi->phy[i];
347 struct asd_sas_phy *sas_phy = &phy->sas_phy;
348 struct sas_ha_struct *sas_ha;
349 if (!phy->phy_attached)
350 return;
351
352 if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
353 && phy->phy_type & PORT_TYPE_SAS) {
354 return;
355 }
356
357 sas_ha = mvi->sas;
358 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
359
360 if (sas_phy->phy) {
361 struct sas_phy *sphy = sas_phy->phy;
362
363 sphy->negotiated_linkrate = sas_phy->linkrate;
364 sphy->minimum_linkrate = phy->minimum_linkrate;
365 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
366 sphy->maximum_linkrate = phy->maximum_linkrate;
367 sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
368 }
369
370 if (phy->phy_type & PORT_TYPE_SAS) {
371 struct sas_identify_frame *id;
372
373 id = (struct sas_identify_frame *)phy->frame_rcvd;
374 id->dev_type = phy->identify.device_type;
375 id->initiator_bits = SAS_PROTOCOL_ALL;
376 id->target_bits = phy->identify.target_port_protocols;
377 } else if (phy->phy_type & PORT_TYPE_SATA) {
378 /*Nothing*/
379 }
380 mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy);
381
382 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
383
384 mvi->sas->notify_port_event(sas_phy,
385 PORTE_BYTES_DMAED);
386}
387
388int mvs_slave_alloc(struct scsi_device *scsi_dev)
389{
390 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
391 if (dev_is_sata(dev)) {
392 /* We don't need to rescan targets
393 * if the REPORT_LUNS request fails
394 */
395 if (scsi_dev->lun > 0)
396 return -ENXIO;
397 scsi_dev->tagged_supported = 1;
398 }
399
400 return sas_slave_alloc(scsi_dev);
401}
402
403int mvs_slave_configure(struct scsi_device *sdev)
404{
405 struct domain_device *dev = sdev_to_domain_dev(sdev);
406 int ret = sas_slave_configure(sdev);
407
408 if (ret)
409 return ret;
410 if (dev_is_sata(dev)) {
411 /* may set PIO mode */
412 #if MV_DISABLE_NCQ
413 struct ata_port *ap = dev->sata_dev.ap;
414 struct ata_device *adev = ap->link.device;
415 adev->flags |= ATA_DFLAG_NCQ_OFF;
416 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
417 #endif
418 }
419 return 0;
420}
421
422void mvs_scan_start(struct Scsi_Host *shost)
423{
424 int i, j;
425 unsigned short core_nr;
426 struct mvs_info *mvi;
427 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
428
429 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
430
431 for (j = 0; j < core_nr; j++) {
432 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
433 for (i = 0; i < mvi->chip->n_phy; ++i)
434 mvs_bytes_dmaed(mvi, i);
435 }
436}
437
438int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
439{
440 /* give the phy enabling interrupt event time to come in (1s
441 * is empirically about all it takes) */
442 if (time < HZ)
443 return 0;
444 /* Wait for discovery to finish */
445 scsi_flush_work(shost);
446 return 1;
447}
448
449static int mvs_task_prep_smp(struct mvs_info *mvi,
450 struct mvs_task_exec_info *tei)
451{
452 int elem, rc, i;
453 struct sas_task *task = tei->task;
454 struct mvs_cmd_hdr *hdr = tei->hdr;
455 struct domain_device *dev = task->dev;
456 struct asd_sas_port *sas_port = dev->port;
457 struct scatterlist *sg_req, *sg_resp;
458 u32 req_len, resp_len, tag = tei->tag;
459 void *buf_tmp;
460 u8 *buf_oaf;
461 dma_addr_t buf_tmp_dma;
462 void *buf_prd;
463 struct mvs_slot_info *slot = &mvi->slot_info[tag];
464 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
465#if _MV_DUMP
466 u8 *buf_cmd;
467 void *from;
468#endif
469 /*
470 * DMA-map SMP request, response buffers
471 */
472 sg_req = &task->smp_task.smp_req;
473 elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
474 if (!elem)
475 return -ENOMEM;
476 req_len = sg_dma_len(sg_req);
477
478 sg_resp = &task->smp_task.smp_resp;
479 elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
480 if (!elem) {
481 rc = -ENOMEM;
482 goto err_out;
483 }
484 resp_len = SB_RFB_MAX;
485
486 /* must be in dwords */
487 if ((req_len & 0x3) || (resp_len & 0x3)) {
488 rc = -EINVAL;
489 goto err_out_2;
490 }
491
492 /*
493 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
494 */
495
496 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
497 buf_tmp = slot->buf;
498 buf_tmp_dma = slot->buf_dma;
499
500#if _MV_DUMP
501 buf_cmd = buf_tmp;
502 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
503 buf_tmp += req_len;
504 buf_tmp_dma += req_len;
505 slot->cmd_size = req_len;
506#else
507 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
508#endif
509
510 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
511 buf_oaf = buf_tmp;
512 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
513
514 buf_tmp += MVS_OAF_SZ;
515 buf_tmp_dma += MVS_OAF_SZ;
516
517 /* region 3: PRD table *********************************** */
518 buf_prd = buf_tmp;
519 if (tei->n_elem)
520 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
521 else
522 hdr->prd_tbl = 0;
523
524 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
525 buf_tmp += i;
526 buf_tmp_dma += i;
527
528 /* region 4: status buffer (the larger the PRD, the smaller this buf) ** */
529 slot->response = buf_tmp;
530 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
531 if (mvi->flags & MVF_FLAG_SOC)
532 hdr->reserved[0] = 0;
533
534 /*
535 * Fill in TX ring and command slot header
536 */
537 slot->tx = mvi->tx_prod;
538 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
539 TXQ_MODE_I | tag |
540 (sas_port->phy_mask << TXQ_PHY_SHIFT));
541
542 hdr->flags |= flags;
543 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
544 hdr->tags = cpu_to_le32(tag);
545 hdr->data_len = 0;
546
547 /* generate open address frame hdr (first 12 bytes) */
548 /* initiator, SMP, ftype 1h */
549 buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
550 buf_oaf[1] = dev->linkrate & 0xf;
551 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
552 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
553
554 /* fill in PRD (scatter/gather) table, if any */
555 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
556
557#if _MV_DUMP
558 /* copy cmd table */
559 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
560 memcpy(buf_cmd, from + sg_req->offset, req_len);
561 kunmap_atomic(from, KM_IRQ0);
562#endif
563 return 0;
564
565err_out_2:
566 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
567 PCI_DMA_FROMDEVICE);
568err_out:
569 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
570 PCI_DMA_TODEVICE);
571 return rc;
572}
573
574static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
575{
576 struct ata_queued_cmd *qc = task->uldd_task;
577
578 if (qc) {
579 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
580 qc->tf.command == ATA_CMD_FPDMA_READ) {
581 *tag = qc->tag;
582 return 1;
583 }
584 }
585
586 return 0;
587}
588
589static int mvs_task_prep_ata(struct mvs_info *mvi,
590 struct mvs_task_exec_info *tei)
591{
592 struct sas_task *task = tei->task;
593 struct domain_device *dev = task->dev;
594 struct mvs_device *mvi_dev = dev->lldd_dev;
595 struct mvs_cmd_hdr *hdr = tei->hdr;
596 struct asd_sas_port *sas_port = dev->port;
597 struct mvs_slot_info *slot;
598 void *buf_prd;
599 u32 tag = tei->tag, hdr_tag;
600 u32 flags, del_q;
601 void *buf_tmp;
602 u8 *buf_cmd, *buf_oaf;
603 dma_addr_t buf_tmp_dma;
604 u32 i, req_len, resp_len;
605 const u32 max_resp_len = SB_RFB_MAX;
606
607 if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
608 mv_dprintk("Have not enough regiset for dev %d.\n",
609 mvi_dev->device_id);
610 return -EBUSY;
611 }
612 slot = &mvi->slot_info[tag];
613 slot->tx = mvi->tx_prod;
614 del_q = TXQ_MODE_I | tag |
615 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
616 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
617 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
618 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
619
620#ifndef DISABLE_HOTPLUG_DMA_FIX
621 if (task->data_dir == DMA_FROM_DEVICE)
622 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
623 else
624 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
625#else
626 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
627#endif
628 if (task->ata_task.use_ncq)
629 flags |= MCH_FPDMA;
630 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
631 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
632 flags |= MCH_ATAPI;
633 }
634
635 /* FIXME: fill in port multiplier number */
636
637 hdr->flags = cpu_to_le32(flags);
638
639 /* FIXME: the low-order 5 bits hold the TAG if NCQ is enabled */
640 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
641 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
642 else
643 hdr_tag = tag;
644
645 hdr->tags = cpu_to_le32(hdr_tag);
646
647 hdr->data_len = cpu_to_le32(task->total_xfer_len);
648
649 /*
650 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
651 */
652
653 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
654 buf_cmd = buf_tmp = slot->buf;
655 buf_tmp_dma = slot->buf_dma;
656
657 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
658
659 buf_tmp += MVS_ATA_CMD_SZ;
660 buf_tmp_dma += MVS_ATA_CMD_SZ;
661#if _MV_DUMP
662 slot->cmd_size = MVS_ATA_CMD_SZ;
663#endif
664
665 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
666 /* used for STP. unused for SATA? */
667 buf_oaf = buf_tmp;
668 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
669
670 buf_tmp += MVS_OAF_SZ;
671 buf_tmp_dma += MVS_OAF_SZ;
672
673 /* region 3: PRD table ********************************************* */
674 buf_prd = buf_tmp;
675
676 if (tei->n_elem)
677 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
678 else
679 hdr->prd_tbl = 0;
680 i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
681
682 buf_tmp += i;
683 buf_tmp_dma += i;
684
685 /* region 4: status buffer (the larger the PRD, the smaller this buf) ** */
686 /* FIXME: probably unused, for SATA. kept here just in case
687 * we get a STP/SATA error information record
688 */
689 slot->response = buf_tmp;
690 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
691 if (mvi->flags & MVF_FLAG_SOC)
692 hdr->reserved[0] = 0;
693
694 req_len = sizeof(struct host_to_dev_fis);
695 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
696 sizeof(struct mvs_err_info) - i;
697
698 /* request, response lengths */
699 resp_len = min(resp_len, max_resp_len);
700 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
701
702 if (likely(!task->ata_task.device_control_reg_update))
703 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
704 /* fill in command FIS and ATAPI CDB */
705 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
706 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
707 memcpy(buf_cmd + STP_ATAPI_CMD,
708 task->ata_task.atapi_packet, 16);
709
710 /* generate open address frame hdr (first 12 bytes) */
711 /* initiator, STP, ftype 1h */
712 buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
713 buf_oaf[1] = dev->linkrate & 0xf;
714 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
715 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
716
717 /* fill in PRD (scatter/gather) table, if any */
718 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
719#ifndef DISABLE_HOTPLUG_DMA_FIX
720 if (task->data_dir == DMA_FROM_DEVICE)
721 MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
722 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
723#endif
724 return 0;
725}
726
727static int mvs_task_prep_ssp(struct mvs_info *mvi,
728 struct mvs_task_exec_info *tei, int is_tmf,
729 struct mvs_tmf_task *tmf)
730{
731 struct sas_task *task = tei->task;
732 struct mvs_cmd_hdr *hdr = tei->hdr;
733 struct mvs_port *port = tei->port;
734 struct domain_device *dev = task->dev;
735 struct mvs_device *mvi_dev = dev->lldd_dev;
736 struct asd_sas_port *sas_port = dev->port;
737 struct mvs_slot_info *slot;
738 void *buf_prd;
739 struct ssp_frame_hdr *ssp_hdr;
740 void *buf_tmp;
741 u8 *buf_cmd, *buf_oaf, fburst = 0;
742 dma_addr_t buf_tmp_dma;
743 u32 flags;
744 u32 resp_len, req_len, i, tag = tei->tag;
745 const u32 max_resp_len = SB_RFB_MAX;
746 u32 phy_mask;
747
748 slot = &mvi->slot_info[tag];
749
750 phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
751 sas_port->phy_mask) & TXQ_PHY_MASK;
752
753 slot->tx = mvi->tx_prod;
754 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
755 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
756 (phy_mask << TXQ_PHY_SHIFT));
757
758 flags = MCH_RETRY;
759 if (task->ssp_task.enable_first_burst) {
760 flags |= MCH_FBURST;
761 fburst = (1 << 7);
762 }
763 if (is_tmf)
764 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
765 else
766 flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
767 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
768 hdr->tags = cpu_to_le32(tag);
769 hdr->data_len = cpu_to_le32(task->total_xfer_len);
770
771 /*
772 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
773 */
774
775 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
776 buf_cmd = buf_tmp = slot->buf;
777 buf_tmp_dma = slot->buf_dma;
778
779 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
780
781 buf_tmp += MVS_SSP_CMD_SZ;
782 buf_tmp_dma += MVS_SSP_CMD_SZ;
783#if _MV_DUMP
784 slot->cmd_size = MVS_SSP_CMD_SZ;
785#endif
786
787 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
788 buf_oaf = buf_tmp;
789 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
790
791 buf_tmp += MVS_OAF_SZ;
792 buf_tmp_dma += MVS_OAF_SZ;
793
794 /* region 3: PRD table ********************************************* */
795 buf_prd = buf_tmp;
796 if (tei->n_elem)
797 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
798 else
799 hdr->prd_tbl = 0;
800
801 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
802 buf_tmp += i;
803 buf_tmp_dma += i;
804
805 /* region 4: status buffer (the larger the PRD, the smaller this buf) ** */
806 slot->response = buf_tmp;
807 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
808 if (mvi->flags & MVF_FLAG_SOC)
809 hdr->reserved[0] = 0;
810
811 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
812 sizeof(struct mvs_err_info) - i;
813 resp_len = min(resp_len, max_resp_len);
814
815 req_len = sizeof(struct ssp_frame_hdr) + 28;
816
817 /* request, response lengths */
818 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
819
820 /* generate open address frame hdr (first 12 bytes) */
821 /* initiator, SSP, ftype 1h */
822 buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
823 buf_oaf[1] = dev->linkrate & 0xf;
824 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
825 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
826
827 /* fill in SSP frame header (Command Table.SSP frame header) */
828 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
829
830 if (is_tmf)
831 ssp_hdr->frame_type = SSP_TASK;
832 else
833 ssp_hdr->frame_type = SSP_COMMAND;
834
835 memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
836 HASHED_SAS_ADDR_SIZE);
837 memcpy(ssp_hdr->hashed_src_addr,
838 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
839 ssp_hdr->tag = cpu_to_be16(tag);
840
841 /* fill in IU for TASK and Command Frame */
842 buf_cmd += sizeof(*ssp_hdr);
843 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
844
845 if (ssp_hdr->frame_type != SSP_TASK) {
846 buf_cmd[9] = fburst | task->ssp_task.task_attr |
847 (task->ssp_task.task_prio << 3);
848 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
849 } else {
850 buf_cmd[10] = tmf->tmf;
851 switch (tmf->tmf) {
852 case TMF_ABORT_TASK:
853 case TMF_QUERY_TASK:
854 buf_cmd[12] =
855 (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
856 buf_cmd[13] =
857 tmf->tag_of_task_to_be_managed & 0xff;
858 break;
859 default:
860 break;
861 }
862 }
863 /* fill in PRD (scatter/gather) table, if any */
864 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
865 return 0;
866}
867
868#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
869static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
870 struct completion *completion,int is_tmf,
871 struct mvs_tmf_task *tmf)
872{
873 struct domain_device *dev = task->dev;
874 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
875 struct mvs_info *mvi = mvi_dev->mvi_info;
876 struct mvs_task_exec_info tei;
877 struct sas_task *t = task;
878 struct mvs_slot_info *slot;
879 u32 tag = 0xdeadbeef, rc, n_elem = 0;
880 u32 n = num, pass = 0;
881 unsigned long flags = 0;
882
883 if (!dev->port) {
884 struct task_status_struct *tsm = &t->task_status;
885
886 tsm->resp = SAS_TASK_UNDELIVERED;
887 tsm->stat = SAS_PHY_DOWN;
888 t->task_done(t);
889 return 0;
890 }
891
892 spin_lock_irqsave(&mvi->lock, flags);
893 do {
894 dev = t->dev;
895 mvi_dev = dev->lldd_dev;
896 if (DEV_IS_GONE(mvi_dev)) {
897 if (mvi_dev)
898 mv_dprintk("device %d not ready.\n",
899 mvi_dev->device_id);
900 else
901 mv_dprintk("device %016llx not ready.\n",
902 SAS_ADDR(dev->sas_addr));
903
904 rc = SAS_PHY_DOWN;
905 goto out_done;
906 }
907
908 if (dev->port->id >= mvi->chip->n_phy)
909 tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
910 else
911 tei.port = &mvi->port[dev->port->id];
912
913 if (!tei.port->port_attached) {
914 if (sas_protocol_ata(t->task_proto)) {
915 mv_dprintk("port %d does not"
916 "attached device.\n", dev->port->id);
917 rc = SAS_PHY_DOWN;
918 goto out_done;
919 } else {
920 struct task_status_struct *ts = &t->task_status;
921 ts->resp = SAS_TASK_UNDELIVERED;
922 ts->stat = SAS_PHY_DOWN;
923 t->task_done(t);
924 if (n > 1)
925 t = list_entry(t->list.next,
926 struct sas_task, list);
927 continue;
928 }
929 }
930
931 if (!sas_protocol_ata(t->task_proto)) {
932 if (t->num_scatter) {
933 n_elem = dma_map_sg(mvi->dev,
934 t->scatter,
935 t->num_scatter,
936 t->data_dir);
937 if (!n_elem) {
938 rc = -ENOMEM;
939 goto err_out;
940 }
941 }
942 } else {
943 n_elem = t->num_scatter;
944 }
945
946 rc = mvs_tag_alloc(mvi, &tag);
947 if (rc)
948 goto err_out;
949
950 slot = &mvi->slot_info[tag];
951
952
953 t->lldd_task = NULL;
954 slot->n_elem = n_elem;
955 slot->slot_tag = tag;
956 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
957
958 tei.task = t;
959 tei.hdr = &mvi->slot[tag];
960 tei.tag = tag;
961 tei.n_elem = n_elem;
962 switch (t->task_proto) {
963 case SAS_PROTOCOL_SMP:
964 rc = mvs_task_prep_smp(mvi, &tei);
965 break;
966 case SAS_PROTOCOL_SSP:
967 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
968 break;
969 case SAS_PROTOCOL_SATA:
970 case SAS_PROTOCOL_STP:
971 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
972 rc = mvs_task_prep_ata(mvi, &tei);
973 break;
974 default:
975 dev_printk(KERN_ERR, mvi->dev,
976 "unknown sas_task proto: 0x%x\n",
977 t->task_proto);
978 rc = -EINVAL;
979 break;
980 }
981
982 if (rc) {
983 mv_dprintk("rc is %x\n", rc);
984 goto err_out_tag;
985 }
986 slot->task = t;
987 slot->port = tei.port;
988 t->lldd_task = slot;
989 list_add_tail(&slot->entry, &tei.port->list);
990 /* TODO: select normal or high priority */
991 spin_lock(&t->task_state_lock);
992 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
993 spin_unlock(&t->task_state_lock);
994
995 mvs_hba_memory_dump(mvi, tag, t->task_proto);
996 mvi_dev->runing_req++;
997 ++pass;
998 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
999 if (n > 1)
1000 t = list_entry(t->list.next, struct sas_task, list);
1001 } while (--n);
1002 rc = 0;
1003 goto out_done;
1004
1005err_out_tag:
1006 mvs_tag_free(mvi, tag);
1007err_out:
1008
1009 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1010 if (!sas_protocol_ata(t->task_proto))
1011 if (n_elem)
1012 dma_unmap_sg(mvi->dev, t->scatter, n_elem,
1013 t->data_dir);
1014out_done:
1015 if (likely(pass)) {
1016 MVS_CHIP_DISP->start_delivery(mvi,
1017 (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1018 }
1019 spin_unlock_irqrestore(&mvi->lock, flags);
1020 return rc;
1021}
1022
1023int mvs_queue_command(struct sas_task *task, const int num,
1024 gfp_t gfp_flags)
1025{
1026 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1027}
1028
1029static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1030{
1031 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1032 mvs_tag_clear(mvi, slot_idx);
1033}
1034
1035static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1036 struct mvs_slot_info *slot, u32 slot_idx)
1037{
1038 if (!slot->task)
1039 return;
1040 if (!sas_protocol_ata(task->task_proto))
1041 if (slot->n_elem)
1042 dma_unmap_sg(mvi->dev, task->scatter,
1043 slot->n_elem, task->data_dir);
1044
1045 switch (task->task_proto) {
1046 case SAS_PROTOCOL_SMP:
1047 dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
1048 PCI_DMA_FROMDEVICE);
1049 dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
1050 PCI_DMA_TODEVICE);
1051 break;
1052
1053 case SAS_PROTOCOL_SATA:
1054 case SAS_PROTOCOL_STP:
1055 case SAS_PROTOCOL_SSP:
1056 default:
1057 /* do nothing */
1058 break;
1059 }
1060 list_del_init(&slot->entry);
1061 task->lldd_task = NULL;
1062 slot->task = NULL;
1063 slot->port = NULL;
1064 slot->slot_tag = 0xFFFFFFFF;
1065 mvs_slot_free(mvi, slot_idx);
1066}
1067
1068static void mvs_update_wideport(struct mvs_info *mvi, int i)
1069{
1070 struct mvs_phy *phy = &mvi->phy[i];
1071 struct mvs_port *port = phy->port;
1072 int j, no;
1073
1074 for_each_phy(port->wide_port_phymap, j, no) {
1075 if (j & 1) {
1076 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1077 PHYR_WIDE_PORT);
1078 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1079 port->wide_port_phymap);
1080 } else {
1081 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1082 PHYR_WIDE_PORT);
1083 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1084 0);
1085 }
1086 }
1087}
1088
1089static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
1090{
1091 u32 tmp;
1092 struct mvs_phy *phy = &mvi->phy[i];
1093 struct mvs_port *port = phy->port;
1094
1095 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
1096 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
1097 if (!port)
1098 phy->phy_attached = 1;
1099 return tmp;
1100 }
1101
1102 if (port) {
1103 if (phy->phy_type & PORT_TYPE_SAS) {
1104 port->wide_port_phymap &= ~(1U << i);
1105 if (!port->wide_port_phymap)
1106 port->port_attached = 0;
1107 mvs_update_wideport(mvi, i);
1108 } else if (phy->phy_type & PORT_TYPE_SATA)
1109 port->port_attached = 0;
1110 phy->port = NULL;
1111 phy->phy_attached = 0;
1112 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1113 }
1114 return 0;
1115}
1116
1117static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1118{
1119 u32 *s = (u32 *) buf;
1120
1121 if (!s)
1122 return NULL;
1123
1124 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1125 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1126
1127 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1128 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1129
1130 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1131 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1132
1133 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1134 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1135
1136	/* Workaround: treat some ATAPI devices as ATA */
1137 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1138 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1139
1140 return s;
1141}
1142
1143static u32 mvs_is_sig_fis_received(u32 irq_status)
1144{
1145 return irq_status & PHYEV_SIG_FIS;
1146}
1147
1148void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1149{
1150 struct mvs_phy *phy = &mvi->phy[i];
1151 struct sas_identify_frame *id;
1152
1153 id = (struct sas_identify_frame *)phy->frame_rcvd;
1154
1155 if (get_st) {
1156 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
1157 phy->phy_status = mvs_is_phy_ready(mvi, i);
1158 }
1159
1160 if (phy->phy_status) {
1161 int oob_done = 0;
1162 struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
1163
1164 oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
1165
1166 MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
1167 if (phy->phy_type & PORT_TYPE_SATA) {
1168 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1169 if (mvs_is_sig_fis_received(phy->irq_status)) {
1170 phy->phy_attached = 1;
1171 phy->att_dev_sas_addr =
1172 i + mvi->id * mvi->chip->n_phy;
1173 if (oob_done)
1174 sas_phy->oob_mode = SATA_OOB_MODE;
1175 phy->frame_rcvd_size =
1176 sizeof(struct dev_to_host_fis);
1177 mvs_get_d2h_reg(mvi, i, id);
1178 } else {
1179 u32 tmp;
1180 dev_printk(KERN_DEBUG, mvi->dev,
1181 "Phy%d : No sig fis\n", i);
1182 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1183 MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1184 tmp | PHYEV_SIG_FIS);
1185 phy->phy_attached = 0;
1186 phy->phy_type &= ~PORT_TYPE_SATA;
1187 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1188 goto out_done;
1189 }
1190 } else if (phy->phy_type & PORT_TYPE_SAS
1191 || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1192 phy->phy_attached = 1;
1193 phy->identify.device_type =
1194 phy->att_dev_info & PORT_DEV_TYPE_MASK;
1195
1196 if (phy->identify.device_type == SAS_END_DEV)
1197 phy->identify.target_port_protocols =
1198 SAS_PROTOCOL_SSP;
1199 else if (phy->identify.device_type != NO_DEVICE)
1200 phy->identify.target_port_protocols =
1201 SAS_PROTOCOL_SMP;
1202 if (oob_done)
1203 sas_phy->oob_mode = SAS_OOB_MODE;
1204 phy->frame_rcvd_size =
1205 sizeof(struct sas_identify_frame);
1206 }
1207 memcpy(sas_phy->attached_sas_addr,
1208 &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
1209
1210 if (MVS_CHIP_DISP->phy_work_around)
1211 MVS_CHIP_DISP->phy_work_around(mvi, i);
1212 }
1213 mv_dprintk("port %d attach dev info is %x\n",
1214 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1215 mv_dprintk("port %d attach sas addr is %llx\n",
1216 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1217out_done:
1218 if (get_st)
1219 MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
1220}
1221
1222static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1223{
1224 struct sas_ha_struct *sas_ha = sas_phy->ha;
1225 struct mvs_info *mvi = NULL; int i = 0, hi;
1226 struct mvs_phy *phy = sas_phy->lldd_phy;
1227 struct asd_sas_port *sas_port = sas_phy->port;
1228 struct mvs_port *port;
1229 unsigned long flags = 0;
1230 if (!sas_port)
1231 return;
1232
1233 while (sas_ha->sas_phy[i]) {
1234 if (sas_ha->sas_phy[i] == sas_phy)
1235 break;
1236 i++;
1237 }
1238 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1239 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1240 if (sas_port->id >= mvi->chip->n_phy)
1241 port = &mvi->port[sas_port->id - mvi->chip->n_phy];
1242 else
1243 port = &mvi->port[sas_port->id];
1244 if (lock)
1245 spin_lock_irqsave(&mvi->lock, flags);
1246 port->port_attached = 1;
1247 phy->port = port;
1248 if (phy->phy_type & PORT_TYPE_SAS) {
1249 port->wide_port_phymap = sas_port->phy_mask;
1250 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1251 mvs_update_wideport(mvi, sas_phy->id);
1252 }
1253 if (lock)
1254 spin_unlock_irqrestore(&mvi->lock, flags);
1255}
1256
1257static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1258{
1259 /*Nothing*/
1260}
1261
1262
1263void mvs_port_formed(struct asd_sas_phy *sas_phy)
1264{
1265 mvs_port_notify_formed(sas_phy, 1);
1266}
1267
1268void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1269{
1270 mvs_port_notify_deformed(sas_phy, 1);
1271}
1272
1273struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1274{
1275 u32 dev;
1276 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1277 if (mvi->devices[dev].dev_type == NO_DEVICE) {
1278 mvi->devices[dev].device_id = dev;
1279 return &mvi->devices[dev];
1280 }
1281 }
1282
1283 if (dev == MVS_MAX_DEVICES)
1284		mv_printk("supports at most %d devices, ignoring the rest.\n",
1285 MVS_MAX_DEVICES);
1286
1287 return NULL;
1288}
1289
1290void mvs_free_dev(struct mvs_device *mvi_dev)
1291{
1292 u32 id = mvi_dev->device_id;
1293 memset(mvi_dev, 0, sizeof(*mvi_dev));
1294 mvi_dev->device_id = id;
1295 mvi_dev->dev_type = NO_DEVICE;
1296 mvi_dev->dev_status = MVS_DEV_NORMAL;
1297 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1298}
1299
1300int mvs_dev_found_notify(struct domain_device *dev, int lock)
1301{
1302 unsigned long flags = 0;
1303 int res = 0;
1304 struct mvs_info *mvi = NULL;
1305 struct domain_device *parent_dev = dev->parent;
1306 struct mvs_device *mvi_device;
1307
1308 mvi = mvs_find_dev_mvi(dev);
1309
1310 if (lock)
1311 spin_lock_irqsave(&mvi->lock, flags);
1312
1313 mvi_device = mvs_alloc_dev(mvi);
1314 if (!mvi_device) {
1315 res = -1;
1316 goto found_out;
1317 }
1318 dev->lldd_dev = mvi_device;
1319 mvi_device->dev_type = dev->dev_type;
1320 mvi_device->mvi_info = mvi;
1321 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1322 int phy_id;
1323 u8 phy_num = parent_dev->ex_dev.num_phys;
1324 struct ex_phy *phy;
1325 for (phy_id = 0; phy_id < phy_num; phy_id++) {
1326 phy = &parent_dev->ex_dev.ex_phy[phy_id];
1327 if (SAS_ADDR(phy->attached_sas_addr) ==
1328 SAS_ADDR(dev->sas_addr)) {
1329 mvi_device->attached_phy = phy_id;
1330 break;
1331 }
1332 }
1333
1334 if (phy_id == phy_num) {
1335 mv_printk("Error: no attached dev:%016llx"
1336 "at ex:%016llx.\n",
1337 SAS_ADDR(dev->sas_addr),
1338 SAS_ADDR(parent_dev->sas_addr));
1339 res = -1;
1340 }
1341 }
1342
1343found_out:
1344 if (lock)
1345 spin_unlock_irqrestore(&mvi->lock, flags);
1346 return res;
1347}
1348
1349int mvs_dev_found(struct domain_device *dev)
1350{
1351 return mvs_dev_found_notify(dev, 1);
1352}
1353
1354void mvs_dev_gone_notify(struct domain_device *dev, int lock)
1355{
1356 unsigned long flags = 0;
1357 struct mvs_device *mvi_dev = dev->lldd_dev;
1358 struct mvs_info *mvi = mvi_dev->mvi_info;
1359
1360 if (lock)
1361 spin_lock_irqsave(&mvi->lock, flags);
1362
1363 if (mvi_dev) {
1364 mv_dprintk("found dev[%d:%x] is gone.\n",
1365 mvi_dev->device_id, mvi_dev->dev_type);
1366 mvs_free_reg_set(mvi, mvi_dev);
1367 mvs_free_dev(mvi_dev);
1368 } else {
1369		mv_dprintk("device has already gone.\n");
1370 }
1371 dev->lldd_dev = NULL;
1372
1373 if (lock)
1374 spin_unlock_irqrestore(&mvi->lock, flags);
1375}
1376
1377
1378void mvs_dev_gone(struct domain_device *dev)
1379{
1380 mvs_dev_gone_notify(dev, 1);
1381}
1382
1383static struct sas_task *mvs_alloc_task(void)
1384{
1385 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1386
1387 if (task) {
1388 INIT_LIST_HEAD(&task->list);
1389 spin_lock_init(&task->task_state_lock);
1390 task->task_state_flags = SAS_TASK_STATE_PENDING;
1391 init_timer(&task->timer);
1392 init_completion(&task->completion);
1393 }
1394 return task;
1395}
1396
1397static void mvs_free_task(struct sas_task *task)
1398{
1399 if (task) {
1400 BUG_ON(!list_empty(&task->list));
1401 kfree(task);
1402 }
1403}
1404
1405static void mvs_task_done(struct sas_task *task)
1406{
1407 if (!del_timer(&task->timer))
1408 return;
1409 complete(&task->completion);
1410}
1411
1412static void mvs_tmf_timedout(unsigned long data)
1413{
1414 struct sas_task *task = (struct sas_task *)data;
1415
1416 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1417 complete(&task->completion);
1418}
1419
1420/* XXX */
1421#define MVS_TASK_TIMEOUT 20
1422static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1423 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
1424{
1425 int res, retry;
1426 struct sas_task *task = NULL;
1427
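	/* Issue the TMF as an internal sas_task: arm an MVS_TASK_TIMEOUT
	 * second timer, hand the task to mvs_task_exec() and wait for
	 * completion; an unexpected response frees the task and retries,
	 * up to three times in all. */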
1428 for (retry = 0; retry < 3; retry++) {
1429 task = mvs_alloc_task();
1430 if (!task)
1431 return -ENOMEM;
1432
1433 task->dev = dev;
1434 task->task_proto = dev->tproto;
1435
1436 memcpy(&task->ssp_task, parameter, para_len);
1437 task->task_done = mvs_task_done;
1438
1439 task->timer.data = (unsigned long) task;
1440 task->timer.function = mvs_tmf_timedout;
1441 task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1442 add_timer(&task->timer);
1443
1444 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
1445
1446 if (res) {
1447 del_timer(&task->timer);
1448			mv_printk("executing internal task failed:%d\n", res);
1449 goto ex_err;
1450 }
1451
1452 wait_for_completion(&task->completion);
1453 res = -TMF_RESP_FUNC_FAILED;
1454		/* Even if the TMF timed out, return directly. */
1455 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1456 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1457 mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
1458 goto ex_err;
1459 }
1460 }
1461
1462 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1463 task->task_status.stat == SAM_GOOD) {
1464 res = TMF_RESP_FUNC_COMPLETE;
1465 break;
1466 }
1467
1468 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1469 task->task_status.stat == SAS_DATA_UNDERRUN) {
1470 /* no error, but return the number of bytes of
1471 * underrun */
1472 res = task->task_status.residual;
1473 break;
1474 }
1475
1476 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1477 task->task_status.stat == SAS_DATA_OVERRUN) {
1478 mv_dprintk("blocked task error.\n");
1479 res = -EMSGSIZE;
1480 break;
1481 } else {
1482 mv_dprintk(" task to dev %016llx response: 0x%x "
1483 "status 0x%x\n",
1484 SAS_ADDR(dev->sas_addr),
1485 task->task_status.resp,
1486 task->task_status.stat);
1487 mvs_free_task(task);
1488 task = NULL;
1489
1490 }
1491 }
1492ex_err:
1493 BUG_ON(retry == 3 && task != NULL);
1494 if (task != NULL)
1495 mvs_free_task(task);
1496 return res;
1497}
1498
1499static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1500 u8 *lun, struct mvs_tmf_task *tmf)
1501{
1502 struct sas_ssp_task ssp_task;
1503 DECLARE_COMPLETION_ONSTACK(completion);
1504 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1505 return TMF_RESP_FUNC_ESUPP;
1506
1507 strncpy((u8 *)&ssp_task.LUN, lun, 8);
1508
1509 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1510 sizeof(ssp_task), tmf);
1511}
1512
1513
1514/* The standard mandates a link reset for ATA (type 0)
1515   and a hard reset for SSP (type 1), but only for RECOVERY */
1516static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1517{
1518 int rc;
1519 struct sas_phy *phy = sas_find_local_phy(dev);
1520 int reset_type = (dev->dev_type == SATA_DEV ||
1521 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1522 rc = sas_phy_reset(phy, reset_type);
1523 msleep(2000);
1524 return rc;
1525}
1526
1527/* mandatory SAM-3 */
1528int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1529{
1530 unsigned long flags;
1531 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
1532 struct mvs_tmf_task tmf_task;
1533 struct mvs_device * mvi_dev = dev->lldd_dev;
1534 struct mvs_info *mvi = mvi_dev->mvi_info;
1535
1536 tmf_task.tmf = TMF_LU_RESET;
1537 mvi_dev->dev_status = MVS_DEV_EH;
1538 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1539 if (rc == TMF_RESP_FUNC_COMPLETE) {
1540 num = mvs_find_dev_phyno(dev, phyno);
1541 spin_lock_irqsave(&mvi->lock, flags);
1542 for (i = 0; i < num; i++)
1543 mvs_release_task(mvi, phyno[i], dev);
1544 spin_unlock_irqrestore(&mvi->lock, flags);
1545 }
1546	/* If it failed, fall through to I_T nexus reset */
1547 mv_printk("%s for device[%x]:rc= %d\n", __func__,
1548 mvi_dev->device_id, rc);
1549 return rc;
1550}
1551
1552int mvs_I_T_nexus_reset(struct domain_device *dev)
1553{
1554 unsigned long flags;
1555 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
1556 struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
1557 struct mvs_info *mvi = mvi_dev->mvi_info;
1558
1559 if (mvi_dev->dev_status != MVS_DEV_EH)
1560 return TMF_RESP_FUNC_COMPLETE;
1561 rc = mvs_debug_I_T_nexus_reset(dev);
1562 mv_printk("%s for device[%x]:rc= %d\n",
1563 __func__, mvi_dev->device_id, rc);
1564
1565	/* housekeeping */
1566 num = mvs_find_dev_phyno(dev, phyno);
1567 spin_lock_irqsave(&mvi->lock, flags);
1568 for (i = 0; i < num; i++)
1569 mvs_release_task(mvi, phyno[i], dev);
1570 spin_unlock_irqrestore(&mvi->lock, flags);
1571
1572 return rc;
1573}
1574/* optional SAM-3 */
1575int mvs_query_task(struct sas_task *task)
1576{
1577 u32 tag;
1578 struct scsi_lun lun;
1579 struct mvs_tmf_task tmf_task;
1580 int rc = TMF_RESP_FUNC_FAILED;
1581
1582 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1583 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1584 struct domain_device *dev = task->dev;
1585 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1586 struct mvs_info *mvi = mvi_dev->mvi_info;
1587
1588 int_to_scsilun(cmnd->device->lun, &lun);
1589 rc = mvs_find_tag(mvi, task, &tag);
1590 if (rc == 0) {
1591 rc = TMF_RESP_FUNC_FAILED;
1592 return rc;
1593 }
1594
1595 tmf_task.tmf = TMF_QUERY_TASK;
1596 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1597
1598 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1599 switch (rc) {
1600 /* The task is still in Lun, release it then */
1601 case TMF_RESP_FUNC_SUCC:
1602 /* The task is not in Lun or failed, reset the phy */
1603 case TMF_RESP_FUNC_FAILED:
1604 case TMF_RESP_FUNC_COMPLETE:
1605 break;
1606 }
1607 }
1608 mv_printk("%s:rc= %d\n", __func__, rc);
1609 return rc;
1610}
1611
1612/* mandatory SAM-3, still need free task/slot info */
1613int mvs_abort_task(struct sas_task *task)
1614{
1615 struct scsi_lun lun;
1616 struct mvs_tmf_task tmf_task;
1617 struct domain_device *dev = task->dev;
1618 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1619 struct mvs_info *mvi = mvi_dev->mvi_info;
1620 int rc = TMF_RESP_FUNC_FAILED;
1621 unsigned long flags;
1622 u32 tag;
1623
1624 if (mvi->exp_req)
1625 mvi->exp_req--;
1626 spin_lock_irqsave(&task->task_state_lock, flags);
1627 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1628 spin_unlock_irqrestore(&task->task_state_lock, flags);
1629 rc = TMF_RESP_FUNC_COMPLETE;
1630 goto out;
1631 }
1632 spin_unlock_irqrestore(&task->task_state_lock, flags);
1633 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1634 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1635
1636 int_to_scsilun(cmnd->device->lun, &lun);
1637 rc = mvs_find_tag(mvi, task, &tag);
1638 if (rc == 0) {
1639 mv_printk("No such tag in %s\n", __func__);
1640 rc = TMF_RESP_FUNC_FAILED;
1641 return rc;
1642 }
1643
1644 tmf_task.tmf = TMF_ABORT_TASK;
1645 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1646
1647 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1648
1649		/* if successful, free the slot and complete the task via its callback. */
1650 if (rc == TMF_RESP_FUNC_COMPLETE) {
1651 u32 slot_no;
1652 struct mvs_slot_info *slot;
1653
1654 if (task->lldd_task) {
1655 slot = task->lldd_task;
1656 slot_no = (u32) (slot - mvi->slot_info);
1657 mvs_slot_complete(mvi, slot_no, 1);
1658 }
1659 }
1660 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1661 task->task_proto & SAS_PROTOCOL_STP) {
1662 /* to do free register_set */
1663 } else {
1664 /* SMP */
1665
1666 }
1667out:
1668 if (rc != TMF_RESP_FUNC_COMPLETE)
1669 mv_printk("%s:rc= %d\n", __func__, rc);
1670 return rc;
1671}
1672
1673int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1674{
1675 int rc = TMF_RESP_FUNC_FAILED;
1676 struct mvs_tmf_task tmf_task;
1677
1678 tmf_task.tmf = TMF_ABORT_TASK_SET;
1679 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1680
1681 return rc;
1682}
1683
1684int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1685{
1686 int rc = TMF_RESP_FUNC_FAILED;
1687 struct mvs_tmf_task tmf_task;
1688
1689 tmf_task.tmf = TMF_CLEAR_ACA;
1690 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1691
1692 return rc;
1693}
1694
1695int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1696{
1697 int rc = TMF_RESP_FUNC_FAILED;
1698 struct mvs_tmf_task tmf_task;
1699
1700 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1701 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1702
1703 return rc;
1704}
1705
1706static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1707 u32 slot_idx, int err)
1708{
1709 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1710 struct task_status_struct *tstat = &task->task_status;
1711 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1712 int stat = SAM_GOOD;
1713
1714
1715 resp->frame_len = sizeof(struct dev_to_host_fis);
1716 memcpy(&resp->ending_fis[0],
1717 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1718 sizeof(struct dev_to_host_fis));
1719 tstat->buf_valid_size = sizeof(*resp);
1720 if (unlikely(err))
1721 stat = SAS_PROTO_RESPONSE;
1722 return stat;
1723}
1724
1725static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1726 u32 slot_idx)
1727{
1728 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1729 int stat;
1730 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1731 u32 tfs = 0;
1732 enum mvs_port_type type = PORT_TYPE_SAS;
1733
1734 if (err_dw0 & CMD_ISS_STPD)
1735 MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
1736
1737 MVS_CHIP_DISP->command_active(mvi, slot_idx);
1738
1739 stat = SAM_CHECK_COND;
1740 switch (task->task_proto) {
1741 case SAS_PROTOCOL_SSP:
1742 stat = SAS_ABORTED_TASK;
1743 break;
1744 case SAS_PROTOCOL_SMP:
1745 stat = SAM_CHECK_COND;
1746 break;
1747
1748 case SAS_PROTOCOL_SATA:
1749 case SAS_PROTOCOL_STP:
1750 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1751 {
1752 if (err_dw0 == 0x80400002)
1753			mv_printk("found a reserved error code, why?\n");
1754
1755 task->ata_task.use_ncq = 0;
1756 stat = SAS_PROTO_RESPONSE;
1757 mvs_sata_done(mvi, task, slot_idx, 1);
1758
1759 }
1760 break;
1761 default:
1762 break;
1763 }
1764
1765 return stat;
1766}
1767
1768int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1769{
1770 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1771 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1772 struct sas_task *task = slot->task;
1773 struct mvs_device *mvi_dev = NULL;
1774 struct task_status_struct *tstat;
1775
1776 bool aborted;
1777 void *to;
1778 enum exec_status sts;
1779
1780 if (mvi->exp_req)
1781 mvi->exp_req--;
1782 if (unlikely(!task || !task->lldd_task))
1783 return -1;
1784
1785 tstat = &task->task_status;
1786 mvi_dev = task->dev->lldd_dev;
1787
1788 mvs_hba_cq_dump(mvi);
1789
1790 spin_lock(&task->task_state_lock);
1791 task->task_state_flags &=
1792 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1793 task->task_state_flags |= SAS_TASK_STATE_DONE;
1794	/* race condition */
1795 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1796 spin_unlock(&task->task_state_lock);
1797
1798 memset(tstat, 0, sizeof(*tstat));
1799 tstat->resp = SAS_TASK_COMPLETE;
1800
1801 if (unlikely(aborted)) {
1802 tstat->stat = SAS_ABORTED_TASK;
1803 if (mvi_dev)
1804 mvi_dev->runing_req--;
1805 if (sas_protocol_ata(task->task_proto))
1806 mvs_free_reg_set(mvi, mvi_dev);
1807
1808 mvs_slot_task_free(mvi, task, slot, slot_idx);
1809 return -1;
1810 }
1811
1812 if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
1813		mv_dprintk("port has no device.\n");
1814 tstat->stat = SAS_PHY_DOWN;
1815 goto out;
1816 }
1817
1818 /*
1819 if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
1820 mv_dprintk("Find device[%016llx] RXQ_ERR %X,
1821 err info:%016llx\n",
1822 SAS_ADDR(task->dev->sas_addr),
1823 rx_desc, (u64)(*(u64 *) slot->response));
1824 }
1825 */
1826
1827 /* error info record present */
1828 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1829 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1830 goto out;
1831 }
1832
1833 switch (task->task_proto) {
1834 case SAS_PROTOCOL_SSP:
1835 /* hw says status == 0, datapres == 0 */
1836 if (rx_desc & RXQ_GOOD) {
1837 tstat->stat = SAM_GOOD;
1838 tstat->resp = SAS_TASK_COMPLETE;
1839 }
1840 /* response frame present */
1841 else if (rx_desc & RXQ_RSP) {
1842 struct ssp_response_iu *iu = slot->response +
1843 sizeof(struct mvs_err_info);
1844 sas_ssp_task_response(mvi->dev, task, iu);
1845 } else
1846 tstat->stat = SAM_CHECK_COND;
1847 break;
1848
1849 case SAS_PROTOCOL_SMP: {
1850 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1851 tstat->stat = SAM_GOOD;
1852 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1853 memcpy(to + sg_resp->offset,
1854 slot->response + sizeof(struct mvs_err_info),
1855 sg_dma_len(sg_resp));
1856 kunmap_atomic(to, KM_IRQ0);
1857 break;
1858 }
1859
1860 case SAS_PROTOCOL_SATA:
1861 case SAS_PROTOCOL_STP:
1862 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1863 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1864 break;
1865 }
1866
1867 default:
1868 tstat->stat = SAM_CHECK_COND;
1869 break;
1870 }
1871
1872out:
1873 if (mvi_dev) {
1874 mvi_dev->runing_req--;
1875 if (sas_protocol_ata(task->task_proto))
1876 mvs_free_reg_set(mvi, mvi_dev);
1877 }
1878 mvs_slot_task_free(mvi, task, slot, slot_idx);
1879 sts = tstat->stat;
1880
1881 spin_unlock(&mvi->lock);
1882 if (task->task_done)
1883 task->task_done(task);
1884 else
1885		mv_dprintk("task has no task_done callback.\n");
1886 spin_lock(&mvi->lock);
1887
1888 return sts;
1889}
1890
1891void mvs_release_task(struct mvs_info *mvi,
1892 int phy_no, struct domain_device *dev)
1893{
1894 int i = 0; u32 slot_idx;
1895 struct mvs_phy *phy;
1896 struct mvs_port *port;
1897 struct mvs_slot_info *slot, *slot2;
1898
1899 phy = &mvi->phy[phy_no];
1900 port = phy->port;
1901 if (!port)
1902 return;
1903
1904 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
1905 struct sas_task *task;
1906 slot_idx = (u32) (slot - mvi->slot_info);
1907 task = slot->task;
1908
1909 if (dev && task->dev != dev)
1910 continue;
1911
1912 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
1913 slot_idx, slot->slot_tag, task);
1914
1915 if (task->task_proto & SAS_PROTOCOL_SSP) {
1916 mv_printk("attached with SSP task CDB[");
1917 for (i = 0; i < 16; i++)
1918 mv_printk(" %02x", task->ssp_task.cdb[i]);
1919 mv_printk(" ]\n");
1920 }
1921
1922 mvs_slot_complete(mvi, slot_idx, 1);
1923 }
1924}
1925
1926static void mvs_phy_disconnected(struct mvs_phy *phy)
1927{
1928 phy->phy_attached = 0;
1929 phy->att_dev_info = 0;
1930 phy->att_dev_sas_addr = 0;
1931}
1932
1933static void mvs_work_queue(struct work_struct *work)
1934{
1935 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1936 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
1937 struct mvs_info *mvi = mwq->mvi;
1938 unsigned long flags;
1939
1940 spin_lock_irqsave(&mvi->lock, flags);
1941 if (mwq->handler & PHY_PLUG_EVENT) {
1942 u32 phy_no = (unsigned long) mwq->data;
1943 struct sas_ha_struct *sas_ha = mvi->sas;
1944 struct mvs_phy *phy = &mvi->phy[phy_no];
1945 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1946
1947 if (phy->phy_event & PHY_PLUG_OUT) {
1948 u32 tmp;
1949 struct sas_identify_frame *id;
1950 id = (struct sas_identify_frame *)phy->frame_rcvd;
1951 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
1952 phy->phy_event &= ~PHY_PLUG_OUT;
1953 if (!(tmp & PHY_READY_MASK)) {
1954 sas_phy_disconnected(sas_phy);
1955 mvs_phy_disconnected(phy);
1956 sas_ha->notify_phy_event(sas_phy,
1957 PHYE_LOSS_OF_SIGNAL);
1958 mv_dprintk("phy%d Removed Device\n", phy_no);
1959 } else {
1960 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
1961 mvs_update_phyinfo(mvi, phy_no, 1);
1962 mvs_bytes_dmaed(mvi, phy_no);
1963 mvs_port_notify_formed(sas_phy, 0);
1964 mv_dprintk("phy%d Attached Device\n", phy_no);
1965 }
1966 }
1967 }
1968 list_del(&mwq->entry);
1969 spin_unlock_irqrestore(&mvi->lock, flags);
1970 kfree(mwq);
1971}
1972
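/* Defer an event: the data and handler type are stored in an mvs_wq item and
 * mvs_work_queue() processes it roughly two seconds later, outside of
 * interrupt context. */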
1973static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
1974{
1975 struct mvs_wq *mwq;
1976 int ret = 0;
1977
1978 mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
1979 if (mwq) {
1980 mwq->mvi = mvi;
1981 mwq->data = data;
1982 mwq->handler = handler;
1983 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
1984 list_add_tail(&mwq->entry, &mvi->wq_list);
1985 schedule_delayed_work(&mwq->work_q, HZ * 2);
1986 } else
1987 ret = -ENOMEM;
1988
1989 return ret;
1990}
1991
1992static void mvs_sig_time_out(unsigned long tphy)
1993{
1994 struct mvs_phy *phy = (struct mvs_phy *)tphy;
1995 struct mvs_info *mvi = phy->mvi;
1996 u8 phy_no;
1997
1998 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
1999 if (&mvi->phy[phy_no] == phy) {
2000 mv_dprintk("Get signature time out, reset phy %d\n",
2001 phy_no+mvi->id*mvi->chip->n_phy);
2002 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
2003 }
2004 }
2005}
2006
2007static void mvs_sig_remove_timer(struct mvs_phy *phy)
2008{
2009 if (phy->timer.function)
2010 del_timer(&phy->timer);
2011 phy->timer.function = NULL;
2012}
2013
2014void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2015{
2016 u32 tmp;
2017 struct sas_ha_struct *sas_ha = mvi->sas;
2018 struct mvs_phy *phy = &mvi->phy[phy_no];
2019 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2020
2021 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2022 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2023 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2024 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
2025 phy->irq_status);
2026
2027 /*
2028	* "events" now holds the port events;
2029	* we need to check the per-port interrupt status.
2030 */
2031
2032 if (phy->irq_status & PHYEV_DCDR_ERR)
2033 mv_dprintk("port %d STP decoding error.\n",
2034 phy_no+mvi->id*mvi->chip->n_phy);
2035
2036 if (phy->irq_status & PHYEV_POOF) {
2037 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2038 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2039 int ready;
2040 mvs_release_task(mvi, phy_no, NULL);
2041 phy->phy_event |= PHY_PLUG_OUT;
2042 mvs_handle_event(mvi,
2043 (void *)(unsigned long)phy_no,
2044 PHY_PLUG_EVENT);
2045 ready = mvs_is_phy_ready(mvi, phy_no);
2046 if (!ready)
2047 mv_dprintk("phy%d Unplug Notice\n",
2048 phy_no +
2049 mvi->id * mvi->chip->n_phy);
2050 if (ready || dev_sata) {
2051 if (MVS_CHIP_DISP->stp_reset)
2052 MVS_CHIP_DISP->stp_reset(mvi,
2053 phy_no);
2054 else
2055 MVS_CHIP_DISP->phy_reset(mvi,
2056 phy_no, 0);
2057 return;
2058 }
2059 }
2060 }
2061
2062 if (phy->irq_status & PHYEV_COMWAKE) {
2063 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
2064 MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
2065 tmp | PHYEV_SIG_FIS);
2066 if (phy->timer.function == NULL) {
2067 phy->timer.data = (unsigned long)phy;
2068 phy->timer.function = mvs_sig_time_out;
2069 phy->timer.expires = jiffies + 10*HZ;
2070 add_timer(&phy->timer);
2071 }
2072 }
2073 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2074 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2075 mvs_sig_remove_timer(phy);
2076 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2077 if (phy->phy_status) {
2078 mdelay(10);
2079 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2080 if (phy->phy_type & PORT_TYPE_SATA) {
2081 tmp = MVS_CHIP_DISP->read_port_irq_mask(
2082 mvi, phy_no);
2083 tmp &= ~PHYEV_SIG_FIS;
2084 MVS_CHIP_DISP->write_port_irq_mask(mvi,
2085 phy_no, tmp);
2086 }
2087 mvs_update_phyinfo(mvi, phy_no, 0);
2088 mvs_bytes_dmaed(mvi, phy_no);
2089			/* check whether the driver is going to handle this hot plug */
2090 if (phy->phy_event & PHY_PLUG_OUT) {
2091 mvs_port_notify_formed(sas_phy, 0);
2092 phy->phy_event &= ~PHY_PLUG_OUT;
2093 }
2094 } else {
2095 mv_dprintk("plugin interrupt but phy%d is gone\n",
2096 phy_no + mvi->id*mvi->chip->n_phy);
2097 }
2098 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2099 mv_dprintk("port %d broadcast change.\n",
2100 phy_no + mvi->id*mvi->chip->n_phy);
2101		/* exception for Samsung disk drive */
2102 mdelay(1000);
2103 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2104 }
2105 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2106}
2107
2108int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
2109{
2110 u32 rx_prod_idx, rx_desc;
2111 bool attn = false;
2112
2113 /* the first dword in the RX ring is special: it contains
2114 * a mirror of the hardware's RX producer index, so that
2115 * we don't have to stall the CPU reading that register.
2116 * The actual RX ring is offset by one dword, due to this.
2117 */
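	/* Because of that extra dword, ring entry i lives at mvi->rx[i + 1],
	 * as used below. */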
2118 rx_prod_idx = mvi->rx_cons;
2119 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
2120 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
2121 return 0;
2122
2123	/* The CMPL_Q entry may arrive late; read from the register and try
2124	 * again.  Note: if coalescing is enabled, the register must be read
2125	 * every time.
2126 */
2127 if (unlikely(mvi->rx_cons == rx_prod_idx))
2128 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
2129
2130 if (mvi->rx_cons == rx_prod_idx)
2131 return 0;
2132
2133 while (mvi->rx_cons != rx_prod_idx) {
2134 /* increment our internal RX consumer pointer */
2135 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
2136 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
2137
2138 if (likely(rx_desc & RXQ_DONE))
2139 mvs_slot_complete(mvi, rx_desc, 0);
2140 if (rx_desc & RXQ_ATTN) {
2141 attn = true;
2142 } else if (rx_desc & RXQ_ERR) {
2143 if (!(rx_desc & RXQ_DONE))
2144 mvs_slot_complete(mvi, rx_desc, 0);
2145 } else if (rx_desc & RXQ_SLOT_RESET) {
2146 mvs_slot_free(mvi, rx_desc);
2147 }
2148 }
2149
2150 if (attn && self_clear)
2151 MVS_CHIP_DISP->int_full(mvi);
2152 return 0;
2153}
2154
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
new file mode 100644
index 000000000000..aa2270af1bac
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -0,0 +1,406 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_SAS_H_
26#define _MV_SAS_H_
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/spinlock.h>
31#include <linux/delay.h>
32#include <linux/types.h>
33#include <linux/ctype.h>
34#include <linux/dma-mapping.h>
35#include <linux/pci.h>
36#include <linux/platform_device.h>
37#include <linux/interrupt.h>
38#include <linux/irq.h>
39#include <linux/vmalloc.h>
40#include <scsi/libsas.h>
41#include <scsi/scsi_tcq.h>
42#include <scsi/sas_ata.h>
43#include <linux/version.h>
44#include "mv_defs.h"
45
46#define DRV_NAME "mvsas"
47#define DRV_VERSION "0.8.2"
48#define _MV_DUMP 0
49#define MVS_ID_NOT_MAPPED 0x7f
50/* #define DISABLE_HOTPLUG_DMA_FIX */
51#define MAX_EXP_RUNNING_REQ 2
52#define WIDE_PORT_MAX_PHY 4
53#define MV_DISABLE_NCQ 0
54#define mv_printk(fmt, arg ...) \
55 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
56#ifdef MV_DEBUG
57#define mv_dprintk(format, arg...) \
58 printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
59#else
60#define mv_dprintk(format, arg...)
61#endif
62#define MV_MAX_U32 0xffffffff
63
64extern struct mvs_tgt_initiator mvs_tgt;
65extern struct mvs_info *tgt_mvi;
66extern const struct mvs_dispatch mvs_64xx_dispatch;
67extern const struct mvs_dispatch mvs_94xx_dispatch;
68
69#define DEV_IS_EXPANDER(type) \
70 ((type == EDGE_DEV) || (type == FANOUT_DEV))
71
72#define bit(n) ((u32)1 << n)
73
74#define for_each_phy(__lseq_mask, __mc, __lseq) \
75 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
76 (__mc) != 0 ; \
77 (++__lseq), (__mc) >>= 1)
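/*
 * Illustrative use only (mirrors mvs_update_wideport()): '__lseq' counts the
 * phy index while '__mc' is the map shifted right on every pass, so bit 0 of
 * '__mc' tells whether the current phy is set in the map.  'use_phy' below is
 * just a placeholder, not a driver function:
 *
 *	int no, mc;
 *	for_each_phy(port->wide_port_phymap, mc, no)
 *		if (mc & 1)
 *			use_phy(no);
 */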
78
79#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
80#define UNASSOC_D2H_FIS(id) \
81 ((void *) mvi->rx_fis + 0x100 * id)
82#define SATA_RECEIVED_FIS_LIST(reg_set) \
83 ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
84#define SATA_RECEIVED_SDB_FIS(reg_set) \
85 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
86#define SATA_RECEIVED_D2H_FIS(reg_set) \
87 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
88#define SATA_RECEIVED_PIO_FIS(reg_set) \
89 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
90#define SATA_RECEIVED_DMA_FIS(reg_set) \
91 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
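/* Each SATA register set owns a 0x100-byte area inside the received-FIS
 * buffer (starting at chip->fis_offs); the offsets above select the
 * individual FIS types within that area. */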
92
93enum dev_status {
94 MVS_DEV_NORMAL = 0x0,
95 MVS_DEV_EH = 0x1,
96};
97
98
99struct mvs_info;
100
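/* Per-chip-family low-level operations (mvs_64xx_dispatch, mvs_94xx_dispatch);
 * the active set is reached through the MVS_CHIP_DISP macro. */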
101struct mvs_dispatch {
102 char *name;
103 int (*chip_init)(struct mvs_info *mvi);
104 int (*spi_init)(struct mvs_info *mvi);
105 int (*chip_ioremap)(struct mvs_info *mvi);
106 void (*chip_iounmap)(struct mvs_info *mvi);
107 irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
108 u32 (*isr_status)(struct mvs_info *mvi, int irq);
109 void (*interrupt_enable)(struct mvs_info *mvi);
110 void (*interrupt_disable)(struct mvs_info *mvi);
111
112 u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
113 void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
114
115 u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
116 void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
117 void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
118
119 u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
120 void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
121 void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
122
123 u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
124 void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
125
126 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
127 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
128
129 void (*get_sas_addr)(void *buf, u32 buflen);
130 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
131 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
132 u32 tfs);
133 void (*start_delivery)(struct mvs_info *mvi, u32 tx);
134 u32 (*rx_update)(struct mvs_info *mvi);
135 void (*int_full)(struct mvs_info *mvi);
136 u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
137 void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
138 u32 (*prd_size)(void);
139 u32 (*prd_count)(void);
140 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
141 void (*detect_porttype)(struct mvs_info *mvi, int i);
142 int (*oob_done)(struct mvs_info *mvi, int i);
143 void (*fix_phy_info)(struct mvs_info *mvi, int i,
144 struct sas_identify_frame *id);
145 void (*phy_work_around)(struct mvs_info *mvi, int i);
146 void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
147 struct sas_phy_linkrates *rates);
148 u32 (*phy_max_link_rate)(void);
149 void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
150 void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
151 void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
152 void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
153 void (*clear_active_cmds)(struct mvs_info *mvi);
154 u32 (*spi_read_data)(struct mvs_info *mvi);
155 void (*spi_write_data)(struct mvs_info *mvi, u32 data);
156 int (*spi_buildcmd)(struct mvs_info *mvi,
157 u32 *dwCmd,
158 u8 cmd,
159 u8 read,
160 u8 length,
161 u32 addr
162 );
163 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
164 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
165#ifndef DISABLE_HOTPLUG_DMA_FIX
166 void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
167#endif
168
169};
170
171struct mvs_chip_info {
172 u32 n_host;
173 u32 n_phy;
174 u32 fis_offs;
175 u32 fis_count;
176 u32 srs_sz;
177 u32 slot_width;
178 const struct mvs_dispatch *dispatch;
179};
180#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
181#define MVS_RX_FISL_SZ \
182 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
183#define MVS_CHIP_DISP (mvi->chip->dispatch)
184
185struct mvs_err_info {
186 __le32 flags;
187 __le32 flags2;
188};
189
190struct mvs_cmd_hdr {
191 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
192 __le32 lens; /* cmd, max resp frame len */
193 __le32 tags; /* targ port xfer tag; tag */
194 __le32 data_len; /* data xfer len */
195 __le64 cmd_tbl; /* command table address */
196 __le64 open_frame; /* open addr frame address */
197 __le64 status_buf; /* status buffer address */
198 __le64 prd_tbl; /* PRD tbl address */
199 __le32 reserved[4];
200};
201
202struct mvs_port {
203 struct asd_sas_port sas_port;
204 u8 port_attached;
205 u8 wide_port_phymap;
206 struct list_head list;
207};
208
209struct mvs_phy {
210 struct mvs_info *mvi;
211 struct mvs_port *port;
212 struct asd_sas_phy sas_phy;
213 struct sas_identify identify;
214 struct scsi_device *sdev;
215 struct timer_list timer;
216 u64 dev_sas_addr;
217 u64 att_dev_sas_addr;
218 u32 att_dev_info;
219 u32 dev_info;
220 u32 phy_type;
221 u32 phy_status;
222 u32 irq_status;
223 u32 frame_rcvd_size;
224 u8 frame_rcvd[32];
225 u8 phy_attached;
226 u8 phy_mode;
227 u8 reserved[2];
228 u32 phy_event;
229 enum sas_linkrate minimum_linkrate;
230 enum sas_linkrate maximum_linkrate;
231};
232
233struct mvs_device {
234 struct list_head dev_entry;
235 enum sas_dev_type dev_type;
236 struct mvs_info *mvi_info;
237 struct domain_device *sas_device;
238 u32 attached_phy;
239 u32 device_id;
240 u32 runing_req;
241 u8 taskfileset;
242 u8 dev_status;
243 u16 reserved;
244};
245
246struct mvs_slot_info {
247 struct list_head entry;
248 union {
249 struct sas_task *task;
250 void *tdata;
251 };
252 u32 n_elem;
253 u32 tx;
254 u32 slot_tag;
255
256 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
257 * and PRD table
258 */
259 void *buf;
260 dma_addr_t buf_dma;
261#if _MV_DUMP
262 u32 cmd_size;
263#endif
264 void *response;
265 struct mvs_port *port;
266 struct mvs_device *device;
267 void *open_frame;
268};
269
270struct mvs_info {
271 unsigned long flags;
272
273 /* host-wide lock */
274 spinlock_t lock;
275
276 /* our device */
277 struct pci_dev *pdev;
278 struct device *dev;
279
280 /* enhanced mode registers */
281 void __iomem *regs;
282
283 /* peripheral or soc registers */
284 void __iomem *regs_ex;
285 u8 sas_addr[SAS_ADDR_SIZE];
286
287 /* SCSI/SAS glue */
288 struct sas_ha_struct *sas;
289 struct Scsi_Host *shost;
290
291 /* TX (delivery) DMA ring */
292 __le32 *tx;
293 dma_addr_t tx_dma;
294
295 /* cached next-producer idx */
296 u32 tx_prod;
297
298 /* RX (completion) DMA ring */
299 __le32 *rx;
300 dma_addr_t rx_dma;
301
302 /* RX consumer idx */
303 u32 rx_cons;
304
305 /* RX'd FIS area */
306 __le32 *rx_fis;
307 dma_addr_t rx_fis_dma;
308
309 /* DMA command header slots */
310 struct mvs_cmd_hdr *slot;
311 dma_addr_t slot_dma;
312
313 u32 chip_id;
314 const struct mvs_chip_info *chip;
315
316 int tags_num;
317 DECLARE_BITMAP(tags, MVS_SLOTS);
318 /* further per-slot information */
319 struct mvs_phy phy[MVS_MAX_PHYS];
320 struct mvs_port port[MVS_MAX_PHYS];
321 u32 irq;
322 u32 exp_req;
323 u32 id;
324 u64 sata_reg_set;
325 struct list_head *hba_list;
326 struct list_head soc_entry;
327 struct list_head wq_list;
328 unsigned long instance;
329 u16 flashid;
330 u32 flashsize;
331 u32 flashsectSize;
332
333 void *addon;
334 struct mvs_device devices[MVS_MAX_DEVICES];
335#ifndef DISABLE_HOTPLUG_DMA_FIX
336 void *bulk_buffer;
337 dma_addr_t bulk_buffer_dma;
338#define TRASH_BUCKET_SIZE 0x20000
339#endif
340 struct mvs_slot_info slot_info[0];
341};
342
343struct mvs_prv_info{
344 u8 n_host;
345 u8 n_phy;
346 u16 reserve;
347 struct mvs_info *mvi[2];
348};
349
350struct mvs_wq {
351 struct delayed_work work_q;
352 struct mvs_info *mvi;
353 void *data;
354 int handler;
355 struct list_head entry;
356};
357
358struct mvs_task_exec_info {
359 struct sas_task *task;
360 struct mvs_cmd_hdr *hdr;
361 struct mvs_port *port;
362 u32 tag;
363 int n_elem;
364};
365
366
367/******************** function prototype *********************/
368void mvs_get_sas_addr(void *buf, u32 buflen);
369void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
370void mvs_tag_free(struct mvs_info *mvi, u32 tag);
371void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
372int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
373void mvs_tag_init(struct mvs_info *mvi);
374void mvs_iounmap(void __iomem *regs);
375int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
376void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
377int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
378 void *funcdata);
379void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
380 u32 off_lo, u32 off_hi, u64 sas_addr);
381int mvs_slave_alloc(struct scsi_device *scsi_dev);
382int mvs_slave_configure(struct scsi_device *sdev);
383void mvs_scan_start(struct Scsi_Host *shost);
384int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
385int mvs_queue_command(struct sas_task *task, const int num,
386 gfp_t gfp_flags);
387int mvs_abort_task(struct sas_task *task);
388int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
389int mvs_clear_aca(struct domain_device *dev, u8 *lun);
390int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
391void mvs_port_formed(struct asd_sas_phy *sas_phy);
392void mvs_port_deformed(struct asd_sas_phy *sas_phy);
393int mvs_dev_found(struct domain_device *dev);
394void mvs_dev_gone(struct domain_device *dev);
395int mvs_lu_reset(struct domain_device *dev, u8 *lun);
396int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
397int mvs_I_T_nexus_reset(struct domain_device *dev);
398int mvs_query_task(struct sas_task *task);
399void mvs_release_task(struct mvs_info *mvi, int phy_no,
400 struct domain_device *dev);
401void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
402void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
403int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
404void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
405#endif
406
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
index 0e207aa67d16..5fd73d77c3af 100644
--- a/drivers/scsi/osd/Kbuild
+++ b/drivers/scsi/osd/Kbuild
@@ -11,31 +11,6 @@
11# it under the terms of the GNU General Public License version 2 11# it under the terms of the GNU General Public License version 2
12# 12#
13 13
14ifneq ($(OSD_INC),)
15# we are built out-of-tree Kconfigure everything as on
16
17CONFIG_SCSI_OSD_INITIATOR=m
18ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
19
20CONFIG_SCSI_OSD_ULD=m
21ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
22
23# CONFIG_SCSI_OSD_DPRINT_SENSE =
24# 0 - no print of errors
25# 1 - print errors
26# 2 - errors + warrnings
27ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
28
29# Uncomment to turn debug on
30# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
31
32# if we are built out-of-tree and the hosting kernel has OSD headers
33# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing
34# this it will work. This might break in future kernels
35LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
36
37endif
38
39# libosd.ko - osd-initiator library 14# libosd.ko - osd-initiator library
40libosd-y := osd_initiator.o 15libosd-y := osd_initiator.o
41obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o 16obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile
deleted file mode 100755
index d905344f83ba..000000000000
--- a/drivers/scsi/osd/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Makefile for the OSD modules (out of tree)
3#
4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5#
6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com>
8# Benny Halevy <bhalevy@panasas.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2
12#
13# This Makefile is used to call the kernel Makefile in case of an out-of-tree
14# build.
15# $KSRC should point to a Kernel source tree otherwise host's default is
16# used. (eg. /lib/modules/`uname -r`/build)
17
18# include path for out-of-tree Headers
19OSD_INC ?= `pwd`/../../../include
20
21# allow users to override these
22# e.g. to compile for a kernel that you aren't currently running
23KSRC ?= /lib/modules/$(shell uname -r)/build
24KBUILD_OUTPUT ?=
25ARCH ?=
26V ?= 0
27
28# this is the basic Kbuild out-of-tree invocation, with the M= option
29KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
30
31all: libosd
32
33libosd: ;
34 $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
35
36clean:
37 $(KBUILD_BASE) clean
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 5776b2ab6b12..7a117c18114c 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
118 _osd_ver_desc(or)); 118 _osd_ver_desc(or));
119 119
120 pFirst = get_attrs[a++].val_ptr; 120 pFirst = get_attrs[a++].val_ptr;
121 OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n", 121 OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
122 (char *)pFirst); 122 (char *)pFirst);
123 123
124 pFirst = get_attrs[a++].val_ptr; 124 pFirst = get_attrs[a++].val_ptr;
125 OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n", 125 OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
126 (char *)pFirst); 126 (char *)pFirst);
127 127
128 pFirst = get_attrs[a++].val_ptr; 128 pFirst = get_attrs[a++].val_ptr;
129 OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n", 129 OSD_INFO("PRODUCT_MODEL [%s]\n",
130 (char *)pFirst); 130 (char *)pFirst);
131 131
132 pFirst = get_attrs[a++].val_ptr; 132 pFirst = get_attrs[a++].val_ptr;
133 OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n", 133 OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
134 pFirst ? get_unaligned_be32(pFirst) : ~0U); 134 pFirst ? get_unaligned_be32(pFirst) : ~0U);
135 135
136 pFirst = get_attrs[a++].val_ptr; 136 pFirst = get_attrs[a++].val_ptr;
137 OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n", 137 OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
138 (char *)pFirst); 138 (char *)pFirst);
139 139
140 pFirst = get_attrs[a].val_ptr; 140 pFirst = get_attrs[a].val_ptr;
141 OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst); 141 OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst);
142 a++; 142 a++;
143 143
144 pFirst = get_attrs[a++].val_ptr; 144 pFirst = get_attrs[a++].val_ptr;
145 OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n", 145 OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
147 147
148 pFirst = get_attrs[a++].val_ptr; 148 pFirst = get_attrs[a++].val_ptr;
149 OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n", 149 OSD_INFO("USED_CAPACITY [0x%llx]\n",
150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
151 151
152 pFirst = get_attrs[a++].val_ptr; 152 pFirst = get_attrs[a++].val_ptr;
153 OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n", 153 OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
155 155
156 if (a >= nelem) 156 if (a >= nelem)
@@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
158 158
159 /* FIXME: Where are the time utilities */ 159 /* FIXME: Where are the time utilities */
160 pFirst = get_attrs[a++].val_ptr; 160 pFirst = get_attrs[a++].val_ptr;
161 OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", 161 OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
162 ((char *)pFirst)[0], ((char *)pFirst)[1], 162 ((char *)pFirst)[0], ((char *)pFirst)[1],
163 ((char *)pFirst)[2], ((char *)pFirst)[3], 163 ((char *)pFirst)[2], ((char *)pFirst)[3],
164 ((char *)pFirst)[4], ((char *)pFirst)[5]); 164 ((char *)pFirst)[4], ((char *)pFirst)[5]);
@@ -169,7 +169,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
169 169
170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1, 170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
171 sid_dump, sizeof(sid_dump), true); 171 sid_dump, sizeof(sid_dump), true);
172 OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump); 172 OSD_INFO("OSD_SYSTEM_ID(%d)\n"
173 " [%s]\n", len, sid_dump);
173 a++; 174 a++;
174 } 175 }
175out: 176out:
@@ -669,7 +670,7 @@ static int _osd_req_list_objects(struct osd_request *or,
669 __be16 action, const struct osd_obj_id *obj, osd_id initial_id, 670 __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
670 struct osd_obj_id_list *list, unsigned nelem) 671 struct osd_obj_id_list *list, unsigned nelem)
671{ 672{
672 struct request_queue *q = or->osd_dev->scsi_device->request_queue; 673 struct request_queue *q = osd_request_queue(or->osd_dev);
673 u64 len = nelem * sizeof(osd_id) + sizeof(*list); 674 u64 len = nelem * sizeof(osd_id) + sizeof(*list);
674 struct bio *bio; 675 struct bio *bio;
675 676
@@ -778,16 +779,32 @@ EXPORT_SYMBOL(osd_req_remove_object);
778*/ 779*/
779 780
780void osd_req_write(struct osd_request *or, 781void osd_req_write(struct osd_request *or,
781 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 782 const struct osd_obj_id *obj, u64 offset,
783 struct bio *bio, u64 len)
782{ 784{
783 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size); 785 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
784 WARN_ON(or->out.bio || or->out.total_bytes); 786 WARN_ON(or->out.bio || or->out.total_bytes);
785 bio->bi_rw |= (1 << BIO_RW); 787 WARN_ON(0 == bio_rw_flagged(bio, BIO_RW));
786 or->out.bio = bio; 788 or->out.bio = bio;
787 or->out.total_bytes = bio->bi_size; 789 or->out.total_bytes = len;
788} 790}
789EXPORT_SYMBOL(osd_req_write); 791EXPORT_SYMBOL(osd_req_write);
790 792
793int osd_req_write_kern(struct osd_request *or,
794 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
795{
796 struct request_queue *req_q = osd_request_queue(or->osd_dev);
797 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
798
799 if (IS_ERR(bio))
800 return PTR_ERR(bio);
801
802 bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
803 osd_req_write(or, obj, offset, bio, len);
804 return 0;
805}
806EXPORT_SYMBOL(osd_req_write_kern);
807
791/*TODO: void osd_req_append(struct osd_request *, 808/*TODO: void osd_req_append(struct osd_request *,
792 const struct osd_obj_id *, struct bio *data_out); */ 809 const struct osd_obj_id *, struct bio *data_out); */
793/*TODO: void osd_req_create_write(struct osd_request *, 810/*TODO: void osd_req_create_write(struct osd_request *,
@@ -813,16 +830,31 @@ void osd_req_flush_object(struct osd_request *or,
813EXPORT_SYMBOL(osd_req_flush_object); 830EXPORT_SYMBOL(osd_req_flush_object);
814 831
815void osd_req_read(struct osd_request *or, 832void osd_req_read(struct osd_request *or,
816 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 833 const struct osd_obj_id *obj, u64 offset,
834 struct bio *bio, u64 len)
817{ 835{
818 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size); 836 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
819 WARN_ON(or->in.bio || or->in.total_bytes); 837 WARN_ON(or->in.bio || or->in.total_bytes);
820 bio->bi_rw &= ~(1 << BIO_RW); 838 WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
821 or->in.bio = bio; 839 or->in.bio = bio;
822 or->in.total_bytes = bio->bi_size; 840 or->in.total_bytes = len;
823} 841}
824EXPORT_SYMBOL(osd_req_read); 842EXPORT_SYMBOL(osd_req_read);
825 843
844int osd_req_read_kern(struct osd_request *or,
845 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
846{
847 struct request_queue *req_q = osd_request_queue(or->osd_dev);
848 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
849
850 if (IS_ERR(bio))
851 return PTR_ERR(bio);
852
853 osd_req_read(or, obj, offset, bio, len);
854 return 0;
855}
856EXPORT_SYMBOL(osd_req_read_kern);
857
826void osd_req_get_attributes(struct osd_request *or, 858void osd_req_get_attributes(struct osd_request *or,
827 const struct osd_obj_id *obj) 859 const struct osd_obj_id *obj)
828{ 860{
@@ -1213,7 +1245,7 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
1213} 1245}
1214 1246
1215static int _osd_req_finalize_data_integrity(struct osd_request *or, 1247static int _osd_req_finalize_data_integrity(struct osd_request *or,
1216 bool has_in, bool has_out, const u8 *cap_key) 1248 bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
1217{ 1249{
1218 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); 1250 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1219 int ret; 1251 int ret;
@@ -1228,8 +1260,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1228 }; 1260 };
1229 unsigned pad; 1261 unsigned pad;
1230 1262
1231 or->out_data_integ.data_bytes = cpu_to_be64( 1263 or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1232 or->out.bio ? or->out.bio->bi_size : 0);
1233 or->out_data_integ.set_attributes_bytes = cpu_to_be64( 1264 or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1234 or->set_attr.total_bytes); 1265 or->set_attr.total_bytes);
1235 or->out_data_integ.get_attributes_bytes = cpu_to_be64( 1266 or->out_data_integ.get_attributes_bytes = cpu_to_be64(
@@ -1306,6 +1337,8 @@ static int _init_blk_request(struct osd_request *or,
1306 1337
1307 or->request = req; 1338 or->request = req;
1308 req->cmd_type = REQ_TYPE_BLOCK_PC; 1339 req->cmd_type = REQ_TYPE_BLOCK_PC;
1340 req->cmd_flags |= REQ_QUIET;
1341
1309 req->timeout = or->timeout; 1342 req->timeout = or->timeout;
1310 req->retries = or->retries; 1343 req->retries = or->retries;
1311 req->sense = or->sense; 1344 req->sense = or->sense;
@@ -1339,6 +1372,7 @@ int osd_finalize_request(struct osd_request *or,
1339{ 1372{
1340 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); 1373 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1341 bool has_in, has_out; 1374 bool has_in, has_out;
1375 u64 out_data_bytes = or->out.total_bytes;
1342 int ret; 1376 int ret;
1343 1377
1344 if (options & OSD_REQ_FUA) 1378 if (options & OSD_REQ_FUA)
@@ -1388,7 +1422,8 @@ int osd_finalize_request(struct osd_request *or,
1388 } 1422 }
1389 } 1423 }
1390 1424
1391 ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key); 1425 ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1426 out_data_bytes, cap_key);
1392 if (ret) 1427 if (ret)
1393 return ret; 1428 return ret;
1394 1429
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 22b59e13ba83..0bdef3390902 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -49,6 +49,7 @@
49#include <linux/device.h> 49#include <linux/device.h>
50#include <linux/idr.h> 50#include <linux/idr.h>
51#include <linux/major.h> 51#include <linux/major.h>
52#include <linux/file.h>
52 53
53#include <scsi/scsi.h> 54#include <scsi/scsi.h>
54#include <scsi/scsi_driver.h> 55#include <scsi/scsi_driver.h>
@@ -175,10 +176,9 @@ static const struct file_operations osd_fops = {
175 176
176struct osd_dev *osduld_path_lookup(const char *name) 177struct osd_dev *osduld_path_lookup(const char *name)
177{ 178{
178 struct path path; 179 struct osd_uld_device *oud;
179 struct inode *inode; 180 struct osd_dev *od;
180 struct cdev *cdev; 181 struct file *file;
181 struct osd_uld_device *uninitialized_var(oud);
182 int error; 182 int error;
183 183
184 if (!name || !*name) { 184 if (!name || !*name) {
@@ -186,52 +186,46 @@ struct osd_dev *osduld_path_lookup(const char *name)
186 return ERR_PTR(-EINVAL); 186 return ERR_PTR(-EINVAL);
187 } 187 }
188 188
189 error = kern_path(name, LOOKUP_FOLLOW, &path); 189 od = kzalloc(sizeof(*od), GFP_KERNEL);
190 if (error) { 190 if (!od)
191 OSD_ERR("path_lookup of %s failed=>%d\n", name, error); 191 return ERR_PTR(-ENOMEM);
192 return ERR_PTR(error);
193 }
194 192
195 inode = path.dentry->d_inode; 193 file = filp_open(name, O_RDWR, 0);
196 error = -EINVAL; /* Not the right device e.g osd_uld_device */ 194 if (IS_ERR(file)) {
197 if (!S_ISCHR(inode->i_mode)) { 195 error = PTR_ERR(file);
198 OSD_DEBUG("!S_ISCHR()\n"); 196 goto free_od;
199 goto out;
200 } 197 }
201 198
202 cdev = inode->i_cdev; 199 if (file->f_op != &osd_fops){
203 if (!cdev) { 200 error = -EINVAL;
204 OSD_ERR("Before mounting an OSD Based filesystem\n"); 201 goto close_file;
205 OSD_ERR(" user-mode must open+close the %s device\n", name);
206 OSD_ERR(" Example: bash: echo < %s\n", name);
207 goto out;
208 } 202 }
209 203
210 /* The Magic wand. Is it our char-dev */ 204 oud = file->private_data;
211 /* TODO: Support sg devices */
212 if (cdev->owner != THIS_MODULE) {
213 OSD_ERR("Error mounting %s - is not an OSD device\n", name);
214 goto out;
215 }
216 205
217 oud = container_of(cdev, struct osd_uld_device, cdev); 206 *od = oud->od;
207 od->file = file;
218 208
219 __uld_get(oud); 209 return od;
220 error = 0;
221 210
222out: 211close_file:
223 path_put(&path); 212 fput(file);
224 return error ? ERR_PTR(error) : &oud->od; 213free_od:
214 kfree(od);
215 return ERR_PTR(error);
225} 216}
226EXPORT_SYMBOL(osduld_path_lookup); 217EXPORT_SYMBOL(osduld_path_lookup);
227 218
228void osduld_put_device(struct osd_dev *od) 219void osduld_put_device(struct osd_dev *od)
229{ 220{
230 if (od) {
231 struct osd_uld_device *oud = container_of(od,
232 struct osd_uld_device, od);
233 221
234 __uld_put(oud); 222 if (od && !IS_ERR(od)) {
223 struct osd_uld_device *oud = od->file->private_data;
224
225 BUG_ON(od->scsi_device != oud->od.scsi_device);
226
227 fput(od->file);
228 kfree(od);
235 } 229 }
236} 230}
237EXPORT_SYMBOL(osduld_put_device); 231EXPORT_SYMBOL(osduld_put_device);
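The rewritten osduld_path_lookup() above no longer walks the path and inspects the cdev by hand: it opens the node with filp_open() and trusts file->private_data only after confirming the file_operations pointer is the driver's own. A condensed sketch of that identity check (illustrative, simplified from the hunk; the real code keeps 'file' pinned in od->file and fput()s it in osduld_put_device()):

/* Illustrative sketch, simplified from the hunk above. */
static struct osd_uld_device *example_lookup_uld(const char *name)
{
	struct file *file = filp_open(name, O_RDWR, 0);

	if (IS_ERR(file))
		return ERR_CAST(file);

	if (file->f_op != &osd_fops) {	/* not an OSD ULD char device */
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	/* ->open() stored the osd_uld_device here, so it is safe to use */
	return file->private_data;
}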
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 5defe5ea5eda..8371d917a9a2 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -17,9 +17,12 @@
17* General Public License for more details. 17* General Public License for more details.
18* 18*
19******************************************************************************/ 19******************************************************************************/
20#define QLA1280_VERSION "3.26" 20#define QLA1280_VERSION "3.27"
21/***************************************************************************** 21/*****************************************************************************
22 Revision History: 22 Revision History:
23 Rev 3.27, February 10, 2009, Michael Reed
24 - General code cleanup.
25 - Improve error recovery.
23 Rev 3.26, January 16, 2006 Jes Sorensen 26 Rev 3.26, January 16, 2006 Jes Sorensen
24 - Ditch all < 2.6 support 27 - Ditch all < 2.6 support
25 Rev 3.25.1, February 10, 2005 Christoph Hellwig 28 Rev 3.25.1, February 10, 2005 Christoph Hellwig
@@ -435,7 +438,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *,
435 uint8_t, uint16_t *); 438 uint8_t, uint16_t *);
436static int qla1280_bus_reset(struct scsi_qla_host *, int); 439static int qla1280_bus_reset(struct scsi_qla_host *, int);
437static int qla1280_device_reset(struct scsi_qla_host *, int, int); 440static int qla1280_device_reset(struct scsi_qla_host *, int, int);
438static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
439static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); 441static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
440static int qla1280_abort_isp(struct scsi_qla_host *); 442static int qla1280_abort_isp(struct scsi_qla_host *);
441#ifdef QLA_64BIT_PTR 443#ifdef QLA_64BIT_PTR
@@ -698,7 +700,7 @@ qla1280_info(struct Scsi_Host *host)
698} 700}
699 701
700/************************************************************************** 702/**************************************************************************
701 * qla1200_queuecommand 703 * qla1280_queuecommand
702 * Queue a command to the controller. 704 * Queue a command to the controller.
703 * 705 *
704 * Note: 706 * Note:
@@ -713,12 +715,14 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
713{ 715{
714 struct Scsi_Host *host = cmd->device->host; 716 struct Scsi_Host *host = cmd->device->host;
715 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; 717 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
716 struct srb *sp = (struct srb *)&cmd->SCp; 718 struct srb *sp = (struct srb *)CMD_SP(cmd);
717 int status; 719 int status;
718 720
719 cmd->scsi_done = fn; 721 cmd->scsi_done = fn;
720 sp->cmd = cmd; 722 sp->cmd = cmd;
721 sp->flags = 0; 723 sp->flags = 0;
724 sp->wait = NULL;
725 CMD_HANDLE(cmd) = (unsigned char *)NULL;
722 726
723 qla1280_print_scsi_cmd(5, cmd); 727 qla1280_print_scsi_cmd(5, cmd);
724 728
@@ -738,21 +742,11 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
738 742
739enum action { 743enum action {
740 ABORT_COMMAND, 744 ABORT_COMMAND,
741 ABORT_DEVICE,
742 DEVICE_RESET, 745 DEVICE_RESET,
743 BUS_RESET, 746 BUS_RESET,
744 ADAPTER_RESET, 747 ADAPTER_RESET,
745 FAIL
746}; 748};
747 749
748/* timer action for error action processor */
749static void qla1280_error_wait_timeout(unsigned long __data)
750{
751 struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
752 struct srb *sp = (struct srb *)CMD_SP(cmd);
753
754 complete(sp->wait);
755}
756 750
757static void qla1280_mailbox_timeout(unsigned long __data) 751static void qla1280_mailbox_timeout(unsigned long __data)
758{ 752{
@@ -767,8 +761,67 @@ static void qla1280_mailbox_timeout(unsigned long __data)
767 complete(ha->mailbox_wait); 761 complete(ha->mailbox_wait);
768} 762}
769 763
764static int
765_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
766 struct completion *wait)
767{
768 int status = FAILED;
769 struct scsi_cmnd *cmd = sp->cmd;
770
771 spin_unlock_irq(ha->host->host_lock);
772 wait_for_completion_timeout(wait, 4*HZ);
773 spin_lock_irq(ha->host->host_lock);
774 sp->wait = NULL;
775 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
776 status = SUCCESS;
777 (*cmd->scsi_done)(cmd);
778 }
779 return status;
780}
781
782static int
783qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
784{
785 DECLARE_COMPLETION_ONSTACK(wait);
786
787 sp->wait = &wait;
788 return _qla1280_wait_for_single_command(ha, sp, &wait);
789}
790
791static int
792qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
793{
794 int cnt;
795 int status;
796 struct srb *sp;
797 struct scsi_cmnd *cmd;
798
799 status = SUCCESS;
800
801 /*
802 * Wait for all commands with the designated bus/target
803 * to be completed by the firmware
804 */
805 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
806 sp = ha->outstanding_cmds[cnt];
807 if (sp) {
808 cmd = sp->cmd;
809
810 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
811 continue;
812 if (target >= 0 && SCSI_TCN_32(cmd) != target)
813 continue;
814
815 status = qla1280_wait_for_single_command(ha, sp);
816 if (status == FAILED)
817 break;
818 }
819 }
820 return status;
821}
822
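The helpers added above replace the old ad-hoc timer with the standard completion idiom: the eh thread parks an on-stack completion in sp->wait, drops the host lock so the interrupt path can deliver the command, waits with a 4-second bound, and then re-checks CMD_HANDLE() against COMPLETED_HANDLE. Stripped to its synchronization core (names other than the kernel primitives are hypothetical stand-ins for ha->host->host_lock and sp->wait):

/* Illustrative only: the lock/wait/relock idiom used by
 * _qla1280_wait_for_single_command(); the real helper additionally
 * re-checks CMD_HANDLE(cmd) before deciding SUCCESS. */
static int example_bounded_wait(spinlock_t *lock, struct completion *done)
{
	unsigned long left;

	spin_unlock_irq(lock);		/* let the ISR complete the command */
	left = wait_for_completion_timeout(done, 4 * HZ);
	spin_lock_irq(lock);

	return left ? SUCCESS : FAILED;
}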
770/************************************************************************** 823/**************************************************************************
771 * qla1200_error_action 824 * qla1280_error_action
772 * The function will attempt to perform a specified error action and 825 * The function will attempt to perform a specified error action and
773 * wait for the results (or time out). 826 * wait for the results (or time out).
774 * 827 *
@@ -780,11 +833,6 @@ static void qla1280_mailbox_timeout(unsigned long __data)
780 * Returns: 833 * Returns:
781 * SUCCESS or FAILED 834 * SUCCESS or FAILED
782 * 835 *
783 * Note:
784 * Resetting the bus always succeeds - is has to, otherwise the
785 * kernel will panic! Try a surgical technique - sending a BUS
786 * DEVICE RESET message - on the offending target before pulling
787 * the SCSI bus reset line.
788 **************************************************************************/ 836 **************************************************************************/
789static int 837static int
790qla1280_error_action(struct scsi_cmnd *cmd, enum action action) 838qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
@@ -792,13 +840,19 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
792 struct scsi_qla_host *ha; 840 struct scsi_qla_host *ha;
793 int bus, target, lun; 841 int bus, target, lun;
794 struct srb *sp; 842 struct srb *sp;
795 uint16_t data; 843 int i, found;
796 unsigned char *handle; 844 int result=FAILED;
797 int result, i; 845 int wait_for_bus=-1;
846 int wait_for_target = -1;
798 DECLARE_COMPLETION_ONSTACK(wait); 847 DECLARE_COMPLETION_ONSTACK(wait);
799 struct timer_list timer; 848
849 ENTER("qla1280_error_action");
800 850
801 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); 851 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
852 sp = (struct srb *)CMD_SP(cmd);
853 bus = SCSI_BUS_32(cmd);
854 target = SCSI_TCN_32(cmd);
855 lun = SCSI_LUN_32(cmd);
802 856
803 dprintk(4, "error_action %i, istatus 0x%04x\n", action, 857 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
804 RD_REG_WORD(&ha->iobase->istatus)); 858 RD_REG_WORD(&ha->iobase->istatus));
@@ -807,99 +861,47 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
807 RD_REG_WORD(&ha->iobase->host_cmd), 861 RD_REG_WORD(&ha->iobase->host_cmd),
808 RD_REG_WORD(&ha->iobase->ictrl), jiffies); 862 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
809 863
810 ENTER("qla1280_error_action");
811 if (qla1280_verbose) 864 if (qla1280_verbose)
812 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " 865 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
813 "Handle=0x%p, action=0x%x\n", 866 "Handle=0x%p, action=0x%x\n",
814 ha->host_no, cmd, CMD_HANDLE(cmd), action); 867 ha->host_no, cmd, CMD_HANDLE(cmd), action);
815 868
816 if (cmd == NULL) {
817 printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
818 "si_Cmnd pointer, failing.\n");
819 LEAVE("qla1280_error_action");
820 return FAILED;
821 }
822
823 ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
824 sp = (struct srb *)CMD_SP(cmd);
825 handle = CMD_HANDLE(cmd);
826
827 /* Check for pending interrupts. */
828 data = qla1280_debounce_register(&ha->iobase->istatus);
829 /*
830 * The io_request_lock is held when the reset handler is called, hence
831 * the interrupt handler cannot be running in parallel as it also
832 * grabs the lock. /Jes
833 */
834 if (data & RISC_INT)
835 qla1280_isr(ha, &ha->done_q);
836
837 /* 869 /*
838 * Determine the suggested action that the mid-level driver wants 870 * Check to see if we have the command in the outstanding_cmds[]
839 * us to perform. 871 * array. If not then it must have completed before this error
872 * action was initiated. If the error_action isn't ABORT_COMMAND
873 * then the driver must proceed with the requested action.
840 */ 874 */
841 if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) { 875 found = -1;
842 if(action == ABORT_COMMAND) { 876 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
843 /* we never got this command */ 877 if (sp == ha->outstanding_cmds[i]) {
844 printk(KERN_INFO "qla1280: Aborting a NULL handle\n"); 878 found = i;
845 return SUCCESS; /* no action - we don't have command */ 879 sp->wait = &wait; /* we'll wait for it to complete */
880 break;
846 } 881 }
847 } else {
848 sp->wait = &wait;
849 } 882 }
850 883
851 bus = SCSI_BUS_32(cmd); 884 if (found < 0) { /* driver doesn't have command */
852 target = SCSI_TCN_32(cmd); 885 result = SUCCESS;
853 lun = SCSI_LUN_32(cmd); 886 if (qla1280_verbose) {
887 printk(KERN_INFO
888 "scsi(%ld:%d:%d:%d): specified command has "
889 "already completed.\n", ha->host_no, bus,
890 target, lun);
891 }
892 }
854 893
855 /* Overloading result. Here it means the success or fail of the
856 * *issue* of the action. When we return from the routine, it must
857 * mean the actual success or fail of the action */
858 result = FAILED;
859 switch (action) { 894 switch (action) {
860 case FAIL:
861 break;
862 895
863 case ABORT_COMMAND: 896 case ABORT_COMMAND:
864 if ((sp->flags & SRB_ABORT_PENDING)) { 897 dprintk(1, "qla1280: RISC aborting command\n");
865 printk(KERN_WARNING 898 /*
866 "scsi(): Command has a pending abort " 899 * The abort might fail due to race when the host_lock
867 "message - ABORT_PENDING.\n"); 900 * is released to issue the abort. As such, we
868 /* This should technically be impossible since we 901 * don't bother to check the return status.
869 * now wait for abort completion */ 902 */
870 break; 903 if (found >= 0)
871 } 904 qla1280_abort_command(ha, sp, found);
872
873 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
874 if (sp == ha->outstanding_cmds[i]) {
875 dprintk(1, "qla1280: RISC aborting command\n");
876 if (qla1280_abort_command(ha, sp, i) == 0)
877 result = SUCCESS;
878 else {
879 /*
880 * Since we don't know what might
881 * have happend to the command, it
882 * is unsafe to remove it from the
883 * device's queue at this point.
884 * Wait and let the escalation
885 * process take care of it.
886 */
887 printk(KERN_WARNING
888 "scsi(%li:%i:%i:%i): Unable"
889 " to abort command!\n",
890 ha->host_no, bus, target, lun);
891 }
892 }
893 }
894 break;
895
896 case ABORT_DEVICE:
897 if (qla1280_verbose)
898 printk(KERN_INFO
899 "scsi(%ld:%d:%d:%d): Queueing abort device "
900 "command.\n", ha->host_no, bus, target, lun);
901 if (qla1280_abort_device(ha, bus, target, lun) == 0)
902 result = SUCCESS;
903 break; 905 break;
904 906
905 case DEVICE_RESET: 907 case DEVICE_RESET:
@@ -907,16 +909,21 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
907 printk(KERN_INFO 909 printk(KERN_INFO
908 "scsi(%ld:%d:%d:%d): Queueing device reset " 910 "scsi(%ld:%d:%d:%d): Queueing device reset "
909 "command.\n", ha->host_no, bus, target, lun); 911 "command.\n", ha->host_no, bus, target, lun);
910 if (qla1280_device_reset(ha, bus, target) == 0) 912 if (qla1280_device_reset(ha, bus, target) == 0) {
911 result = SUCCESS; 913 /* issued device reset, set wait conditions */
914 wait_for_bus = bus;
915 wait_for_target = target;
916 }
912 break; 917 break;
913 918
914 case BUS_RESET: 919 case BUS_RESET:
915 if (qla1280_verbose) 920 if (qla1280_verbose)
916 printk(KERN_INFO "qla1280(%ld:%d): Issued bus " 921 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
917 "reset.\n", ha->host_no, bus); 922 "reset.\n", ha->host_no, bus);
918 if (qla1280_bus_reset(ha, bus) == 0) 923 if (qla1280_bus_reset(ha, bus) == 0) {
919 result = SUCCESS; 924 /* issued bus reset, set wait conditions */
925 wait_for_bus = bus;
926 }
920 break; 927 break;
921 928
922 case ADAPTER_RESET: 929 case ADAPTER_RESET:
@@ -929,55 +936,48 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
929 "continue automatically\n", ha->host_no); 936 "continue automatically\n", ha->host_no);
930 } 937 }
931 ha->flags.reset_active = 1; 938 ha->flags.reset_active = 1;
932 /* 939
933 * We restarted all of the commands automatically, so the 940 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
934 * mid-level code can expect completions momentitarily. 941 result = FAILED;
935 */ 942 }
936 if (qla1280_abort_isp(ha) == 0)
937 result = SUCCESS;
938 943
939 ha->flags.reset_active = 0; 944 ha->flags.reset_active = 0;
940 } 945 }
941 946
942 if (!list_empty(&ha->done_q)) 947 /*
943 qla1280_done(ha); 948 * At this point, the host_lock has been released and retaken
944 949 * by the issuance of the mailbox command.
945 /* If we didn't manage to issue the action, or we have no 950 * Wait for the command passed in by the mid-layer if it
946 * command to wait for, exit here */ 951 * was found by the driver. It might have been returned
947 if (result == FAILED || handle == NULL || 952 * between eh recovery steps, hence the check of the "found"
948 handle == (unsigned char *)INVALID_HANDLE) { 953 * variable.
949 /* 954 */
950 * Clear completion queue to avoid qla1280_done() trying
951 * to complete the command at a later stage after we
952 * have exited the current context
953 */
954 sp->wait = NULL;
955 goto leave;
956 }
957 955
958 /* set up a timer just in case we're really jammed */ 956 if (found >= 0)
959 init_timer(&timer); 957 result = _qla1280_wait_for_single_command(ha, sp, &wait);
960 timer.expires = jiffies + 4*HZ;
961 timer.data = (unsigned long)cmd;
962 timer.function = qla1280_error_wait_timeout;
963 add_timer(&timer);
964 958
965 /* wait for the action to complete (or the timer to expire) */ 959 if (action == ABORT_COMMAND && result != SUCCESS) {
966 spin_unlock_irq(ha->host->host_lock); 960 printk(KERN_WARNING
967 wait_for_completion(&wait); 961 "scsi(%li:%i:%i:%i): "
968 del_timer_sync(&timer); 962 "Unable to abort command!\n",
969 spin_lock_irq(ha->host->host_lock); 963 ha->host_no, bus, target, lun);
970 sp->wait = NULL; 964 }
971 965
972 /* the only action we might get a fail for is abort */ 966 /*
973 if (action == ABORT_COMMAND) { 967 * If the command passed in by the mid-layer has been
974 if(sp->flags & SRB_ABORTED) 968 * returned by the board, then wait for any additional
975 result = SUCCESS; 969 * commands which are supposed to complete based upon
976 else 970 * the error action.
977 result = FAILED; 971 *
972 * All commands are unconditionally returned during a
973 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
974 * to wait for them.
975 */
976 if (result == SUCCESS && wait_for_bus >= 0) {
977 result = qla1280_wait_for_pending_commands(ha,
978 wait_for_bus, wait_for_target);
978 } 979 }
979 980
980 leave:
981 dprintk(1, "RESET returning %d\n", result); 981 dprintk(1, "RESET returning %d\n", result);
982 982
983 LEAVE("qla1280_error_action"); 983 LEAVE("qla1280_error_action");
@@ -1280,13 +1280,12 @@ qla1280_done(struct scsi_qla_host *ha)
1280 switch ((CMD_RESULT(cmd) >> 16)) { 1280 switch ((CMD_RESULT(cmd) >> 16)) {
1281 case DID_RESET: 1281 case DID_RESET:
1282 /* Issue marker command. */ 1282 /* Issue marker command. */
1283 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 1283 if (!ha->flags.abort_isp_active)
1284 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1284 break; 1285 break;
1285 case DID_ABORT: 1286 case DID_ABORT:
1286 sp->flags &= ~SRB_ABORT_PENDING; 1287 sp->flags &= ~SRB_ABORT_PENDING;
1287 sp->flags |= SRB_ABORTED; 1288 sp->flags |= SRB_ABORTED;
1288 if (sp->flags & SRB_TIMEOUT)
1289 CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
1290 break; 1289 break;
1291 default: 1290 default:
1292 break; 1291 break;
@@ -1296,12 +1295,11 @@ qla1280_done(struct scsi_qla_host *ha)
1296 scsi_dma_unmap(cmd); 1295 scsi_dma_unmap(cmd);
1297 1296
1298 /* Call the mid-level driver interrupt handler */ 1297 /* Call the mid-level driver interrupt handler */
1299 CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
1300 ha->actthreads--; 1298 ha->actthreads--;
1301 1299
1302 (*(cmd)->scsi_done)(cmd); 1300 if (sp->wait == NULL)
1303 1301 (*(cmd)->scsi_done)(cmd);
1304 if(sp->wait != NULL) 1302 else
1305 complete(sp->wait); 1303 complete(sp->wait);
1306 } 1304 }
1307 LEAVE("qla1280_done"); 1305 LEAVE("qla1280_done");
@@ -2417,9 +2415,6 @@ static int
2417qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) 2415qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2418{ 2416{
2419 struct device_reg __iomem *reg = ha->iobase; 2417 struct device_reg __iomem *reg = ha->iobase;
2420#if 0
2421 LIST_HEAD(done_q);
2422#endif
2423 int status = 0; 2418 int status = 0;
2424 int cnt; 2419 int cnt;
2425 uint16_t *optr, *iptr; 2420 uint16_t *optr, *iptr;
@@ -2493,19 +2488,9 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2493 mr = MAILBOX_REGISTER_COUNT; 2488 mr = MAILBOX_REGISTER_COUNT;
2494 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); 2489 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2495 2490
2496#if 0
2497 /* Go check for any response interrupts pending. */
2498 qla1280_isr(ha, &done_q);
2499#endif
2500
2501 if (ha->flags.reset_marker) 2491 if (ha->flags.reset_marker)
2502 qla1280_rst_aen(ha); 2492 qla1280_rst_aen(ha);
2503 2493
2504#if 0
2505 if (!list_empty(&done_q))
2506 qla1280_done(ha, &done_q);
2507#endif
2508
2509 if (status) 2494 if (status)
2510 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " 2495 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2511 "0x%x ****\n", mb[0]); 2496 "0x%x ****\n", mb[0]);
@@ -2641,41 +2626,6 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2641} 2626}
2642 2627
2643/* 2628/*
2644 * qla1280_abort_device
2645 * Issue an abort message to the device
2646 *
2647 * Input:
2648 * ha = adapter block pointer.
2649 * bus = SCSI BUS.
2650 * target = SCSI ID.
2651 * lun = SCSI LUN.
2652 *
2653 * Returns:
2654 * 0 = success
2655 */
2656static int
2657qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun)
2658{
2659 uint16_t mb[MAILBOX_REGISTER_COUNT];
2660 int status;
2661
2662 ENTER("qla1280_abort_device");
2663
2664 mb[0] = MBC_ABORT_DEVICE;
2665 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2666 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2667
2668 /* Issue marker command. */
2669 qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN);
2670
2671 if (status)
2672 dprintk(2, "qla1280_abort_device: **** FAILED ****\n");
2673
2674 LEAVE("qla1280_abort_device");
2675 return status;
2676}
2677
2678/*
2679 * qla1280_abort_command 2629 * qla1280_abort_command
2680 * Abort command aborts a specified IOCB. 2630 * Abort command aborts a specified IOCB.
2681 * 2631 *
@@ -2833,7 +2783,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2833 2783
2834 /* If room for request in request ring. */ 2784 /* If room for request in request ring. */
2835 if ((req_cnt + 2) >= ha->req_q_cnt) { 2785 if ((req_cnt + 2) >= ha->req_q_cnt) {
2836 status = 1; 2786 status = SCSI_MLQUEUE_HOST_BUSY;
2837 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" 2787 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2838 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 2788 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2839 req_cnt); 2789 req_cnt);
@@ -2845,7 +2795,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2845 ha->outstanding_cmds[cnt] != NULL; cnt++); 2795 ha->outstanding_cmds[cnt] != NULL; cnt++);
2846 2796
2847 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 2797 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2848 status = 1; 2798 status = SCSI_MLQUEUE_HOST_BUSY;
2849 dprintk(2, "qla1280_start_scsi: NO ROOM IN " 2799 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2850 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 2800 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2851 goto out; 2801 goto out;
@@ -3108,7 +3058,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3108 ha->req_q_cnt, seg_cnt); 3058 ha->req_q_cnt, seg_cnt);
3109 /* If room for request in request ring. */ 3059 /* If room for request in request ring. */
3110 if ((req_cnt + 2) >= ha->req_q_cnt) { 3060 if ((req_cnt + 2) >= ha->req_q_cnt) {
3111 status = 1; 3061 status = SCSI_MLQUEUE_HOST_BUSY;
3112 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " 3062 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3113 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, 3063 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3114 ha->req_q_cnt, req_cnt); 3064 ha->req_q_cnt, req_cnt);
@@ -3120,7 +3070,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3120 (ha->outstanding_cmds[cnt] != 0); cnt++) ; 3070 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3121 3071
3122 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3072 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3123 status = 1; 3073 status = SCSI_MLQUEUE_HOST_BUSY;
3124 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " 3074 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3125 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); 3075 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3126 goto out; 3076 goto out;
@@ -3487,6 +3437,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3487 3437
3488 /* Save ISP completion status */ 3438 /* Save ISP completion status */
3489 CMD_RESULT(sp->cmd) = 0; 3439 CMD_RESULT(sp->cmd) = 0;
3440 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3490 3441
3491 /* Place block on done queue */ 3442 /* Place block on done queue */
3492 list_add_tail(&sp->list, done_q); 3443 list_add_tail(&sp->list, done_q);
@@ -3495,7 +3446,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3495 * If we get here we have a real problem! 3446 * If we get here we have a real problem!
3496 */ 3447 */
3497 printk(KERN_WARNING 3448 printk(KERN_WARNING
3498 "qla1280: ISP invalid handle"); 3449 "qla1280: ISP invalid handle\n");
3499 } 3450 }
3500 } 3451 }
3501 break; 3452 break;
@@ -3753,6 +3704,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3753 } 3704 }
3754 } 3705 }
3755 3706
3707 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3708
3756 /* Place command on done queue. */ 3709 /* Place command on done queue. */
3757 list_add_tail(&sp->list, done_q); 3710 list_add_tail(&sp->list, done_q);
3758 out: 3711 out:
@@ -3808,6 +3761,8 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3808 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 3761 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3809 } 3762 }
3810 3763
3764 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3765
3811 /* Place command on done queue. */ 3766 /* Place command on done queue. */
3812 list_add_tail(&sp->list, done_q); 3767 list_add_tail(&sp->list, done_q);
3813 } 3768 }
@@ -3858,19 +3813,16 @@ qla1280_abort_isp(struct scsi_qla_host *ha)
3858 struct scsi_cmnd *cmd; 3813 struct scsi_cmnd *cmd;
3859 sp = ha->outstanding_cmds[cnt]; 3814 sp = ha->outstanding_cmds[cnt];
3860 if (sp) { 3815 if (sp) {
3861
3862 cmd = sp->cmd; 3816 cmd = sp->cmd;
3863 CMD_RESULT(cmd) = DID_RESET << 16; 3817 CMD_RESULT(cmd) = DID_RESET << 16;
3864 3818 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3865 sp->cmd = NULL;
3866 ha->outstanding_cmds[cnt] = NULL; 3819 ha->outstanding_cmds[cnt] = NULL;
3867 3820 list_add_tail(&sp->list, &ha->done_q);
3868 (*cmd->scsi_done)(cmd);
3869
3870 sp->flags = 0;
3871 } 3821 }
3872 } 3822 }
3873 3823
3824 qla1280_done(ha);
3825
3874 status = qla1280_load_firmware(ha); 3826 status = qla1280_load_firmware(ha);
3875 if (status) 3827 if (status)
3876 goto out; 3828 goto out;
@@ -3955,13 +3907,6 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3955 3907
3956 if (scsi_control == SCSI_PHASE_INVALID) { 3908 if (scsi_control == SCSI_PHASE_INVALID) {
3957 ha->bus_settings[bus].scsi_bus_dead = 1; 3909 ha->bus_settings[bus].scsi_bus_dead = 1;
3958#if 0
3959 CMD_RESULT(cp) = DID_NO_CONNECT << 16;
3960 CMD_HANDLE(cp) = INVALID_HANDLE;
3961 /* ha->actthreads--; */
3962
3963 (*(cp)->scsi_done)(cp);
3964#endif
3965 return 1; /* bus is dead */ 3910 return 1; /* bus is dead */
3966 } else { 3911 } else {
3967 ha->bus_settings[bus].scsi_bus_dead = 0; 3912 ha->bus_settings[bus].scsi_bus_dead = 0;
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index d7c44b8d2b4f..834884b9eed5 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -88,7 +88,8 @@
88 88
89/* Maximum outstanding commands in ISP queues */ 89/* Maximum outstanding commands in ISP queues */
90#define MAX_OUTSTANDING_COMMANDS 512 90#define MAX_OUTSTANDING_COMMANDS 512
91#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) 91#define COMPLETED_HANDLE ((unsigned char *) \
92 (MAX_OUTSTANDING_COMMANDS + 2))
92 93
93/* ISP request and response entry counts (37-65535) */ 94/* ISP request and response entry counts (37-65535) */
94#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ 95#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */
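Replacing INVALID_HANDLE with COMPLETED_HANDLE (typed as an unsigned char * so it can be stored directly through CMD_HANDLE()) gives the error-action path a sentinel it can test without casts: the interrupt and abort paths stamp the handle before queueing the command on done_q, so the eh thread can tell "already returned by the firmware" apart from "still owned by the RISC". In sketch form, mirroring the helpers earlier in this patch:

/* Illustrative fragment only: the sentinel test the eh path relies on. */
if (CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
	status = SUCCESS;		/* firmware already gave it back */
	(*cmd->scsi_done)(cmd);		/* hand it to the mid-layer */
} else {
	status = FAILED;		/* still outstanding, escalate */
}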
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b09993a06576..0f8796201504 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
97 return 0; 97 return 0;
98 98
99 if (IS_NOCACHE_VPD_TYPE(ha)) 99 if (IS_NOCACHE_VPD_TYPE(ha))
100 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2, 100 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
101 ha->nvram_size); 101 ha->nvram_size);
102 return memory_read_from_buffer(buf, count, &off, ha->nvram, 102 return memory_read_from_buffer(buf, count, &off, ha->nvram,
103 ha->nvram_size); 103 ha->nvram_size);
@@ -692,6 +692,109 @@ static struct bin_attribute sysfs_edc_status_attr = {
692 .read = qla2x00_sysfs_read_edc_status, 692 .read = qla2x00_sysfs_read_edc_status,
693}; 693};
694 694
695static ssize_t
696qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
697 struct bin_attribute *bin_attr,
698 char *buf, loff_t off, size_t count)
699{
700 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
701 struct device, kobj)));
702 struct qla_hw_data *ha = vha->hw;
703 int rval;
704 uint16_t actual_size;
705
706 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
707 return 0;
708
709 if (ha->xgmac_data)
710 goto do_read;
711
712 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
713 &ha->xgmac_data_dma, GFP_KERNEL);
714 if (!ha->xgmac_data) {
715 qla_printk(KERN_WARNING, ha,
716 "Unable to allocate memory for XGMAC read-data.\n");
717 return 0;
718 }
719
720do_read:
721 actual_size = 0;
722 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
723
724 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
725 XGMAC_DATA_SIZE, &actual_size);
726 if (rval != QLA_SUCCESS) {
727 qla_printk(KERN_WARNING, ha,
728 "Unable to read XGMAC data (%x).\n", rval);
729 count = 0;
730 }
731
732 count = actual_size > count ? count: actual_size;
733 memcpy(buf, ha->xgmac_data, count);
734
735 return count;
736}
737
738static struct bin_attribute sysfs_xgmac_stats_attr = {
739 .attr = {
740 .name = "xgmac_stats",
741 .mode = S_IRUSR,
742 },
743 .size = 0,
744 .read = qla2x00_sysfs_read_xgmac_stats,
745};
746
747static ssize_t
748qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
749 struct bin_attribute *bin_attr,
750 char *buf, loff_t off, size_t count)
751{
752 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
753 struct device, kobj)));
754 struct qla_hw_data *ha = vha->hw;
755 int rval;
756 uint16_t actual_size;
757
758 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
759 return 0;
760
761 if (ha->dcbx_tlv)
762 goto do_read;
763
764 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
765 &ha->dcbx_tlv_dma, GFP_KERNEL);
766 if (!ha->dcbx_tlv) {
767 qla_printk(KERN_WARNING, ha,
768 "Unable to allocate memory for DCBX TLV read-data.\n");
769 return 0;
770 }
771
772do_read:
773 actual_size = 0;
774 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
775
776 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
777 DCBX_TLV_DATA_SIZE);
778 if (rval != QLA_SUCCESS) {
779 qla_printk(KERN_WARNING, ha,
780 "Unable to read DCBX TLV data (%x).\n", rval);
781 count = 0;
782 }
783
784 memcpy(buf, ha->dcbx_tlv, count);
785
786 return count;
787}
788
789static struct bin_attribute sysfs_dcbx_tlv_attr = {
790 .attr = {
791 .name = "dcbx_tlv",
792 .mode = S_IRUSR,
793 },
794 .size = 0,
795 .read = qla2x00_sysfs_read_dcbx_tlv,
796};
797
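Both new bin_attribute readers above share the same lazily-allocated DMA buffer pattern: allocate the coherent buffer on the first read, cache it in the qla_hw_data so later reads reuse it, and zero it before each mailbox query. A hedged sketch of that shape, where ha->example_buf, EXAMPLE_SIZE and the fill step are hypothetical stand-ins:

/* Illustrative pattern only -- field and size names are hypothetical. */
if (!ha->example_buf) {
	ha->example_buf = dma_alloc_coherent(&ha->pdev->dev, EXAMPLE_SIZE,
	    &ha->example_buf_dma, GFP_KERNEL);
	if (!ha->example_buf) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for example read-data.\n");
		return 0;	/* sysfs read: report nothing */
	}
}
memset(ha->example_buf, 0, EXAMPLE_SIZE);
/* ... mailbox command fills the buffer, then memcpy() into 'buf' ... */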
695static struct sysfs_entry { 798static struct sysfs_entry {
696 char *name; 799 char *name;
697 struct bin_attribute *attr; 800 struct bin_attribute *attr;
@@ -706,6 +809,8 @@ static struct sysfs_entry {
706 { "reset", &sysfs_reset_attr, }, 809 { "reset", &sysfs_reset_attr, },
707 { "edc", &sysfs_edc_attr, 2 }, 810 { "edc", &sysfs_edc_attr, 2 },
708 { "edc_status", &sysfs_edc_status_attr, 2 }, 811 { "edc_status", &sysfs_edc_status_attr, 2 },
812 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
813 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
709 { NULL }, 814 { NULL },
710}; 815};
711 816
@@ -721,6 +826,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
721 continue; 826 continue;
722 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) 827 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
723 continue; 828 continue;
829 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
830 continue;
724 831
725 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 832 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
726 iter->attr); 833 iter->attr);
@@ -743,6 +850,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
743 continue; 850 continue;
744 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) 851 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
745 continue; 852 continue;
853 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
854 continue;
746 855
747 sysfs_remove_bin_file(&host->shost_gendev.kobj, 856 sysfs_remove_bin_file(&host->shost_gendev.kobj,
748 iter->attr); 857 iter->attr);
@@ -1088,6 +1197,58 @@ qla2x00_flash_block_size_show(struct device *dev,
1088 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); 1197 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1089} 1198}
1090 1199
1200static ssize_t
1201qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1202 char *buf)
1203{
1204 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1205
1206 if (!IS_QLA81XX(vha->hw))
1207 return snprintf(buf, PAGE_SIZE, "\n");
1208
1209 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1210}
1211
1212static ssize_t
1213qla2x00_vn_port_mac_address_show(struct device *dev,
1214 struct device_attribute *attr, char *buf)
1215{
1216 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1217
1218 if (!IS_QLA81XX(vha->hw))
1219 return snprintf(buf, PAGE_SIZE, "\n");
1220
1221 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1222 vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1223 vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1224 vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1225}
1226
1227static ssize_t
1228qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1229 char *buf)
1230{
1231 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1232
1233 return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1234}
1235
1236static ssize_t
1237qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1238 char *buf)
1239{
1240 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1241 int rval;
1242 uint16_t state[5];
1243
1244 rval = qla2x00_get_firmware_state(vha, state);
1245 if (rval != QLA_SUCCESS)
1246 memset(state, -1, sizeof(state));
1247
1248 return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1249 state[1], state[2], state[3], state[4]);
1250}
1251
1091static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1252static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1092static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1253static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1093static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1254static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1116,6 +1277,11 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1116static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); 1277static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1117static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, 1278static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1118 NULL); 1279 NULL);
1280static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1281static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1282 qla2x00_vn_port_mac_address_show, NULL);
1283static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1284static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1119 1285
1120struct device_attribute *qla2x00_host_attrs[] = { 1286struct device_attribute *qla2x00_host_attrs[] = {
1121 &dev_attr_driver_version, 1287 &dev_attr_driver_version,
@@ -1138,6 +1304,10 @@ struct device_attribute *qla2x00_host_attrs[] = {
1138 &dev_attr_mpi_version, 1304 &dev_attr_mpi_version,
1139 &dev_attr_phy_version, 1305 &dev_attr_phy_version,
1140 &dev_attr_flash_block_size, 1306 &dev_attr_flash_block_size,
1307 &dev_attr_vlan_id,
1308 &dev_attr_vn_port_mac_address,
1309 &dev_attr_fabric_param,
1310 &dev_attr_fw_state,
1141 NULL, 1311 NULL,
1142}; 1312};
1143 1313
@@ -1313,7 +1483,8 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1313 * At this point all fcport's software-states are cleared. Perform any 1483 * At this point all fcport's software-states are cleared. Perform any
1314 * final cleanup of firmware resources (PCBs and XCBs). 1484 * final cleanup of firmware resources (PCBs and XCBs).
1315 */ 1485 */
1316 if (fcport->loop_id != FC_NO_LOOP_ID) 1486 if (fcport->loop_id != FC_NO_LOOP_ID &&
1487 !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1317 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 1488 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1318 fcport->loop_id, fcport->d_id.b.domain, 1489 fcport->loop_id, fcport->d_id.b.domain,
1319 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1490 fcport->d_id.b.area, fcport->d_id.b.al_pa);
@@ -1437,11 +1608,13 @@ static int
1437qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 1608qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1438{ 1609{
1439 int ret = 0; 1610 int ret = 0;
1440 int cnt = 0; 1611 uint8_t qos = 0;
1441 uint8_t qos = QLA_DEFAULT_QUE_QOS;
1442 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); 1612 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1443 scsi_qla_host_t *vha = NULL; 1613 scsi_qla_host_t *vha = NULL;
1444 struct qla_hw_data *ha = base_vha->hw; 1614 struct qla_hw_data *ha = base_vha->hw;
1615 uint16_t options = 0;
1616 int cnt;
1617 struct req_que *req = ha->req_q_map[0];
1445 1618
1446 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1619 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1447 if (ret) { 1620 if (ret) {
@@ -1497,23 +1670,39 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1497 1670
1498 qla24xx_vport_disable(fc_vport, disable); 1671 qla24xx_vport_disable(fc_vport, disable);
1499 1672
1500 /* Create a queue pair for the vport */ 1673 if (ql2xmultique_tag) {
1501 if (ha->mqenable) { 1674 req = ha->req_q_map[1];
1502 if (ha->npiv_info) { 1675 goto vport_queue;
1503 for (; cnt < ha->nvram_npiv_size; cnt++) { 1676 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1504 if (ha->npiv_info[cnt].port_name == 1677 goto vport_queue;
1505 vha->port_name && 1678 /* Create a request queue in QoS mode for the vport */
1506 ha->npiv_info[cnt].node_name == 1679 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1507 vha->node_name) { 1680 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1508 qos = ha->npiv_info[cnt].q_qos; 1681 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1509 break; 1682 8) == 0) {
1510 } 1683 qos = ha->npiv_info[cnt].q_qos;
1511 } 1684 break;
1685 }
1686 }
1687 if (qos) {
1688 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1689 qos);
1690 if (!ret)
1691 qla_printk(KERN_WARNING, ha,
1692 "Can't create request queue for vp_idx:%d\n",
1693 vha->vp_idx);
1694 else {
1695 DEBUG2(qla_printk(KERN_INFO, ha,
1696 "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1697 ret, qos, vha->vp_idx));
1698 req = ha->req_q_map[ret];
1512 } 1699 }
1513 qla25xx_create_queues(vha, qos);
1514 } 1700 }
1515 1701
1702vport_queue:
1703 vha->req = req;
1516 return 0; 1704 return 0;
1705
1517vport_create_failed_2: 1706vport_create_failed_2:
1518 qla24xx_disable_vp(vha); 1707 qla24xx_disable_vp(vha);
1519 qla24xx_deallocate_vp_id(vha); 1708 qla24xx_deallocate_vp_id(vha);
@@ -1554,8 +1743,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1554 vha->host_no, vha->vp_idx, vha)); 1743 vha->host_no, vha->vp_idx, vha));
1555 } 1744 }
1556 1745
1557 if (ha->mqenable) { 1746 if (vha->req->id && !ql2xmultique_tag) {
1558 if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) 1747 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1559 qla_printk(KERN_WARNING, ha, 1748 qla_printk(KERN_WARNING, ha,
1560 "Queue delete failed.\n"); 1749 "Queue delete failed.\n");
1561 } 1750 }
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 34760f8d4f17..4a990f4da4ea 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -149,11 +149,9 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
149 int rval = QLA_SUCCESS; 149 int rval = QLA_SUCCESS;
150 uint32_t cnt; 150 uint32_t cnt;
151 151
152 if (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE)
153 return rval;
154
155 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); 152 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
156 for (cnt = 30000; (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 && 153 for (cnt = 30000;
154 ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
157 rval == QLA_SUCCESS; cnt--) { 155 rval == QLA_SUCCESS; cnt--) {
158 if (cnt) 156 if (cnt)
159 udelay(100); 157 udelay(100);
@@ -351,7 +349,7 @@ static inline void *
351qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 349qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
352{ 350{
353 uint32_t cnt, que_idx; 351 uint32_t cnt, que_idx;
354 uint8_t req_cnt, rsp_cnt, que_cnt; 352 uint8_t que_cnt;
355 struct qla2xxx_mq_chain *mq = ptr; 353 struct qla2xxx_mq_chain *mq = ptr;
356 struct device_reg_25xxmq __iomem *reg; 354 struct device_reg_25xxmq __iomem *reg;
357 355
@@ -363,9 +361,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
363 mq->type = __constant_htonl(DUMP_CHAIN_MQ); 361 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
364 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); 362 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
365 363
366 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 364 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
367 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 365 ha->max_req_queues : ha->max_rsp_queues;
368 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
369 mq->count = htonl(que_cnt); 366 mq->count = htonl(que_cnt);
370 for (cnt = 0; cnt < que_cnt; cnt++) { 367 for (cnt = 0; cnt < que_cnt; cnt++) {
371 reg = (struct device_reg_25xxmq *) ((void *) 368 reg = (struct device_reg_25xxmq *) ((void *)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 714ee67567e1..00aa48d975a6 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -93,6 +93,7 @@
93#define LSD(x) ((uint32_t)((uint64_t)(x))) 93#define LSD(x) ((uint32_t)((uint64_t)(x)))
94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) 94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
95 95
96#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
96 97
97/* 98/*
98 * I/O register 99 * I/O register
@@ -179,6 +180,7 @@
179#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ 180#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
180#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
181#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
183#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
182 184
183struct req_que; 185struct req_que;
184 186
@@ -186,7 +188,6 @@ struct req_que;
186 * SCSI Request Block 188 * SCSI Request Block
187 */ 189 */
188typedef struct srb { 190typedef struct srb {
189 struct req_que *que;
190 struct fc_port *fcport; 191 struct fc_port *fcport;
191 192
192 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 193 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -2008,7 +2009,7 @@ typedef struct vport_params {
2008#define VP_RET_CODE_NOT_FOUND 6 2009#define VP_RET_CODE_NOT_FOUND 6
2009 2010
2010struct qla_hw_data; 2011struct qla_hw_data;
2011 2012struct rsp_que;
2012/* 2013/*
2013 * ISP operations 2014 * ISP operations
2014 */ 2015 */
@@ -2030,10 +2031,9 @@ struct isp_operations {
2030 void (*enable_intrs) (struct qla_hw_data *); 2031 void (*enable_intrs) (struct qla_hw_data *);
2031 void (*disable_intrs) (struct qla_hw_data *); 2032 void (*disable_intrs) (struct qla_hw_data *);
2032 2033
2033 int (*abort_command) (struct scsi_qla_host *, srb_t *, 2034 int (*abort_command) (srb_t *);
2034 struct req_que *); 2035 int (*target_reset) (struct fc_port *, unsigned int, int);
2035 int (*target_reset) (struct fc_port *, unsigned int); 2036 int (*lun_reset) (struct fc_port *, unsigned int, int);
2036 int (*lun_reset) (struct fc_port *, unsigned int);
2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, 2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
2038 uint8_t, uint8_t, uint16_t *, uint8_t); 2038 uint8_t, uint8_t, uint16_t *, uint8_t);
2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, 2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2079,7 +2079,6 @@ struct isp_operations {
2079#define QLA_PCI_MSIX_CONTROL 0xa2 2079#define QLA_PCI_MSIX_CONTROL 0xa2
2080 2080
2081struct scsi_qla_host; 2081struct scsi_qla_host;
2082struct rsp_que;
2083 2082
2084struct qla_msix_entry { 2083struct qla_msix_entry {
2085 int have_irq; 2084 int have_irq;
@@ -2140,7 +2139,6 @@ struct qla_statistics {
2140#define MBC_INITIALIZE_MULTIQ 0x1f 2139#define MBC_INITIALIZE_MULTIQ 0x1f
2141#define QLA_QUE_PAGE 0X1000 2140#define QLA_QUE_PAGE 0X1000
2142#define QLA_MQ_SIZE 32 2141#define QLA_MQ_SIZE 32
2143#define QLA_MAX_HOST_QUES 16
2144#define QLA_MAX_QUEUES 256 2142#define QLA_MAX_QUEUES 256
2145#define ISP_QUE_REG(ha, id) \ 2143#define ISP_QUE_REG(ha, id) \
2146 ((ha->mqenable) ? \ 2144 ((ha->mqenable) ? \
@@ -2170,6 +2168,8 @@ struct rsp_que {
2170 struct qla_hw_data *hw; 2168 struct qla_hw_data *hw;
2171 struct qla_msix_entry *msix; 2169 struct qla_msix_entry *msix;
2172 struct req_que *req; 2170 struct req_que *req;
2171 srb_t *status_srb; /* status continuation entry */
2172 struct work_struct q_work;
2173}; 2173};
2174 2174
2175/* Request queue data structure */ 2175/* Request queue data structure */
@@ -2222,6 +2222,8 @@ struct qla_hw_data {
2222 uint32_t fce_enabled :1; 2222 uint32_t fce_enabled :1;
2223 uint32_t fac_supported :1; 2223 uint32_t fac_supported :1;
2224 uint32_t chip_reset_done :1; 2224 uint32_t chip_reset_done :1;
2225 uint32_t port0 :1;
2226 uint32_t running_gold_fw :1;
2225 } flags; 2227 } flags;
2226 2228
2227 /* This spinlock is used to protect "io transactions", you must 2229 /* This spinlock is used to protect "io transactions", you must
@@ -2246,7 +2248,8 @@ struct qla_hw_data {
2246 struct rsp_que **rsp_q_map; 2248 struct rsp_que **rsp_q_map;
2247 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2249 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2248 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2250 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2249 uint16_t max_queues; 2251 uint8_t max_req_queues;
2252 uint8_t max_rsp_queues;
2250 struct qla_npiv_entry *npiv_info; 2253 struct qla_npiv_entry *npiv_info;
2251 uint16_t nvram_npiv_size; 2254 uint16_t nvram_npiv_size;
2252 2255
@@ -2255,6 +2258,9 @@ struct qla_hw_data {
2255#define FLOGI_MID_SUPPORT BIT_10 2258#define FLOGI_MID_SUPPORT BIT_10
2256#define FLOGI_VSAN_SUPPORT BIT_12 2259#define FLOGI_VSAN_SUPPORT BIT_12
2257#define FLOGI_SP_SUPPORT BIT_13 2260#define FLOGI_SP_SUPPORT BIT_13
2261
2262 uint8_t port_no; /* Physical port of adapter */
2263
2258 /* Timeout timers. */ 2264 /* Timeout timers. */
2259 uint8_t loop_down_abort_time; /* port down timer */ 2265 uint8_t loop_down_abort_time; /* port down timer */
2260 atomic_t loop_down_timer; /* loop down timer */ 2266 atomic_t loop_down_timer; /* loop down timer */
@@ -2392,6 +2398,14 @@ struct qla_hw_data {
2392 dma_addr_t edc_data_dma; 2398 dma_addr_t edc_data_dma;
2393 uint16_t edc_data_len; 2399 uint16_t edc_data_len;
2394 2400
2401#define XGMAC_DATA_SIZE PAGE_SIZE
2402 void *xgmac_data;
2403 dma_addr_t xgmac_data_dma;
2404
2405#define DCBX_TLV_DATA_SIZE PAGE_SIZE
2406 void *dcbx_tlv;
2407 dma_addr_t dcbx_tlv_dma;
2408
2395 struct task_struct *dpc_thread; 2409 struct task_struct *dpc_thread;
2396 uint8_t dpc_active; /* DPC routine is active */ 2410 uint8_t dpc_active; /* DPC routine is active */
2397 2411
@@ -2510,6 +2524,7 @@ struct qla_hw_data {
2510 uint32_t flt_region_vpd; 2524 uint32_t flt_region_vpd;
2511 uint32_t flt_region_nvram; 2525 uint32_t flt_region_nvram;
2512 uint32_t flt_region_npiv_conf; 2526 uint32_t flt_region_npiv_conf;
2527 uint32_t flt_region_gold_fw;
2513 2528
2514 /* Needed for BEACON */ 2529 /* Needed for BEACON */
2515 uint16_t beacon_blink_led; 2530 uint16_t beacon_blink_led;
@@ -2536,6 +2551,7 @@ struct qla_hw_data {
2536 struct qla_chip_state_84xx *cs84xx; 2551 struct qla_chip_state_84xx *cs84xx;
2537 struct qla_statistics qla_stats; 2552 struct qla_statistics qla_stats;
2538 struct isp_operations *isp_ops; 2553 struct isp_operations *isp_ops;
2554 struct workqueue_struct *wq;
2539}; 2555};
2540 2556
2541/* 2557/*
@@ -2545,6 +2561,8 @@ typedef struct scsi_qla_host {
2545 struct list_head list; 2561 struct list_head list;
2546 struct list_head vp_fcports; /* list of fcports */ 2562 struct list_head vp_fcports; /* list of fcports */
2547 struct list_head work_list; 2563 struct list_head work_list;
2564 spinlock_t work_lock;
2565
2548 /* Commonly used flags and state information. */ 2566 /* Commonly used flags and state information. */
2549 struct Scsi_Host *host; 2567 struct Scsi_Host *host;
2550 unsigned long host_no; 2568 unsigned long host_no;
@@ -2591,8 +2609,6 @@ typedef struct scsi_qla_host {
2591#define SWITCH_FOUND BIT_0 2609#define SWITCH_FOUND BIT_0
2592#define DFLG_NO_CABLE BIT_1 2610#define DFLG_NO_CABLE BIT_1
2593 2611
2594 srb_t *status_srb; /* Status continuation entry. */
2595
2596 /* ISP configuration data. */ 2612 /* ISP configuration data. */
2597 uint16_t loop_id; /* Host adapter loop id */ 2613 uint16_t loop_id; /* Host adapter loop id */
2598 2614
@@ -2618,6 +2634,11 @@ typedef struct scsi_qla_host {
2618 uint8_t node_name[WWN_SIZE]; 2634 uint8_t node_name[WWN_SIZE];
2619 uint8_t port_name[WWN_SIZE]; 2635 uint8_t port_name[WWN_SIZE];
2620 uint8_t fabric_node_name[WWN_SIZE]; 2636 uint8_t fabric_node_name[WWN_SIZE];
2637
2638 uint16_t fcoe_vlan_id;
2639 uint16_t fcoe_fcf_idx;
2640 uint8_t fcoe_vn_port_mac[6];
2641
2621 uint32_t vp_abort_cnt; 2642 uint32_t vp_abort_cnt;
2622 2643
2623 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 2644 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
@@ -2643,7 +2664,7 @@ typedef struct scsi_qla_host {
2643#define VP_ERR_FAB_LOGOUT 4 2664#define VP_ERR_FAB_LOGOUT 4
2644#define VP_ERR_ADAP_NORESOURCES 5 2665#define VP_ERR_ADAP_NORESOURCES 5
2645 struct qla_hw_data *hw; 2666 struct qla_hw_data *hw;
2646 int req_ques[QLA_MAX_HOST_QUES]; 2667 struct req_que *req;
2647} scsi_qla_host_t; 2668} scsi_qla_host_t;
2648 2669
2649/* 2670/*
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 96ccb9642ba0..dfde2dd865cb 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -878,7 +878,6 @@ struct device_reg_24xx {
878 /* HCCR statuses. */ 878 /* HCCR statuses. */
879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ 879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ 880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
881#define HCCRX_RISC_PAUSE BIT_4 /* RISC Pause mode bit. */
882 /* HCCR commands. */ 881 /* HCCR commands. */
883 /* NOOP. */ 882 /* NOOP. */
884#define HCCRX_NOOP 0x00000000 883#define HCCRX_NOOP 0x00000000
@@ -1241,6 +1240,7 @@ struct qla_flt_header {
1241#define FLT_REG_HW_EVENT_1 0x1f 1240#define FLT_REG_HW_EVENT_1 0x1f
1242#define FLT_REG_NPIV_CONF_0 0x29 1241#define FLT_REG_NPIV_CONF_0 0x29
1243#define FLT_REG_NPIV_CONF_1 0x2a 1242#define FLT_REG_NPIV_CONF_1 0x2a
1243#define FLT_REG_GOLD_FW 0x2f
1244 1244
1245struct qla_flt_region { 1245struct qla_flt_region {
1246 uint32_t code; 1246 uint32_t code;
@@ -1405,6 +1405,8 @@ struct access_chip_rsp_84xx {
1405#define MBC_IDC_ACK 0x101 1405#define MBC_IDC_ACK 0x101
1406#define MBC_RESTART_MPI_FW 0x3d 1406#define MBC_RESTART_MPI_FW 0x3d
1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */ 1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */
1408#define MBC_GET_XGMAC_STATS 0x7a
1409#define MBC_GET_DCBX_PARAMS 0x51
1408 1410
1409/* Flash access control option field bit definitions */ 1411/* Flash access control option field bit definitions */
1410#define FAC_OPT_FORCE_SEMAPHORE BIT_15 1412#define FAC_OPT_FORCE_SEMAPHORE BIT_15
@@ -1711,7 +1713,7 @@ struct ex_init_cb_81xx {
1711#define FA_VPD0_ADDR_81 0xD0000 1713#define FA_VPD0_ADDR_81 0xD0000
1712#define FA_VPD1_ADDR_81 0xD0400 1714#define FA_VPD1_ADDR_81 0xD0400
1713#define FA_NVRAM0_ADDR_81 0xD0080 1715#define FA_NVRAM0_ADDR_81 0xD0080
1714#define FA_NVRAM1_ADDR_81 0xD0480 1716#define FA_NVRAM1_ADDR_81 0xD0180
1715#define FA_FEATURE_ADDR_81 0xD4000 1717#define FA_FEATURE_ADDR_81 0xD4000
1716#define FA_FLASH_DESCR_ADDR_81 0xD8000 1718#define FA_FLASH_DESCR_ADDR_81 0xD8000
1717#define FA_FLASH_LAYOUT_ADDR_81 0xD8400 1719#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 528913f6bed9..65b12d82867c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -65,8 +65,11 @@ extern int ql2xfdmienable;
65extern int ql2xallocfwdump; 65extern int ql2xallocfwdump;
66extern int ql2xextended_error_logging; 66extern int ql2xextended_error_logging;
67extern int ql2xqfullrampup; 67extern int ql2xqfullrampup;
68extern int ql2xqfulltracking;
68extern int ql2xiidmaenable; 69extern int ql2xiidmaenable;
69extern int ql2xmaxqueues; 70extern int ql2xmaxqueues;
71extern int ql2xmultique_tag;
72extern int ql2xfwloadbin;
70 73
71extern int qla2x00_loop_reset(scsi_qla_host_t *); 74extern int qla2x00_loop_reset(scsi_qla_host_t *);
72extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 75extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -145,7 +148,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
145extern int 148extern int
146qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); 149qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
147 150
148extern void 151extern int
149qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, 152qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
150 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *); 153 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
151 154
@@ -165,13 +168,13 @@ extern int
165qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 168qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
166 169
167extern int 170extern int
168qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 171qla2x00_abort_command(srb_t *);
169 172
170extern int 173extern int
171qla2x00_abort_target(struct fc_port *, unsigned int); 174qla2x00_abort_target(struct fc_port *, unsigned int, int);
172 175
173extern int 176extern int
174qla2x00_lun_reset(struct fc_port *, unsigned int); 177qla2x00_lun_reset(struct fc_port *, unsigned int, int);
175 178
176extern int 179extern int
177qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, 180qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -236,9 +239,11 @@ extern int
236qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, 239qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
237 dma_addr_t); 240 dma_addr_t);
238 241
239extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 242extern int qla24xx_abort_command(srb_t *);
240extern int qla24xx_abort_target(struct fc_port *, unsigned int); 243extern int
241extern int qla24xx_lun_reset(struct fc_port *, unsigned int); 244qla24xx_abort_target(struct fc_port *, unsigned int, int);
245extern int
246qla24xx_lun_reset(struct fc_port *, unsigned int, int);
242 247
243extern int 248extern int
244qla2x00_system_error(scsi_qla_host_t *); 249qla2x00_system_error(scsi_qla_host_t *);
@@ -288,6 +293,18 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
288extern int 293extern int
289qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); 294qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
290 295
296extern int
297qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
298
299extern int
300qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t);
301
302extern int
303qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
304
305extern int
306qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
307
291/* 308/*
292 * Global Function Prototypes in qla_isr.c source file. 309 * Global Function Prototypes in qla_isr.c source file.
293 */ 310 */
@@ -295,8 +312,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *);
295extern irqreturn_t qla2300_intr_handler(int, void *); 312extern irqreturn_t qla2300_intr_handler(int, void *);
296extern irqreturn_t qla24xx_intr_handler(int, void *); 313extern irqreturn_t qla24xx_intr_handler(int, void *);
297extern void qla2x00_process_response_queue(struct rsp_que *); 314extern void qla2x00_process_response_queue(struct rsp_que *);
298extern void qla24xx_process_response_queue(struct rsp_que *); 315extern void
299 316qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
300extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); 317extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
301extern void qla2x00_free_irqs(scsi_qla_host_t *); 318extern void qla2x00_free_irqs(scsi_qla_host_t *);
302 319
@@ -401,19 +418,21 @@ extern int qla25xx_request_irq(struct rsp_que *);
401extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); 418extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
402extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); 419extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
403extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, 420extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
404 uint16_t, uint8_t, uint8_t); 421 uint16_t, int, uint8_t);
405extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, 422extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
406 uint16_t); 423 uint16_t, int);
407extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t); 424extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
408extern void qla2x00_init_response_q_entries(struct rsp_que *); 425extern void qla2x00_init_response_q_entries(struct rsp_que *);
409extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); 426extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
410extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); 427extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
411extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t); 428extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
412extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t); 429extern int qla25xx_delete_queues(struct scsi_qla_host *);
413extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); 430extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
414extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); 431extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
415extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 432extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
416extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 433extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
417extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 434extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
418extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 435extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
436extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
437
419#endif /* _QLA_GBL_H */ 438#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 557f58d5bf88..917534b9f221 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1107 return ret; 1107 return ret;
1108 1108
1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1110 mb, BIT_1); 1110 mb, BIT_1|BIT_0);
1111 if (mb[0] != MBS_COMMAND_COMPLETE) { 1111 if (mb[0] != MBS_COMMAND_COMPLETE) {
1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
@@ -1879,6 +1879,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1879 case BIT_13: 1879 case BIT_13:
1880 list[i].fp_speed = PORT_SPEED_4GB; 1880 list[i].fp_speed = PORT_SPEED_4GB;
1881 break; 1881 break;
1882 case BIT_12:
1883 list[i].fp_speed = PORT_SPEED_10GB;
1884 break;
1882 case BIT_11: 1885 case BIT_11:
1883 list[i].fp_speed = PORT_SPEED_8GB; 1886 list[i].fp_speed = PORT_SPEED_8GB;
1884 break; 1887 break;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bd7dd84c0648..262026129325 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
634 goto chip_diag_failed; 634 goto chip_diag_failed;
635 635
636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
637 ha->host_no)); 637 vha->host_no));
638 638
639 /* Reset RISC processor. */ 639 /* Reset RISC processor. */
640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
655 goto chip_diag_failed; 655 goto chip_diag_failed;
656 656
657 /* Check product ID of chip */ 657 /* Check product ID of chip */
658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no)); 658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
659 659
660 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 660 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
661 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 661 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -730,9 +730,6 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
730 struct qla_hw_data *ha = vha->hw; 730 struct qla_hw_data *ha = vha->hw;
731 struct req_que *req = ha->req_q_map[0]; 731 struct req_que *req = ha->req_q_map[0];
732 732
733 /* Perform RISC reset. */
734 qla24xx_reset_risc(vha);
735
736 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 733 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
737 734
738 rval = qla2x00_mbx_reg_test(vha); 735 rval = qla2x00_mbx_reg_test(vha);
@@ -786,7 +783,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
786 sizeof(uint32_t); 783 sizeof(uint32_t);
787 if (ha->mqenable) 784 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain); 785 mq_size = sizeof(struct qla2xxx_mq_chain);
789
790 /* Allocate memory for Fibre Channel Event Buffer. */ 786 /* Allocate memory for Fibre Channel Event Buffer. */
791 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 787 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
792 goto try_eft; 788 goto try_eft;
@@ -850,8 +846,7 @@ cont_alloc:
850 rsp_q_size = rsp->length * sizeof(response_t); 846 rsp_q_size = rsp->length * sizeof(response_t);
851 847
852 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 848 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 849 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
854 eft_size;
855 ha->chain_offset = dump_size; 850 ha->chain_offset = dump_size;
856 dump_size += mq_size + fce_size; 851 dump_size += mq_size + fce_size;
857 852
@@ -891,6 +886,56 @@ cont_alloc:
891 htonl(offsetof(struct qla2xxx_fw_dump, isp)); 886 htonl(offsetof(struct qla2xxx_fw_dump, isp));
892} 887}
893 888
889static int
890qla81xx_mpi_sync(scsi_qla_host_t *vha)
891{
892#define MPS_MASK 0xe0
893 int rval;
894 uint16_t dc;
895 uint32_t dw;
896 struct qla_hw_data *ha = vha->hw;
897
898 if (!IS_QLA81XX(vha->hw))
899 return QLA_SUCCESS;
900
901 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
902 if (rval != QLA_SUCCESS) {
903 DEBUG2(qla_printk(KERN_WARNING, ha,
904 "Sync-MPI: Unable to acquire semaphore.\n"));
905 goto done;
906 }
907
908 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
909 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
910 if (rval != QLA_SUCCESS) {
911 DEBUG2(qla_printk(KERN_WARNING, ha,
912 "Sync-MPI: Unable to read sync.\n"));
913 goto done_release;
914 }
915
916 dc &= MPS_MASK;
917 if (dc == (dw & MPS_MASK))
918 goto done_release;
919
920 dw &= ~MPS_MASK;
921 dw |= dc;
922 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
923 if (rval != QLA_SUCCESS) {
924 DEBUG2(qla_printk(KERN_WARNING, ha,
925 "Sync-MPI: Unable to gain sync.\n"));
926 }
927
928done_release:
929 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
930 if (rval != QLA_SUCCESS) {
931 DEBUG2(qla_printk(KERN_WARNING, ha,
932 "Sync-MPI: Unable to release semaphore.\n"));
933 }
934
935done:
936 return rval;
937}
938
894/** 939/**
895 * qla2x00_setup_chip() - Load and start RISC firmware. 940 * qla2x00_setup_chip() - Load and start RISC firmware.
896 * @ha: HA context 941 * @ha: HA context
@@ -915,6 +960,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
915 spin_unlock_irqrestore(&ha->hardware_lock, flags); 960 spin_unlock_irqrestore(&ha->hardware_lock, flags);
916 } 961 }
917 962
963 qla81xx_mpi_sync(vha);
964
918 /* Load firmware sequences */ 965 /* Load firmware sequences */
919 rval = ha->isp_ops->load_risc(vha, &srisc_address); 966 rval = ha->isp_ops->load_risc(vha, &srisc_address);
920 if (rval == QLA_SUCCESS) { 967 if (rval == QLA_SUCCESS) {
@@ -931,13 +978,16 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
931 /* Retrieve firmware information. */ 978 /* Retrieve firmware information. */
932 if (rval == QLA_SUCCESS) { 979 if (rval == QLA_SUCCESS) {
933 fw_major_version = ha->fw_major_version; 980 fw_major_version = ha->fw_major_version;
934 qla2x00_get_fw_version(vha, 981 rval = qla2x00_get_fw_version(vha,
935 &ha->fw_major_version, 982 &ha->fw_major_version,
936 &ha->fw_minor_version, 983 &ha->fw_minor_version,
937 &ha->fw_subminor_version, 984 &ha->fw_subminor_version,
938 &ha->fw_attributes, &ha->fw_memory_size, 985 &ha->fw_attributes, &ha->fw_memory_size,
939 ha->mpi_version, &ha->mpi_capabilities, 986 ha->mpi_version, &ha->mpi_capabilities,
940 ha->phy_version); 987 ha->phy_version);
988 if (rval != QLA_SUCCESS)
989 goto failed;
990
941 ha->flags.npiv_supported = 0; 991 ha->flags.npiv_supported = 0;
942 if (IS_QLA2XXX_MIDTYPE(ha) && 992 if (IS_QLA2XXX_MIDTYPE(ha) &&
943 (ha->fw_attributes & BIT_2)) { 993 (ha->fw_attributes & BIT_2)) {
@@ -989,7 +1039,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
989 ha->fw_subminor_version); 1039 ha->fw_subminor_version);
990 } 1040 }
991 } 1041 }
992 1042failed:
993 if (rval) { 1043 if (rval) {
994 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1044 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
995 vha->host_no)); 1045 vha->host_no));
@@ -1013,12 +1063,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp)
1013 uint16_t cnt; 1063 uint16_t cnt;
1014 response_t *pkt; 1064 response_t *pkt;
1015 1065
1066 rsp->ring_ptr = rsp->ring;
1067 rsp->ring_index = 0;
1068 rsp->status_srb = NULL;
1016 pkt = rsp->ring_ptr; 1069 pkt = rsp->ring_ptr;
1017 for (cnt = 0; cnt < rsp->length; cnt++) { 1070 for (cnt = 0; cnt < rsp->length; cnt++) {
1018 pkt->signature = RESPONSE_PROCESSED; 1071 pkt->signature = RESPONSE_PROCESSED;
1019 pkt++; 1072 pkt++;
1020 } 1073 }
1021
1022} 1074}
1023 1075
1024/** 1076/**
@@ -1176,7 +1228,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1176 if (ha->flags.msix_enabled) { 1228 if (ha->flags.msix_enabled) {
1177 msix = &ha->msix_entries[1]; 1229 msix = &ha->msix_entries[1];
1178 DEBUG2_17(printk(KERN_INFO 1230 DEBUG2_17(printk(KERN_INFO
1179 "Reistering vector 0x%x for base que\n", msix->entry)); 1231 "Registering vector 0x%x for base que\n", msix->entry));
1180 icb->msix = cpu_to_le16(msix->entry); 1232 icb->msix = cpu_to_le16(msix->entry);
1181 } 1233 }
1182 /* Use alternate PCI bus number */ 1234 /* Use alternate PCI bus number */
@@ -1230,14 +1282,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1230 spin_lock_irqsave(&ha->hardware_lock, flags); 1282 spin_lock_irqsave(&ha->hardware_lock, flags);
1231 1283
1232 /* Clear outstanding commands array. */ 1284 /* Clear outstanding commands array. */
1233 for (que = 0; que < ha->max_queues; que++) { 1285 for (que = 0; que < ha->max_req_queues; que++) {
1234 req = ha->req_q_map[que]; 1286 req = ha->req_q_map[que];
1235 if (!req) 1287 if (!req)
1236 continue; 1288 continue;
1237 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1289 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1238 req->outstanding_cmds[cnt] = NULL; 1290 req->outstanding_cmds[cnt] = NULL;
1239 1291
1240 req->current_outstanding_cmd = 0; 1292 req->current_outstanding_cmd = 1;
1241 1293
1242 /* Initialize firmware. */ 1294 /* Initialize firmware. */
1243 req->ring_ptr = req->ring; 1295 req->ring_ptr = req->ring;
@@ -1245,13 +1297,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1245 req->cnt = req->length; 1297 req->cnt = req->length;
1246 } 1298 }
1247 1299
1248 for (que = 0; que < ha->max_queues; que++) { 1300 for (que = 0; que < ha->max_rsp_queues; que++) {
1249 rsp = ha->rsp_q_map[que]; 1301 rsp = ha->rsp_q_map[que];
1250 if (!rsp) 1302 if (!rsp)
1251 continue; 1303 continue;
1252 rsp->ring_ptr = rsp->ring;
1253 rsp->ring_index = 0;
1254
1255 /* Initialize response queue entries */ 1304 /* Initialize response queue entries */
1256 qla2x00_init_response_q_entries(rsp); 1305 qla2x00_init_response_q_entries(rsp);
1257 } 1306 }
@@ -1307,7 +1356,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1307 unsigned long wtime, mtime, cs84xx_time; 1356 unsigned long wtime, mtime, cs84xx_time;
1308 uint16_t min_wait; /* Minimum wait time if loop is down */ 1357 uint16_t min_wait; /* Minimum wait time if loop is down */
1309 uint16_t wait_time; /* Wait time if loop is coming ready */ 1358 uint16_t wait_time; /* Wait time if loop is coming ready */
1310 uint16_t state[3]; 1359 uint16_t state[5];
1311 struct qla_hw_data *ha = vha->hw; 1360 struct qla_hw_data *ha = vha->hw;
1312 1361
1313 rval = QLA_SUCCESS; 1362 rval = QLA_SUCCESS;
@@ -1406,8 +1455,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1406 vha->host_no, state[0], jiffies)); 1455 vha->host_no, state[0], jiffies));
1407 } while (1); 1456 } while (1);
1408 1457
1409 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1458 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1410 vha->host_no, state[0], jiffies)); 1459 vha->host_no, state[0], state[1], state[2], state[3], state[4],
1460 jiffies));
1411 1461
1412 if (rval) { 1462 if (rval) {
1413 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1463 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
@@ -1541,6 +1591,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1541 char *st, *en; 1591 char *st, *en;
1542 uint16_t index; 1592 uint16_t index;
1543 struct qla_hw_data *ha = vha->hw; 1593 struct qla_hw_data *ha = vha->hw;
1594 int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
1544 1595
1545 if (memcmp(model, BINZERO, len) != 0) { 1596 if (memcmp(model, BINZERO, len) != 0) {
1546 strncpy(ha->model_number, model, len); 1597 strncpy(ha->model_number, model, len);
@@ -1553,14 +1604,16 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1553 } 1604 }
1554 1605
1555 index = (ha->pdev->subsystem_device & 0xff); 1606 index = (ha->pdev->subsystem_device & 0xff);
1556 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1607 if (use_tbl &&
1608 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1557 index < QLA_MODEL_NAMES) 1609 index < QLA_MODEL_NAMES)
1558 strncpy(ha->model_desc, 1610 strncpy(ha->model_desc,
1559 qla2x00_model_name[index * 2 + 1], 1611 qla2x00_model_name[index * 2 + 1],
1560 sizeof(ha->model_desc) - 1); 1612 sizeof(ha->model_desc) - 1);
1561 } else { 1613 } else {
1562 index = (ha->pdev->subsystem_device & 0xff); 1614 index = (ha->pdev->subsystem_device & 0xff);
1563 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1615 if (use_tbl &&
1616 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1564 index < QLA_MODEL_NAMES) { 1617 index < QLA_MODEL_NAMES) {
1565 strcpy(ha->model_number, 1618 strcpy(ha->model_number,
1566 qla2x00_model_name[index * 2]); 1619 qla2x00_model_name[index * 2]);
@@ -2061,8 +2114,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2061 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2114 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2062 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2115 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2063 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2116 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2064 if (test_bit(RSCN_UPDATE, &save_flags)) 2117 if (test_bit(RSCN_UPDATE, &save_flags)) {
2065 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2118 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2119 vha->flags.rscn_queue_overflow = 1;
2120 }
2066 } 2121 }
2067 2122
2068 return (rval); 2123 return (rval);
@@ -2110,7 +2165,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2110 goto cleanup_allocation; 2165 goto cleanup_allocation;
2111 2166
2112 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2167 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2113 ha->host_no, entries)); 2168 vha->host_no, entries));
2114 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2169 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2115 entries * sizeof(struct gid_list_info))); 2170 entries * sizeof(struct gid_list_info)));
2116 2171
@@ -2243,7 +2298,8 @@ static void
2243qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2298qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2244{ 2299{
2245#define LS_UNKNOWN 2 2300#define LS_UNKNOWN 2
2246 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2301 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2302 char *link_speed;
2247 int rval; 2303 int rval;
2248 uint16_t mb[6]; 2304 uint16_t mb[6];
2249 struct qla_hw_data *ha = vha->hw; 2305 struct qla_hw_data *ha = vha->hw;
@@ -2266,10 +2322,15 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2266 fcport->port_name[6], fcport->port_name[7], rval, 2322 fcport->port_name[6], fcport->port_name[7], rval,
2267 fcport->fp_speed, mb[0], mb[1])); 2323 fcport->fp_speed, mb[0], mb[1]));
2268 } else { 2324 } else {
2325 link_speed = link_speeds[LS_UNKNOWN];
2326 if (fcport->fp_speed < 5)
2327 link_speed = link_speeds[fcport->fp_speed];
2328 else if (fcport->fp_speed == 0x13)
2329 link_speed = link_speeds[5];
2269 DEBUG2(qla_printk(KERN_INFO, ha, 2330 DEBUG2(qla_printk(KERN_INFO, ha,
2270 "iIDMA adjusted to %s GB/s on " 2331 "iIDMA adjusted to %s GB/s on "
2271 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2332 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2272 link_speeds[fcport->fp_speed], fcport->port_name[0], 2333 link_speed, fcport->port_name[0],
2273 fcport->port_name[1], fcport->port_name[2], 2334 fcport->port_name[1], fcport->port_name[2],
2274 fcport->port_name[3], fcport->port_name[4], 2335 fcport->port_name[3], fcport->port_name[4],
2275 fcport->port_name[5], fcport->port_name[6], 2336 fcport->port_name[5], fcport->port_name[6],
@@ -3180,9 +3241,14 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3180{ 3241{
3181 int rval = QLA_SUCCESS; 3242 int rval = QLA_SUCCESS;
3182 uint32_t wait_time; 3243 uint32_t wait_time;
3183 struct qla_hw_data *ha = vha->hw; 3244 struct req_que *req;
3184 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 3245 struct rsp_que *rsp;
3185 struct rsp_que *rsp = req->rsp; 3246
3247 if (ql2xmultique_tag)
3248 req = vha->hw->req_q_map[0];
3249 else
3250 req = vha->req;
3251 rsp = req->rsp;
3186 3252
3187 atomic_set(&vha->loop_state, LOOP_UPDATE); 3253 atomic_set(&vha->loop_state, LOOP_UPDATE);
3188 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3254 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -3448,7 +3514,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3448 int ret = -1; 3514 int ret = -1;
3449 int i; 3515 int i;
3450 3516
3451 for (i = 1; i < ha->max_queues; i++) { 3517 for (i = 1; i < ha->max_rsp_queues; i++) {
3452 rsp = ha->rsp_q_map[i]; 3518 rsp = ha->rsp_q_map[i];
3453 if (rsp) { 3519 if (rsp) {
3454 rsp->options &= ~BIT_0; 3520 rsp->options &= ~BIT_0;
@@ -3462,6 +3528,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3462 "%s Rsp que:%d inited\n", __func__, 3528 "%s Rsp que:%d inited\n", __func__,
3463 rsp->id)); 3529 rsp->id));
3464 } 3530 }
3531 }
3532 for (i = 1; i < ha->max_req_queues; i++) {
3465 req = ha->req_q_map[i]; 3533 req = ha->req_q_map[i];
3466 if (req) { 3534 if (req) {
3467 /* Clear outstanding commands array. */ 3535 /* Clear outstanding commands array. */
@@ -3566,14 +3634,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3566 nv = ha->nvram; 3634 nv = ha->nvram;
3567 3635
3568 /* Determine NVRAM starting address. */ 3636 /* Determine NVRAM starting address. */
3569 ha->nvram_size = sizeof(struct nvram_24xx); 3637 if (ha->flags.port0) {
3570 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 3638 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
3571 ha->vpd_size = FA_NVRAM_VPD_SIZE; 3639 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
3572 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 3640 } else {
3573 if (PCI_FUNC(ha->pdev->devfn)) {
3574 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 3641 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
3575 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 3642 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
3576 } 3643 }
3644 ha->nvram_size = sizeof(struct nvram_24xx);
3645 ha->vpd_size = FA_NVRAM_VPD_SIZE;
3577 3646
3578 /* Get VPD data into cache */ 3647 /* Get VPD data into cache */
3579 ha->vpd = ha->nvram + VPD_OFFSET; 3648 ha->vpd = ha->nvram + VPD_OFFSET;
@@ -3587,7 +3656,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3587 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3656 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3588 chksum += le32_to_cpu(*dptr++); 3657 chksum += le32_to_cpu(*dptr++);
3589 3658
3590 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 3659 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
3591 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 3660 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
3592 3661
3593 /* Bad NVRAM data, set defaults parameters. */ 3662 /* Bad NVRAM data, set defaults parameters. */
@@ -3612,7 +3681,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3612 nv->exchange_count = __constant_cpu_to_le16(0); 3681 nv->exchange_count = __constant_cpu_to_le16(0);
3613 nv->hard_address = __constant_cpu_to_le16(124); 3682 nv->hard_address = __constant_cpu_to_le16(124);
3614 nv->port_name[0] = 0x21; 3683 nv->port_name[0] = 0x21;
3615 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 3684 nv->port_name[1] = 0x00 + ha->port_no;
3616 nv->port_name[2] = 0x00; 3685 nv->port_name[2] = 0x00;
3617 nv->port_name[3] = 0xe0; 3686 nv->port_name[3] = 0xe0;
3618 nv->port_name[4] = 0x8b; 3687 nv->port_name[4] = 0x8b;
@@ -3798,11 +3867,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3798} 3867}
3799 3868
3800static int 3869static int
3801qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3870qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
3871 uint32_t faddr)
3802{ 3872{
3803 int rval = QLA_SUCCESS; 3873 int rval = QLA_SUCCESS;
3804 int segments, fragment; 3874 int segments, fragment;
3805 uint32_t faddr;
3806 uint32_t *dcode, dlen; 3875 uint32_t *dcode, dlen;
3807 uint32_t risc_addr; 3876 uint32_t risc_addr;
3808 uint32_t risc_size; 3877 uint32_t risc_size;
@@ -3811,12 +3880,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3811 struct req_que *req = ha->req_q_map[0]; 3880 struct req_que *req = ha->req_q_map[0];
3812 3881
3813 qla_printk(KERN_INFO, ha, 3882 qla_printk(KERN_INFO, ha,
3814 "FW: Loading from flash (%x)...\n", ha->flt_region_fw); 3883 "FW: Loading from flash (%x)...\n", faddr);
3815 3884
3816 rval = QLA_SUCCESS; 3885 rval = QLA_SUCCESS;
3817 3886
3818 segments = FA_RISC_CODE_SEGMENTS; 3887 segments = FA_RISC_CODE_SEGMENTS;
3819 faddr = ha->flt_region_fw;
3820 dcode = (uint32_t *)req->ring; 3888 dcode = (uint32_t *)req->ring;
3821 *srisc_addr = 0; 3889 *srisc_addr = 0;
3822 3890
@@ -4104,6 +4172,9 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4104{ 4172{
4105 int rval; 4173 int rval;
4106 4174
4175 if (ql2xfwloadbin == 1)
4176 return qla81xx_load_risc(vha, srisc_addr);
4177
4107 /* 4178 /*
4108 * FW Load priority: 4179 * FW Load priority:
4109 * 1) Firmware via request-firmware interface (.bin file). 4180 * 1) Firmware via request-firmware interface (.bin file).
@@ -4113,24 +4184,45 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4113 if (rval == QLA_SUCCESS) 4184 if (rval == QLA_SUCCESS)
4114 return rval; 4185 return rval;
4115 4186
4116 return qla24xx_load_risc_flash(vha, srisc_addr); 4187 return qla24xx_load_risc_flash(vha, srisc_addr,
4188 vha->hw->flt_region_fw);
4117} 4189}
4118 4190
4119int 4191int
4120qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4192qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4121{ 4193{
4122 int rval; 4194 int rval;
4195 struct qla_hw_data *ha = vha->hw;
4196
4197 if (ql2xfwloadbin == 2)
4198 goto try_blob_fw;
4123 4199
4124 /* 4200 /*
4125 * FW Load priority: 4201 * FW Load priority:
4126 * 1) Firmware residing in flash. 4202 * 1) Firmware residing in flash.
4127 * 2) Firmware via request-firmware interface (.bin file). 4203 * 2) Firmware via request-firmware interface (.bin file).
4204 * 3) Golden-Firmware residing in flash -- limited operation.
4128 */ 4205 */
4129 rval = qla24xx_load_risc_flash(vha, srisc_addr); 4206 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4130 if (rval == QLA_SUCCESS) 4207 if (rval == QLA_SUCCESS)
4131 return rval; 4208 return rval;
4132 4209
4133 return qla24xx_load_risc_blob(vha, srisc_addr); 4210try_blob_fw:
4211 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4212 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4213 return rval;
4214
4215 qla_printk(KERN_ERR, ha,
4216 "FW: Attempting to fallback to golden firmware...\n");
4217 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4218 if (rval != QLA_SUCCESS)
4219 return rval;
4220
4221 qla_printk(KERN_ERR, ha,
4222 "FW: Please update operational firmware...\n");
4223 ha->flags.running_gold_fw = 1;
4224
4225 return rval;
4134} 4226}
4135 4227
4136void 4228void
@@ -4146,7 +4238,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4146 4238
4147 ret = qla2x00_stop_firmware(vha); 4239 ret = qla2x00_stop_firmware(vha);
4148 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4240 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4149 retries ; retries--) { 4241 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4150 ha->isp_ops->reset_chip(vha); 4242 ha->isp_ops->reset_chip(vha);
4151 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 4243 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4152 continue; 4244 continue;
@@ -4165,13 +4257,19 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
4165 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4257 uint16_t mb[MAILBOX_REGISTER_COUNT];
4166 struct qla_hw_data *ha = vha->hw; 4258 struct qla_hw_data *ha = vha->hw;
4167 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4259 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4168 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 4260 struct req_que *req;
4169 struct rsp_que *rsp = req->rsp; 4261 struct rsp_que *rsp;
4170 4262
4171 if (!vha->vp_idx) 4263 if (!vha->vp_idx)
4172 return -EINVAL; 4264 return -EINVAL;
4173 4265
4174 rval = qla2x00_fw_ready(base_vha); 4266 rval = qla2x00_fw_ready(base_vha);
4267 if (ql2xmultique_tag)
4268 req = ha->req_q_map[0];
4269 else
4270 req = vha->req;
4271 rsp = req->rsp;
4272
4175 if (rval == QLA_SUCCESS) { 4273 if (rval == QLA_SUCCESS) {
4176 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4274 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4177 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4275 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4305,7 +4403,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4305 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4403 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4306 chksum += le32_to_cpu(*dptr++); 4404 chksum += le32_to_cpu(*dptr++);
4307 4405
4308 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 4406 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4309 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4407 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4310 4408
4311 /* Bad NVRAM data, set defaults parameters. */ 4409 /* Bad NVRAM data, set defaults parameters. */
@@ -4329,7 +4427,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4329 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4427 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4330 nv->exchange_count = __constant_cpu_to_le16(0); 4428 nv->exchange_count = __constant_cpu_to_le16(0);
4331 nv->port_name[0] = 0x21; 4429 nv->port_name[0] = 0x21;
4332 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 4430 nv->port_name[1] = 0x00 + ha->port_no;
4333 nv->port_name[2] = 0x00; 4431 nv->port_name[2] = 0x00;
4334 nv->port_name[3] = 0xe0; 4432 nv->port_name[3] = 0xe0;
4335 nv->port_name[4] = 0x8b; 4433 nv->port_name[4] = 0x8b;
@@ -4358,12 +4456,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4358 nv->max_luns_per_target = __constant_cpu_to_le16(128); 4456 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4359 nv->port_down_retry_count = __constant_cpu_to_le16(30); 4457 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4360 nv->link_down_timeout = __constant_cpu_to_le16(30); 4458 nv->link_down_timeout = __constant_cpu_to_le16(30);
4361 nv->enode_mac[0] = 0x01; 4459 nv->enode_mac[0] = 0x00;
4362 nv->enode_mac[1] = 0x02; 4460 nv->enode_mac[1] = 0x02;
4363 nv->enode_mac[2] = 0x03; 4461 nv->enode_mac[2] = 0x03;
4364 nv->enode_mac[3] = 0x04; 4462 nv->enode_mac[3] = 0x04;
4365 nv->enode_mac[4] = 0x05; 4463 nv->enode_mac[4] = 0x05;
4366 nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4464 nv->enode_mac[5] = 0x06 + ha->port_no;
4367 4465
4368 rval = 1; 4466 rval = 1;
4369 } 4467 }
@@ -4396,7 +4494,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4396 icb->enode_mac[2] = 0x03; 4494 icb->enode_mac[2] = 0x03;
4397 icb->enode_mac[3] = 0x04; 4495 icb->enode_mac[3] = 0x04;
4398 icb->enode_mac[4] = 0x05; 4496 icb->enode_mac[4] = 0x05;
4399 icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4497 icb->enode_mac[5] = 0x06 + ha->port_no;
4400 } 4498 }
4401 4499
4402 /* Use extended-initialization control block. */ 4500 /* Use extended-initialization control block. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a8abbb95730d..13396beae2ce 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15 struct rsp_que *rsp); 15 struct rsp_que *rsp);
16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); 16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
17 17
18static void qla25xx_set_que(srb_t *, struct rsp_que **);
18/** 19/**
19 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 20 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
20 * @cmd: SCSI command 21 * @cmd: SCSI command
@@ -92,9 +93,10 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
92 * Returns a pointer to the Continuation Type 0 IOCB packet. 93 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 */ 94 */
94static inline cont_entry_t * 95static inline cont_entry_t *
95qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) 96qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96{ 97{
97 cont_entry_t *cont_pkt; 98 cont_entry_t *cont_pkt;
99 struct req_que *req = vha->req;
98 /* Adjust ring index. */ 100 /* Adjust ring index. */
99 req->ring_index++; 101 req->ring_index++;
100 if (req->ring_index == req->length) { 102 if (req->ring_index == req->length) {
@@ -120,10 +122,11 @@ qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 122 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 123 */
122static inline cont_a64_entry_t * 124static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha) 125qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
124{ 126{
125 cont_a64_entry_t *cont_pkt; 127 cont_a64_entry_t *cont_pkt;
126 128
129 struct req_que *req = vha->req;
127 /* Adjust ring index. */ 130 /* Adjust ring index. */
128 req->ring_index++; 131 req->ring_index++;
129 if (req->ring_index == req->length) { 132 if (req->ring_index == req->length) {
@@ -159,7 +162,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
159 struct scsi_cmnd *cmd; 162 struct scsi_cmnd *cmd;
160 struct scatterlist *sg; 163 struct scatterlist *sg;
161 int i; 164 int i;
162 struct req_que *req;
163 165
164 cmd = sp->cmd; 166 cmd = sp->cmd;
165 167
@@ -174,8 +176,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
174 } 176 }
175 177
176 vha = sp->fcport->vha; 178 vha = sp->fcport->vha;
177 req = sp->que;
178
179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
180 180
181 /* Three DSDs are available in the Command Type 2 IOCB */ 181 /* Three DSDs are available in the Command Type 2 IOCB */
@@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192 * Seven DSDs are available in the Continuation 192 * Seven DSDs are available in the Continuation
193 * Type 0 IOCB. 193 * Type 0 IOCB.
194 */ 194 */
195 cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha); 195 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
197 avail_dsds = 7; 197 avail_dsds = 7;
198 } 198 }
@@ -220,7 +220,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
220 struct scsi_cmnd *cmd; 220 struct scsi_cmnd *cmd;
221 struct scatterlist *sg; 221 struct scatterlist *sg;
222 int i; 222 int i;
223 struct req_que *req;
224 223
225 cmd = sp->cmd; 224 cmd = sp->cmd;
226 225
@@ -235,8 +234,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
235 } 234 }
236 235
237 vha = sp->fcport->vha; 236 vha = sp->fcport->vha;
238 req = sp->que;
239
240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 237 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
241 238
242 /* Two DSDs are available in the Command Type 3 IOCB */ 239 /* Two DSDs are available in the Command Type 3 IOCB */
@@ -254,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
254 * Five DSDs are available in the Continuation 251 * Five DSDs are available in the Continuation
255 * Type 1 IOCB. 252 * Type 1 IOCB.
256 */ 253 */
257 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 254 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
258 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 255 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
259 avail_dsds = 5; 256 avail_dsds = 5;
260 } 257 }
@@ -353,7 +350,6 @@ qla2x00_start_scsi(srb_t *sp)
353 /* Build command packet */ 350 /* Build command packet */
354 req->current_outstanding_cmd = handle; 351 req->current_outstanding_cmd = handle;
355 req->outstanding_cmds[handle] = sp; 352 req->outstanding_cmds[handle] = sp;
356 sp->que = req;
357 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 353 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
358 req->cnt -= req_cnt; 354 req->cnt -= req_cnt;
359 355
@@ -453,6 +449,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
453 mrk24->lun[2] = MSB(lun); 449 mrk24->lun[2] = MSB(lun);
454 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 450 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
455 mrk24->vp_index = vha->vp_idx; 451 mrk24->vp_index = vha->vp_idx;
452 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
456 } else { 453 } else {
457 SET_TARGET_ID(ha, mrk->target, loop_id); 454 SET_TARGET_ID(ha, mrk->target, loop_id);
458 mrk->lun = cpu_to_le16(lun); 455 mrk->lun = cpu_to_le16(lun);
@@ -531,9 +528,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
531 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) 528 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
532 *dword_ptr++ = 0; 529 *dword_ptr++ = 0;
533 530
534 /* Set system defined field. */
535 pkt->sys_define = (uint8_t)req->ring_index;
536
537 /* Set entry count. */ 531 /* Set entry count. */
538 pkt->entry_count = 1; 532 pkt->entry_count = 1;
539 533
@@ -656,7 +650,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
656 } 650 }
657 651
658 vha = sp->fcport->vha; 652 vha = sp->fcport->vha;
659 req = sp->que; 653 req = vha->req;
660 654
661 /* Set transfer direction */ 655 /* Set transfer direction */
662 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 656 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -687,7 +681,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
687 * Five DSDs are available in the Continuation 681 * Five DSDs are available in the Continuation
688 * Type 1 IOCB. 682 * Type 1 IOCB.
689 */ 683 */
690 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 684 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
691 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 685 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
692 avail_dsds = 5; 686 avail_dsds = 5;
693 } 687 }
@@ -724,19 +718,13 @@ qla24xx_start_scsi(srb_t *sp)
724 struct scsi_cmnd *cmd = sp->cmd; 718 struct scsi_cmnd *cmd = sp->cmd;
725 struct scsi_qla_host *vha = sp->fcport->vha; 719 struct scsi_qla_host *vha = sp->fcport->vha;
726 struct qla_hw_data *ha = vha->hw; 720 struct qla_hw_data *ha = vha->hw;
727 uint16_t que_id;
728 721
729 /* Setup device pointers. */ 722 /* Setup device pointers. */
730 ret = 0; 723 ret = 0;
731 que_id = vha->req_ques[0];
732 724
733 req = ha->req_q_map[que_id]; 725 qla25xx_set_que(sp, &rsp);
734 sp->que = req; 726 req = vha->req;
735 727
736 if (req->rsp)
737 rsp = req->rsp;
738 else
739 rsp = ha->rsp_q_map[que_id];
740 /* So we know we haven't pci_map'ed anything yet */ 728 /* So we know we haven't pci_map'ed anything yet */
741 tot_dsds = 0; 729 tot_dsds = 0;
742 730
@@ -794,7 +782,7 @@ qla24xx_start_scsi(srb_t *sp)
794 req->cnt -= req_cnt; 782 req->cnt -= req_cnt;
795 783
796 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 784 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
797 cmd_pkt->handle = handle; 785 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
798 786
799 /* Zero out remaining portion of packet. */ 787 /* Zero out remaining portion of packet. */
800 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 788 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -823,6 +811,8 @@ qla24xx_start_scsi(srb_t *sp)
823 811
824 /* Set total data segment count. */ 812 /* Set total data segment count. */
825 cmd_pkt->entry_count = (uint8_t)req_cnt; 813 cmd_pkt->entry_count = (uint8_t)req_cnt;
814 /* Specify response queue number where completion should happen */
815 cmd_pkt->entry_status = (uint8_t) rsp->id;
826 wmb(); 816 wmb();
827 817
828 /* Adjust ring index. */ 818 /* Adjust ring index. */
@@ -842,7 +832,7 @@ qla24xx_start_scsi(srb_t *sp)
842 /* Manage unprocessed RIO/ZIO commands in response queue. */ 832 /* Manage unprocessed RIO/ZIO commands in response queue. */
843 if (vha->flags.process_response_queue && 833 if (vha->flags.process_response_queue &&
844 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 834 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
845 qla24xx_process_response_queue(rsp); 835 qla24xx_process_response_queue(vha, rsp);
846 836
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); 837 spin_unlock_irqrestore(&ha->hardware_lock, flags);
848 return QLA_SUCCESS; 838 return QLA_SUCCESS;
@@ -855,3 +845,16 @@ queuing_error:
855 845
856 return QLA_FUNCTION_FAILED; 846 return QLA_FUNCTION_FAILED;
857} 847}
848
849static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
850{
851 struct scsi_cmnd *cmd = sp->cmd;
852 struct qla_hw_data *ha = sp->fcport->vha->hw;
853 int affinity = cmd->request->cpu;
854
855 if (ql2xmultique_tag && affinity >= 0 &&
856 affinity < ha->max_rsp_queues - 1)
857 *rsp = ha->rsp_q_map[affinity + 1];
858 else
859 *rsp = ha->rsp_q_map[0];
860}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d04981848e56..c8d0a176fea4 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, 13static void qla2x00_process_completed_request(struct scsi_qla_host *,
14 struct req_que *, uint32_t); 14 struct req_que *, uint32_t);
15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 16static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18 sts_entry_t *); 18 sts_entry_t *);
19static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20 19
21/** 20/**
22 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 21 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -51,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
51 status = 0; 50 status = 0;
52 51
53 spin_lock(&ha->hardware_lock); 52 spin_lock(&ha->hardware_lock);
54 vha = qla2x00_get_rsp_host(rsp); 53 vha = pci_get_drvdata(ha->pdev);
55 for (iter = 50; iter--; ) { 54 for (iter = 50; iter--; ) {
56 hccr = RD_REG_WORD(&reg->hccr); 55 hccr = RD_REG_WORD(&reg->hccr);
57 if (hccr & HCCR_RISC_PAUSE) { 56 if (hccr & HCCR_RISC_PAUSE) {
@@ -147,7 +146,7 @@ qla2300_intr_handler(int irq, void *dev_id)
147 status = 0; 146 status = 0;
148 147
149 spin_lock(&ha->hardware_lock); 148 spin_lock(&ha->hardware_lock);
150 vha = qla2x00_get_rsp_host(rsp); 149 vha = pci_get_drvdata(ha->pdev);
151 for (iter = 50; iter--; ) { 150 for (iter = 50; iter--; ) {
152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 151 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153 if (stat & HSR_RISC_PAUSED) { 152 if (stat & HSR_RISC_PAUSED) {
@@ -685,7 +684,7 @@ skip_rio:
685 vha->host_no)); 684 vha->host_no));
686 685
687 if (IS_FWI2_CAPABLE(ha)) 686 if (IS_FWI2_CAPABLE(ha))
688 qla24xx_process_response_queue(rsp); 687 qla24xx_process_response_queue(vha, rsp);
689 else 688 else
690 qla2x00_process_response_queue(rsp); 689 qla2x00_process_response_queue(rsp);
691 break; 690 break;
@@ -766,7 +765,10 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
766 struct qla_hw_data *ha = vha->hw; 765 struct qla_hw_data *ha = vha->hw;
767 struct req_que *req = NULL; 766 struct req_que *req = NULL;
768 767
769 req = ha->req_q_map[vha->req_ques[0]]; 768 if (!ql2xqfulltracking)
769 return;
770
771 req = vha->req;
770 if (!req) 772 if (!req)
771 return; 773 return;
772 if (req->max_q_depth <= sdev->queue_depth) 774 if (req->max_q_depth <= sdev->queue_depth)
@@ -808,6 +810,9 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
808 fc_port_t *fcport; 810 fc_port_t *fcport;
809 struct scsi_device *sdev; 811 struct scsi_device *sdev;
810 812
813 if (!ql2xqfulltracking)
814 return;
815
811 sdev = sp->cmd->device; 816 sdev = sp->cmd->device;
812 if (sdev->queue_depth >= req->max_q_depth) 817 if (sdev->queue_depth >= req->max_q_depth)
813 return; 818 return;
@@ -858,8 +863,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
858 qla2x00_ramp_up_queue_depth(vha, req, sp); 863 qla2x00_ramp_up_queue_depth(vha, req, sp);
859 qla2x00_sp_compl(ha, sp); 864 qla2x00_sp_compl(ha, sp);
860 } else { 865 } else {
861 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 866 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
862 vha->host_no)); 867 " handle(%d)\n", vha->host_no, req->id, index));
863 qla_printk(KERN_WARNING, ha, 868 qla_printk(KERN_WARNING, ha,
864 "Invalid ISP SCSI completion handle\n"); 869 "Invalid ISP SCSI completion handle\n");
865 870
@@ -881,7 +886,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
881 uint16_t handle_cnt; 886 uint16_t handle_cnt;
882 uint16_t cnt; 887 uint16_t cnt;
883 888
884 vha = qla2x00_get_rsp_host(rsp); 889 vha = pci_get_drvdata(ha->pdev);
885 890
886 if (!vha->flags.online) 891 if (!vha->flags.online)
887 return; 892 return;
@@ -926,7 +931,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
926 } 931 }
927 break; 932 break;
928 case STATUS_CONT_TYPE: 933 case STATUS_CONT_TYPE:
929 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 934 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
930 break; 935 break;
931 default: 936 default:
932 /* Type Not Supported. */ 937 /* Type Not Supported. */
@@ -945,7 +950,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
945} 950}
946 951
947static inline void 952static inline void
948qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) 953qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
954 struct rsp_que *rsp)
949{ 955{
950 struct scsi_cmnd *cp = sp->cmd; 956 struct scsi_cmnd *cp = sp->cmd;
951 957
@@ -962,7 +968,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
962 sp->request_sense_ptr += sense_len; 968 sp->request_sense_ptr += sense_len;
963 sp->request_sense_length -= sense_len; 969 sp->request_sense_length -= sense_len;
964 if (sp->request_sense_length != 0) 970 if (sp->request_sense_length != 0)
965 sp->fcport->vha->status_srb = sp; 971 rsp->status_srb = sp;
966 972
967 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 973 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
968 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, 974 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
@@ -992,7 +998,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
992 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 998 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
993 uint8_t *rsp_info, *sense_data; 999 uint8_t *rsp_info, *sense_data;
994 struct qla_hw_data *ha = vha->hw; 1000 struct qla_hw_data *ha = vha->hw;
995 struct req_que *req = rsp->req; 1001 uint32_t handle;
1002 uint16_t que;
1003 struct req_que *req;
996 1004
997 sts = (sts_entry_t *) pkt; 1005 sts = (sts_entry_t *) pkt;
998 sts24 = (struct sts_entry_24xx *) pkt; 1006 sts24 = (struct sts_entry_24xx *) pkt;
@@ -1003,18 +1011,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1003 comp_status = le16_to_cpu(sts->comp_status); 1011 comp_status = le16_to_cpu(sts->comp_status);
1004 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1012 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1005 } 1013 }
1006 1014 handle = (uint32_t) LSW(sts->handle);
1015 que = MSW(sts->handle);
1016 req = ha->req_q_map[que];
1007 /* Fast path completion. */ 1017 /* Fast path completion. */
1008 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1018 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1009 qla2x00_process_completed_request(vha, req, sts->handle); 1019 qla2x00_process_completed_request(vha, req, handle);
1010 1020
1011 return; 1021 return;
1012 } 1022 }
1013 1023
1014 /* Validate handle. */ 1024 /* Validate handle. */
1015 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 1025 if (handle < MAX_OUTSTANDING_COMMANDS) {
1016 sp = req->outstanding_cmds[sts->handle]; 1026 sp = req->outstanding_cmds[handle];
1017 req->outstanding_cmds[sts->handle] = NULL; 1027 req->outstanding_cmds[handle] = NULL;
1018 } else 1028 } else
1019 sp = NULL; 1029 sp = NULL;
1020 1030
@@ -1030,7 +1040,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1030 cp = sp->cmd; 1040 cp = sp->cmd;
1031 if (cp == NULL) { 1041 if (cp == NULL) {
1032 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 1042 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1033 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); 1043 "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
1034 qla_printk(KERN_WARNING, ha, 1044 qla_printk(KERN_WARNING, ha,
1035 "Command is NULL: already returned to OS (sp=%p)\n", sp); 1045 "Command is NULL: already returned to OS (sp=%p)\n", sp);
1036 1046
@@ -1121,6 +1131,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1121 scsi_status)); 1131 scsi_status));
1122 1132
1123 /* Adjust queue depth for all luns on the port. */ 1133 /* Adjust queue depth for all luns on the port. */
1134 if (!ql2xqfulltracking)
1135 break;
1124 fcport->last_queue_full = jiffies; 1136 fcport->last_queue_full = jiffies;
1125 starget_for_each_device(cp->device->sdev_target, 1137 starget_for_each_device(cp->device->sdev_target,
1126 fcport, qla2x00_adjust_sdev_qdepth_down); 1138 fcport, qla2x00_adjust_sdev_qdepth_down);
@@ -1133,7 +1145,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1133 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1145 if (!(scsi_status & SS_SENSE_LEN_VALID))
1134 break; 1146 break;
1135 1147
1136 qla2x00_handle_sense(sp, sense_data, sense_len); 1148 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1137 break; 1149 break;
1138 1150
1139 case CS_DATA_UNDERRUN: 1151 case CS_DATA_UNDERRUN:
@@ -1179,6 +1191,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1179 * Adjust queue depth for all luns on the 1191 * Adjust queue depth for all luns on the
1180 * port. 1192 * port.
1181 */ 1193 */
1194 if (!ql2xqfulltracking)
1195 break;
1182 fcport->last_queue_full = jiffies; 1196 fcport->last_queue_full = jiffies;
1183 starget_for_each_device( 1197 starget_for_each_device(
1184 cp->device->sdev_target, fcport, 1198 cp->device->sdev_target, fcport,
@@ -1192,12 +1206,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1192 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1206 if (!(scsi_status & SS_SENSE_LEN_VALID))
1193 break; 1207 break;
1194 1208
1195 qla2x00_handle_sense(sp, sense_data, sense_len); 1209 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1196 } else { 1210 } else {
1197 /* 1211 /*
1198 * If RISC reports underrun and target does not report 1212 * If RISC reports underrun and target does not report
1199 * it then we must have a lost frame, so tell upper 1213 * it then we must have a lost frame, so tell upper
1200 * layer to retry it by reporting a bus busy. 1214 * layer to retry it by reporting an error.
1201 */ 1215 */
1202 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1216 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1203 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1217 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
@@ -1207,7 +1221,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1207 cp->device->id, cp->device->lun, resid, 1221 cp->device->id, cp->device->lun, resid,
1208 scsi_bufflen(cp))); 1222 scsi_bufflen(cp)));
1209 1223
1210 cp->result = DID_BUS_BUSY << 16; 1224 cp->result = DID_ERROR << 16;
1211 break; 1225 break;
1212 } 1226 }
1213 1227
@@ -1334,7 +1348,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1334 } 1348 }
1335 1349
1336 /* Place command on done queue. */ 1350 /* Place command on done queue. */
1337 if (vha->status_srb == NULL) 1351 if (rsp->status_srb == NULL)
1338 qla2x00_sp_compl(ha, sp); 1352 qla2x00_sp_compl(ha, sp);
1339} 1353}
1340 1354
@@ -1346,11 +1360,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1346 * Extended sense data. 1360 * Extended sense data.
1347 */ 1361 */
1348static void 1362static void
1349qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) 1363qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1350{ 1364{
1351 uint8_t sense_sz = 0; 1365 uint8_t sense_sz = 0;
1352 struct qla_hw_data *ha = vha->hw; 1366 struct qla_hw_data *ha = rsp->hw;
1353 srb_t *sp = vha->status_srb; 1367 srb_t *sp = rsp->status_srb;
1354 struct scsi_cmnd *cp; 1368 struct scsi_cmnd *cp;
1355 1369
1356 if (sp != NULL && sp->request_sense_length != 0) { 1370 if (sp != NULL && sp->request_sense_length != 0) {
@@ -1362,7 +1376,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1362 "cmd is NULL: already returned to OS (sp=%p)\n", 1376 "cmd is NULL: already returned to OS (sp=%p)\n",
1363 sp); 1377 sp);
1364 1378
1365 vha->status_srb = NULL; 1379 rsp->status_srb = NULL;
1366 return; 1380 return;
1367 } 1381 }
1368 1382
@@ -1383,7 +1397,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1383 1397
1384 /* Place command on done queue. */ 1398 /* Place command on done queue. */
1385 if (sp->request_sense_length == 0) { 1399 if (sp->request_sense_length == 0) {
1386 vha->status_srb = NULL; 1400 rsp->status_srb = NULL;
1387 qla2x00_sp_compl(ha, sp); 1401 qla2x00_sp_compl(ha, sp);
1388 } 1402 }
1389 } 1403 }
@@ -1399,7 +1413,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1399{ 1413{
1400 srb_t *sp; 1414 srb_t *sp;
1401 struct qla_hw_data *ha = vha->hw; 1415 struct qla_hw_data *ha = vha->hw;
1402 struct req_que *req = rsp->req; 1416 uint32_t handle = LSW(pkt->handle);
1417 uint16_t que = MSW(pkt->handle);
1418 struct req_que *req = ha->req_q_map[que];
1403#if defined(QL_DEBUG_LEVEL_2) 1419#if defined(QL_DEBUG_LEVEL_2)
1404 if (pkt->entry_status & RF_INV_E_ORDER) 1420 if (pkt->entry_status & RF_INV_E_ORDER)
1405 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1421 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1417,14 +1433,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1417#endif 1433#endif
1418 1434
1419 /* Validate handle. */ 1435 /* Validate handle. */
1420 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1436 if (handle < MAX_OUTSTANDING_COMMANDS)
1421 sp = req->outstanding_cmds[pkt->handle]; 1437 sp = req->outstanding_cmds[handle];
1422 else 1438 else
1423 sp = NULL; 1439 sp = NULL;
1424 1440
1425 if (sp) { 1441 if (sp) {
1426 /* Free outstanding command slot. */ 1442 /* Free outstanding command slot. */
1427 req->outstanding_cmds[pkt->handle] = NULL; 1443 req->outstanding_cmds[handle] = NULL;
1428 1444
1429 /* Bad payload or header */ 1445 /* Bad payload or header */
1430 if (pkt->entry_status & 1446 if (pkt->entry_status &
@@ -1486,13 +1502,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1486 * qla24xx_process_response_queue() - Process response queue entries. 1502 * qla24xx_process_response_queue() - Process response queue entries.
1487 * @ha: SCSI driver HA context 1503 * @ha: SCSI driver HA context
1488 */ 1504 */
1489void 1505void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1490qla24xx_process_response_queue(struct rsp_que *rsp) 1506 struct rsp_que *rsp)
1491{ 1507{
1492 struct sts_entry_24xx *pkt; 1508 struct sts_entry_24xx *pkt;
1493 struct scsi_qla_host *vha;
1494
1495 vha = qla2x00_get_rsp_host(rsp);
1496 1509
1497 if (!vha->flags.online) 1510 if (!vha->flags.online)
1498 return; 1511 return;
@@ -1523,7 +1536,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp)
1523 qla2x00_status_entry(vha, rsp, pkt); 1536 qla2x00_status_entry(vha, rsp, pkt);
1524 break; 1537 break;
1525 case STATUS_CONT_TYPE: 1538 case STATUS_CONT_TYPE:
1526 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 1539 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1527 break; 1540 break;
1528 case VP_RPT_ID_IOCB_TYPE: 1541 case VP_RPT_ID_IOCB_TYPE:
1529 qla24xx_report_id_acquisition(vha, 1542 qla24xx_report_id_acquisition(vha,
@@ -1626,7 +1639,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1626 status = 0; 1639 status = 0;
1627 1640
1628 spin_lock(&ha->hardware_lock); 1641 spin_lock(&ha->hardware_lock);
1629 vha = qla2x00_get_rsp_host(rsp); 1642 vha = pci_get_drvdata(ha->pdev);
1630 for (iter = 50; iter--; ) { 1643 for (iter = 50; iter--; ) {
1631 stat = RD_REG_DWORD(&reg->host_status); 1644 stat = RD_REG_DWORD(&reg->host_status);
1632 if (stat & HSRX_RISC_PAUSED) { 1645 if (stat & HSRX_RISC_PAUSED) {
@@ -1664,7 +1677,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1664 break; 1677 break;
1665 case 0x13: 1678 case 0x13:
1666 case 0x14: 1679 case 0x14:
1667 qla24xx_process_response_queue(rsp); 1680 qla24xx_process_response_queue(vha, rsp);
1668 break; 1681 break;
1669 default: 1682 default:
1670 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1683 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1692,6 +1705,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1692 struct qla_hw_data *ha; 1705 struct qla_hw_data *ha;
1693 struct rsp_que *rsp; 1706 struct rsp_que *rsp;
1694 struct device_reg_24xx __iomem *reg; 1707 struct device_reg_24xx __iomem *reg;
1708 struct scsi_qla_host *vha;
1695 1709
1696 rsp = (struct rsp_que *) dev_id; 1710 rsp = (struct rsp_que *) dev_id;
1697 if (!rsp) { 1711 if (!rsp) {
@@ -1704,7 +1718,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1704 1718
1705 spin_lock_irq(&ha->hardware_lock); 1719 spin_lock_irq(&ha->hardware_lock);
1706 1720
1707 qla24xx_process_response_queue(rsp); 1721 vha = qla25xx_get_host(rsp);
1722 qla24xx_process_response_queue(vha, rsp);
1708 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1723 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1709 1724
1710 spin_unlock_irq(&ha->hardware_lock); 1725 spin_unlock_irq(&ha->hardware_lock);
@@ -1717,7 +1732,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1717{ 1732{
1718 struct qla_hw_data *ha; 1733 struct qla_hw_data *ha;
1719 struct rsp_que *rsp; 1734 struct rsp_que *rsp;
1720 struct device_reg_24xx __iomem *reg;
1721 1735
1722 rsp = (struct rsp_que *) dev_id; 1736 rsp = (struct rsp_que *) dev_id;
1723 if (!rsp) { 1737 if (!rsp) {
@@ -1726,13 +1740,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1726 return IRQ_NONE; 1740 return IRQ_NONE;
1727 } 1741 }
1728 ha = rsp->hw; 1742 ha = rsp->hw;
1729 reg = &ha->iobase->isp24;
1730 1743
1731 spin_lock_irq(&ha->hardware_lock); 1744 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
1732
1733 qla24xx_process_response_queue(rsp);
1734
1735 spin_unlock_irq(&ha->hardware_lock);
1736 1745
1737 return IRQ_HANDLED; 1746 return IRQ_HANDLED;
1738} 1747}
@@ -1760,7 +1769,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1760 status = 0; 1769 status = 0;
1761 1770
1762 spin_lock_irq(&ha->hardware_lock); 1771 spin_lock_irq(&ha->hardware_lock);
1763 vha = qla2x00_get_rsp_host(rsp); 1772 vha = pci_get_drvdata(ha->pdev);
1764 do { 1773 do {
1765 stat = RD_REG_DWORD(&reg->host_status); 1774 stat = RD_REG_DWORD(&reg->host_status);
1766 if (stat & HSRX_RISC_PAUSED) { 1775 if (stat & HSRX_RISC_PAUSED) {
@@ -1798,7 +1807,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1798 break; 1807 break;
1799 case 0x13: 1808 case 0x13:
1800 case 0x14: 1809 case 0x14:
1801 qla24xx_process_response_queue(rsp); 1810 qla24xx_process_response_queue(vha, rsp);
1802 break; 1811 break;
1803 default: 1812 default:
1804 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1813 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1822,31 +1831,14 @@ qla24xx_msix_default(int irq, void *dev_id)
1822/* Interrupt handling helpers. */ 1831/* Interrupt handling helpers. */
1823 1832
1824struct qla_init_msix_entry { 1833struct qla_init_msix_entry {
1825 uint16_t entry;
1826 uint16_t index;
1827 const char *name; 1834 const char *name;
1828 irq_handler_t handler; 1835 irq_handler_t handler;
1829}; 1836};
1830 1837
1831static struct qla_init_msix_entry base_queue = { 1838static struct qla_init_msix_entry msix_entries[3] = {
1832 .entry = 0, 1839 { "qla2xxx (default)", qla24xx_msix_default },
1833 .index = 0, 1840 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1834 .name = "qla2xxx (default)", 1841 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
1835 .handler = qla24xx_msix_default,
1836};
1837
1838static struct qla_init_msix_entry base_rsp_queue = {
1839 .entry = 1,
1840 .index = 1,
1841 .name = "qla2xxx (rsp_q)",
1842 .handler = qla24xx_msix_rsp_q,
1843};
1844
1845static struct qla_init_msix_entry multi_rsp_queue = {
1846 .entry = 1,
1847 .index = 1,
1848 .name = "qla2xxx (multi_q)",
1849 .handler = qla25xx_msix_rsp_q,
1850}; 1842};
1851 1843
1852static void 1844static void
@@ -1873,7 +1865,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1873 int i, ret; 1865 int i, ret;
1874 struct msix_entry *entries; 1866 struct msix_entry *entries;
1875 struct qla_msix_entry *qentry; 1867 struct qla_msix_entry *qentry;
1876 struct qla_init_msix_entry *msix_queue;
1877 1868
1878 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 1869 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1879 GFP_KERNEL); 1870 GFP_KERNEL);
@@ -1900,7 +1891,7 @@ msix_failed:
1900 ha->msix_count, ret); 1891 ha->msix_count, ret);
1901 goto msix_out; 1892 goto msix_out;
1902 } 1893 }
1903 ha->max_queues = ha->msix_count - 1; 1894 ha->max_rsp_queues = ha->msix_count - 1;
1904 } 1895 }
1905 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 1896 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1906 ha->msix_count, GFP_KERNEL); 1897 ha->msix_count, GFP_KERNEL);
@@ -1918,45 +1909,27 @@ msix_failed:
1918 qentry->rsp = NULL; 1909 qentry->rsp = NULL;
1919 } 1910 }
1920 1911
1921 /* Enable MSI-X for AENs for queue 0 */ 1912 /* Enable MSI-X vectors for the base queue */
1922 qentry = &ha->msix_entries[0]; 1913 for (i = 0; i < 2; i++) {
1923 ret = request_irq(qentry->vector, base_queue.handler, 0, 1914 qentry = &ha->msix_entries[i];
1924 base_queue.name, rsp); 1915 ret = request_irq(qentry->vector, msix_entries[i].handler,
1925 if (ret) { 1916 0, msix_entries[i].name, rsp);
1926 qla_printk(KERN_WARNING, ha, 1917 if (ret) {
1918 qla_printk(KERN_WARNING, ha,
1927 "MSI-X: Unable to register handler -- %x/%d.\n", 1919 "MSI-X: Unable to register handler -- %x/%d.\n",
1928 qentry->vector, ret); 1920 qentry->vector, ret);
1929 qla24xx_disable_msix(ha); 1921 qla24xx_disable_msix(ha);
1930 goto msix_out; 1922 ha->mqenable = 0;
1923 goto msix_out;
1924 }
1925 qentry->have_irq = 1;
1926 qentry->rsp = rsp;
1927 rsp->msix = qentry;
1931 } 1928 }
1932 qentry->have_irq = 1;
1933 qentry->rsp = rsp;
1934 1929
1935 /* Enable MSI-X vector for response queue update for queue 0 */ 1930 /* Enable MSI-X vector for response queue update for queue 0 */
1936 if (ha->max_queues > 1 && ha->mqiobase) { 1931 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
1937 ha->mqenable = 1; 1932 ha->mqenable = 1;
1938 msix_queue = &multi_rsp_queue;
1939 qla_printk(KERN_INFO, ha,
1940 "MQ enabled, Number of Queue Resources: %d \n",
1941 ha->max_queues);
1942 } else {
1943 ha->mqenable = 0;
1944 msix_queue = &base_rsp_queue;
1945 }
1946
1947 qentry = &ha->msix_entries[1];
1948 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1949 msix_queue->name, rsp);
1950 if (ret) {
1951 qla_printk(KERN_WARNING, ha,
1952 "MSI-X: Unable to register handler -- %x/%d.\n",
1953 qentry->vector, ret);
1954 qla24xx_disable_msix(ha);
1955 ha->mqenable = 0;
1956 goto msix_out;
1957 }
1958 qentry->have_irq = 1;
1959 qentry->rsp = rsp;
1960 1933
1961msix_out: 1934msix_out:
1962 kfree(entries); 1935 kfree(entries);
@@ -2063,35 +2036,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
2063 } 2036 }
2064} 2037}
2065 2038
2066static struct scsi_qla_host *
2067qla2x00_get_rsp_host(struct rsp_que *rsp)
2068{
2069 srb_t *sp;
2070 struct qla_hw_data *ha = rsp->hw;
2071 struct scsi_qla_host *vha = NULL;
2072 struct sts_entry_24xx *pkt;
2073 struct req_que *req;
2074
2075 if (rsp->id) {
2076 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2077 req = rsp->req;
2078 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2079 sp = req->outstanding_cmds[pkt->handle];
2080 if (sp)
2081 vha = sp->fcport->vha;
2082 }
2083 }
2084 if (!vha)
2085 /* handle it in base queue */
2086 vha = pci_get_drvdata(ha->pdev);
2087
2088 return vha;
2089}
2090 2039
2091int qla25xx_request_irq(struct rsp_que *rsp) 2040int qla25xx_request_irq(struct rsp_que *rsp)
2092{ 2041{
2093 struct qla_hw_data *ha = rsp->hw; 2042 struct qla_hw_data *ha = rsp->hw;
2094 struct qla_init_msix_entry *intr = &multi_rsp_queue; 2043 struct qla_init_msix_entry *intr = &msix_entries[2];
2095 struct qla_msix_entry *msix = rsp->msix; 2044 struct qla_msix_entry *msix = rsp->msix;
2096 int ret; 2045 int ret;
2097 2046
@@ -2106,3 +2055,30 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2106 msix->rsp = rsp; 2055 msix->rsp = rsp;
2107 return ret; 2056 return ret;
2108} 2057}
2058
2059struct scsi_qla_host *
2060qla25xx_get_host(struct rsp_que *rsp)
2061{
2062 srb_t *sp;
2063 struct qla_hw_data *ha = rsp->hw;
2064 struct scsi_qla_host *vha = NULL;
2065 struct sts_entry_24xx *pkt;
2066 struct req_que *req;
2067 uint16_t que;
2068 uint32_t handle;
2069
2070 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2071 que = MSW(pkt->handle);
2072 handle = (uint32_t) LSW(pkt->handle);
2073 req = ha->req_q_map[que];
2074 if (handle < MAX_OUTSTANDING_COMMANDS) {
2075 sp = req->outstanding_cmds[handle];
2076 if (sp)
2077 return sp->fcport->vha;
2078 else
2079 goto base_que;
2080 }
2081base_que:
2082 vha = pci_get_drvdata(ha->pdev);
2083 return vha;
2084}
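
A note on the qla_isr.c hunks above: the driver stops looking up the owning host per response queue (the removed qla2x00_get_rsp_host()) and instead encodes the request-queue number in the upper 16 bits of each IOCB handle, so qla2x00_error_entry() and the new qla25xx_get_host() can split the handle back into queue id and command index with MSW()/LSW(). The following is a minimal stand-alone sketch of that packing scheme; the macros are defined locally for illustration and the driver's own MAKE_HANDLE/MSW/LSW definitions may differ in detail.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: pack a request-queue id into the upper 16 bits of a
 * 32-bit IOCB handle and the outstanding-command index into the lower 16. */
#define MAKE_HANDLE(q, h) ((uint32_t)(((uint32_t)(q) << 16) | (uint16_t)(h)))
#define LSW(x)            ((uint16_t)(x))
#define MSW(x)            ((uint16_t)((uint32_t)(x) >> 16))

int main(void)
{
	uint16_t que = 3;      /* request queue number */
	uint16_t index = 42;   /* slot in req->outstanding_cmds[] */
	uint32_t handle = MAKE_HANDLE(que, index);

	/* On completion, the ISR splits the handle back apart to find
	 * the right request queue and command slot. */
	printf("handle=0x%08x que=%u index=%u\n",
	    handle, MSW(handle), LSW(handle));
	return 0;
}
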
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e67c1660bf46..451ece0760b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
408 * Context: 408 * Context:
409 * Kernel context. 409 * Kernel context.
410 */ 410 */
411void 411int
412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, 412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi, 413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
414 uint32_t *mpi_caps, uint8_t *phy) 414 uint32_t *mpi_caps, uint8_t *phy)
@@ -427,6 +427,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
427 mcp->flags = 0; 427 mcp->flags = 0;
428 mcp->tov = MBX_TOV_SECONDS; 428 mcp->tov = MBX_TOV_SECONDS;
429 rval = qla2x00_mailbox_command(vha, mcp); 429 rval = qla2x00_mailbox_command(vha, mcp);
430 if (rval != QLA_SUCCESS)
431 goto failed;
430 432
431 /* Return mailbox data. */ 433 /* Return mailbox data. */
432 *major = mcp->mb[1]; 434 *major = mcp->mb[1];
@@ -446,7 +448,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
446 phy[1] = mcp->mb[9] >> 8; 448 phy[1] = mcp->mb[9] >> 8;
447 phy[2] = mcp->mb[9] & 0xff; 449 phy[2] = mcp->mb[9] & 0xff;
448 } 450 }
449 451failed:
450 if (rval != QLA_SUCCESS) { 452 if (rval != QLA_SUCCESS) {
451 /*EMPTY*/ 453 /*EMPTY*/
452 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 454 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
@@ -455,6 +457,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
455 /*EMPTY*/ 457 /*EMPTY*/
456 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 458 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
457 } 459 }
460 return rval;
458} 461}
459 462
460/* 463/*
@@ -748,20 +751,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
748 * Kernel context. 751 * Kernel context.
749 */ 752 */
750int 753int
751qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 754qla2x00_abort_command(srb_t *sp)
752{ 755{
753 unsigned long flags = 0; 756 unsigned long flags = 0;
754 fc_port_t *fcport;
755 int rval; 757 int rval;
756 uint32_t handle = 0; 758 uint32_t handle = 0;
757 mbx_cmd_t mc; 759 mbx_cmd_t mc;
758 mbx_cmd_t *mcp = &mc; 760 mbx_cmd_t *mcp = &mc;
761 fc_port_t *fcport = sp->fcport;
762 scsi_qla_host_t *vha = fcport->vha;
759 struct qla_hw_data *ha = vha->hw; 763 struct qla_hw_data *ha = vha->hw;
764 struct req_que *req = vha->req;
760 765
761 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 766 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
762 767
763 fcport = sp->fcport;
764
765 spin_lock_irqsave(&ha->hardware_lock, flags); 768 spin_lock_irqsave(&ha->hardware_lock, flags);
766 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 769 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
767 if (req->outstanding_cmds[handle] == sp) 770 if (req->outstanding_cmds[handle] == sp)
@@ -800,7 +803,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
800} 803}
801 804
802int 805int
803qla2x00_abort_target(struct fc_port *fcport, unsigned int l) 806qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
804{ 807{
805 int rval, rval2; 808 int rval, rval2;
806 mbx_cmd_t mc; 809 mbx_cmd_t mc;
@@ -813,8 +816,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
813 816
814 l = l; 817 l = l;
815 vha = fcport->vha; 818 vha = fcport->vha;
816 req = vha->hw->req_q_map[0]; 819 req = vha->hw->req_q_map[tag];
817 rsp = vha->hw->rsp_q_map[0]; 820 rsp = vha->hw->rsp_q_map[tag];
818 mcp->mb[0] = MBC_ABORT_TARGET; 821 mcp->mb[0] = MBC_ABORT_TARGET;
819 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 822 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
820 if (HAS_EXTENDED_IDS(vha->hw)) { 823 if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -850,7 +853,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
850} 853}
851 854
852int 855int
853qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) 856qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
854{ 857{
855 int rval, rval2; 858 int rval, rval2;
856 mbx_cmd_t mc; 859 mbx_cmd_t mc;
@@ -862,8 +865,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
862 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); 865 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
863 866
864 vha = fcport->vha; 867 vha = fcport->vha;
865 req = vha->hw->req_q_map[0]; 868 req = vha->hw->req_q_map[tag];
866 rsp = vha->hw->rsp_q_map[0]; 869 rsp = vha->hw->rsp_q_map[tag];
867 mcp->mb[0] = MBC_LUN_RESET; 870 mcp->mb[0] = MBC_LUN_RESET;
868 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 871 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
869 if (HAS_EXTENDED_IDS(vha->hw)) 872 if (HAS_EXTENDED_IDS(vha->hw))
@@ -931,6 +934,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
931 mcp->mb[9] = vha->vp_idx; 934 mcp->mb[9] = vha->vp_idx;
932 mcp->out_mb = MBX_9|MBX_0; 935 mcp->out_mb = MBX_9|MBX_0;
933 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 936 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
937 if (IS_QLA81XX(vha->hw))
938 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
934 mcp->tov = MBX_TOV_SECONDS; 939 mcp->tov = MBX_TOV_SECONDS;
935 mcp->flags = 0; 940 mcp->flags = 0;
936 rval = qla2x00_mailbox_command(vha, mcp); 941 rval = qla2x00_mailbox_command(vha, mcp);
@@ -952,9 +957,19 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
952 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 957 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
953 vha->host_no, rval)); 958 vha->host_no, rval));
954 } else { 959 } else {
955 /*EMPTY*/
956 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 960 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
957 vha->host_no)); 961 vha->host_no));
962
963 if (IS_QLA81XX(vha->hw)) {
964 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
965 vha->fcoe_fcf_idx = mcp->mb[10];
966 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
967 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
968 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
969 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
970 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
971 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
972 }
958 } 973 }
959 974
960 return rval; 975 return rval;
@@ -1252,7 +1267,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1252 1267
1253 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1268 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1254 mcp->out_mb = MBX_0; 1269 mcp->out_mb = MBX_0;
1255 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1270 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1256 mcp->tov = MBX_TOV_SECONDS; 1271 mcp->tov = MBX_TOV_SECONDS;
1257 mcp->flags = 0; 1272 mcp->flags = 0;
1258 rval = qla2x00_mailbox_command(vha, mcp); 1273 rval = qla2x00_mailbox_command(vha, mcp);
@@ -1261,6 +1276,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1261 states[0] = mcp->mb[1]; 1276 states[0] = mcp->mb[1];
1262 states[1] = mcp->mb[2]; 1277 states[1] = mcp->mb[2];
1263 states[2] = mcp->mb[3]; 1278 states[2] = mcp->mb[3];
1279 states[3] = mcp->mb[4];
1280 states[4] = mcp->mb[5];
1264 1281
1265 if (rval != QLA_SUCCESS) { 1282 if (rval != QLA_SUCCESS) {
1266 /*EMPTY*/ 1283 /*EMPTY*/
@@ -1480,9 +1497,17 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1480 dma_addr_t lg_dma; 1497 dma_addr_t lg_dma;
1481 uint32_t iop[2]; 1498 uint32_t iop[2];
1482 struct qla_hw_data *ha = vha->hw; 1499 struct qla_hw_data *ha = vha->hw;
1500 struct req_que *req;
1501 struct rsp_que *rsp;
1483 1502
1484 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1503 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1485 1504
1505 if (ql2xmultique_tag)
1506 req = ha->req_q_map[0];
1507 else
1508 req = vha->req;
1509 rsp = req->rsp;
1510
1486 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1511 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1487 if (lg == NULL) { 1512 if (lg == NULL) {
1488 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1513 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
@@ -1493,6 +1518,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1493 1518
1494 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1519 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1495 lg->entry_count = 1; 1520 lg->entry_count = 1;
1521 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1496 lg->nport_handle = cpu_to_le16(loop_id); 1522 lg->nport_handle = cpu_to_le16(loop_id);
1497 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); 1523 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1498 if (opt & BIT_0) 1524 if (opt & BIT_0)
@@ -1741,6 +1767,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1741 struct logio_entry_24xx *lg; 1767 struct logio_entry_24xx *lg;
1742 dma_addr_t lg_dma; 1768 dma_addr_t lg_dma;
1743 struct qla_hw_data *ha = vha->hw; 1769 struct qla_hw_data *ha = vha->hw;
1770 struct req_que *req;
1771 struct rsp_que *rsp;
1744 1772
1745 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1773 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1746 1774
@@ -1752,8 +1780,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1752 } 1780 }
1753 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1781 memset(lg, 0, sizeof(struct logio_entry_24xx));
1754 1782
1783 if (ql2xmaxqueues > 1)
1784 req = ha->req_q_map[0];
1785 else
1786 req = vha->req;
1787 rsp = req->rsp;
1755 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1788 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1756 lg->entry_count = 1; 1789 lg->entry_count = 1;
1790 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1757 lg->nport_handle = cpu_to_le16(loop_id); 1791 lg->nport_handle = cpu_to_le16(loop_id);
1758 lg->control_flags = 1792 lg->control_flags =
1759 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1793 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
@@ -1864,9 +1898,6 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1864 mbx_cmd_t mc; 1898 mbx_cmd_t mc;
1865 mbx_cmd_t *mcp = &mc; 1899 mbx_cmd_t *mcp = &mc;
1866 1900
1867 if (IS_QLA81XX(vha->hw))
1868 return QLA_SUCCESS;
1869
1870 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1901 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1871 vha->host_no)); 1902 vha->host_no));
1872 1903
@@ -2195,21 +2226,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2195} 2226}
2196 2227
2197int 2228int
2198qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 2229qla24xx_abort_command(srb_t *sp)
2199{ 2230{
2200 int rval; 2231 int rval;
2201 fc_port_t *fcport;
2202 unsigned long flags = 0; 2232 unsigned long flags = 0;
2203 2233
2204 struct abort_entry_24xx *abt; 2234 struct abort_entry_24xx *abt;
2205 dma_addr_t abt_dma; 2235 dma_addr_t abt_dma;
2206 uint32_t handle; 2236 uint32_t handle;
2237 fc_port_t *fcport = sp->fcport;
2238 struct scsi_qla_host *vha = fcport->vha;
2207 struct qla_hw_data *ha = vha->hw; 2239 struct qla_hw_data *ha = vha->hw;
2240 struct req_que *req = vha->req;
2208 2241
2209 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2242 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2210 2243
2211 fcport = sp->fcport;
2212
2213 spin_lock_irqsave(&ha->hardware_lock, flags); 2244 spin_lock_irqsave(&ha->hardware_lock, flags);
2214 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2245 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2215 if (req->outstanding_cmds[handle] == sp) 2246 if (req->outstanding_cmds[handle] == sp)
@@ -2231,6 +2262,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
2231 2262
2232 abt->entry_type = ABORT_IOCB_TYPE; 2263 abt->entry_type = ABORT_IOCB_TYPE;
2233 abt->entry_count = 1; 2264 abt->entry_count = 1;
2265 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2234 abt->nport_handle = cpu_to_le16(fcport->loop_id); 2266 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2235 abt->handle_to_abort = handle; 2267 abt->handle_to_abort = handle;
2236 abt->port_id[0] = fcport->d_id.b.al_pa; 2268 abt->port_id[0] = fcport->d_id.b.al_pa;
@@ -2272,7 +2304,7 @@ struct tsk_mgmt_cmd {
2272 2304
2273static int 2305static int
2274__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 2306__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2275 unsigned int l) 2307 unsigned int l, int tag)
2276{ 2308{
2277 int rval, rval2; 2309 int rval, rval2;
2278 struct tsk_mgmt_cmd *tsk; 2310 struct tsk_mgmt_cmd *tsk;
@@ -2286,8 +2318,11 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2286 2318
2287 vha = fcport->vha; 2319 vha = fcport->vha;
2288 ha = vha->hw; 2320 ha = vha->hw;
2289 req = ha->req_q_map[0]; 2321 req = vha->req;
2290 rsp = ha->rsp_q_map[0]; 2322 if (ql2xmultique_tag)
2323 rsp = ha->rsp_q_map[tag + 1];
2324 else
2325 rsp = req->rsp;
2291 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2326 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2292 if (tsk == NULL) { 2327 if (tsk == NULL) {
2293 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2328 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
@@ -2298,6 +2333,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2298 2333
2299 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 2334 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2300 tsk->p.tsk.entry_count = 1; 2335 tsk->p.tsk.entry_count = 1;
2336 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2301 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 2337 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2302 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2338 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2303 tsk->p.tsk.control_flags = cpu_to_le32(type); 2339 tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -2344,15 +2380,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2344} 2380}
2345 2381
2346int 2382int
2347qla24xx_abort_target(struct fc_port *fcport, unsigned int l) 2383qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2348{ 2384{
2349 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); 2385 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2350} 2386}
2351 2387
2352int 2388int
2353qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) 2389qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2354{ 2390{
2355 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); 2391 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2356} 2392}
2357 2393
2358int 2394int
@@ -2446,6 +2482,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2446 if (rval != QLA_SUCCESS) { 2482 if (rval != QLA_SUCCESS) {
2447 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2483 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2448 vha->host_no, rval)); 2484 vha->host_no, rval));
2485 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2486 rval = QLA_INVALID_COMMAND;
2449 } else { 2487 } else {
2450 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2488 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2451 } 2489 }
@@ -2717,8 +2755,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2717 if (vp_idx == 0) 2755 if (vp_idx == 0)
2718 return; 2756 return;
2719 2757
2720 if (MSB(stat) == 1) 2758 if (MSB(stat) == 1) {
2759 DEBUG2(printk("scsi(%ld): Could not acquire ID for "
2760 "VP[%d].\n", vha->host_no, vp_idx));
2721 return; 2761 return;
2762 }
2722 2763
2723 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 2764 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
2724 if (vp_idx == vp->vp_idx) 2765 if (vp_idx == vp->vp_idx)
@@ -3141,6 +3182,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3141 WRT_REG_DWORD(&reg->req_q_in, 0); 3182 WRT_REG_DWORD(&reg->req_q_in, 0);
3142 WRT_REG_DWORD(&reg->req_q_out, 0); 3183 WRT_REG_DWORD(&reg->req_q_out, 0);
3143 } 3184 }
3185 req->req_q_in = &reg->req_q_in;
3186 req->req_q_out = &reg->req_q_out;
3144 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3187 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3145 3188
3146 rval = qla2x00_mailbox_command(vha, mcp); 3189 rval = qla2x00_mailbox_command(vha, mcp);
@@ -3167,7 +3210,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3167 mcp->mb[6] = MSW(MSD(rsp->dma)); 3210 mcp->mb[6] = MSW(MSD(rsp->dma));
3168 mcp->mb[7] = LSW(MSD(rsp->dma)); 3211 mcp->mb[7] = LSW(MSD(rsp->dma));
3169 mcp->mb[5] = rsp->length; 3212 mcp->mb[5] = rsp->length;
3170 mcp->mb[11] = rsp->vp_idx;
3171 mcp->mb[14] = rsp->msix->entry; 3213 mcp->mb[14] = rsp->msix->entry;
3172 mcp->mb[13] = rsp->rid; 3214 mcp->mb[13] = rsp->rid;
3173 3215
@@ -3179,7 +3221,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3179 mcp->mb[8] = 0; 3221 mcp->mb[8] = 0;
3180 /* que out ptr index */ 3222 /* que out ptr index */
3181 mcp->mb[9] = 0; 3223 mcp->mb[9] = 0;
3182 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 3224 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3183 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3225 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3184 mcp->in_mb = MBX_0; 3226 mcp->in_mb = MBX_0;
3185 mcp->flags = MBX_DMA_OUT; 3227 mcp->flags = MBX_DMA_OUT;
@@ -3384,7 +3426,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3384 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3426 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
3385 vha->host_no, rval, mcp->mb[0])); 3427 vha->host_no, rval, mcp->mb[0]));
3386 } else { 3428 } else {
3387 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3429 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3388 } 3430 }
3389 3431
3390 return rval; 3432 return rval;
@@ -3428,3 +3470,141 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3428 3470
3429 return rval; 3471 return rval;
3430} 3472}
3473
3474int
3475qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3476 uint16_t size_in_bytes, uint16_t *actual_size)
3477{
3478 int rval;
3479 mbx_cmd_t mc;
3480 mbx_cmd_t *mcp = &mc;
3481
3482 if (!IS_QLA81XX(vha->hw))
3483 return QLA_FUNCTION_FAILED;
3484
3485 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3486
3487 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3488 mcp->mb[2] = MSW(stats_dma);
3489 mcp->mb[3] = LSW(stats_dma);
3490 mcp->mb[6] = MSW(MSD(stats_dma));
3491 mcp->mb[7] = LSW(MSD(stats_dma));
3492 mcp->mb[8] = size_in_bytes >> 2;
3493 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3494 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3495 mcp->tov = MBX_TOV_SECONDS;
3496 mcp->flags = 0;
3497 rval = qla2x00_mailbox_command(vha, mcp);
3498
3499 if (rval != QLA_SUCCESS) {
3500 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3501 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3502 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3503 } else {
3504 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3505
3506 *actual_size = mcp->mb[2] << 2;
3507 }
3508
3509 return rval;
3510}
3511
3512int
3513qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3514 uint16_t size)
3515{
3516 int rval;
3517 mbx_cmd_t mc;
3518 mbx_cmd_t *mcp = &mc;
3519
3520 if (!IS_QLA81XX(vha->hw))
3521 return QLA_FUNCTION_FAILED;
3522
3523 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3524
3525 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3526 mcp->mb[1] = 0;
3527 mcp->mb[2] = MSW(tlv_dma);
3528 mcp->mb[3] = LSW(tlv_dma);
3529 mcp->mb[6] = MSW(MSD(tlv_dma));
3530 mcp->mb[7] = LSW(MSD(tlv_dma));
3531 mcp->mb[8] = size;
3532 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3533 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3534 mcp->tov = MBX_TOV_SECONDS;
3535 mcp->flags = 0;
3536 rval = qla2x00_mailbox_command(vha, mcp);
3537
3538 if (rval != QLA_SUCCESS) {
3539 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3540 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3541 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3542 } else {
3543 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3544 }
3545
3546 return rval;
3547}
3548
3549int
3550qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3551{
3552 int rval;
3553 mbx_cmd_t mc;
3554 mbx_cmd_t *mcp = &mc;
3555
3556 if (!IS_FWI2_CAPABLE(vha->hw))
3557 return QLA_FUNCTION_FAILED;
3558
3559 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3560
3561 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3562 mcp->mb[1] = LSW(risc_addr);
3563 mcp->mb[8] = MSW(risc_addr);
3564 mcp->out_mb = MBX_8|MBX_1|MBX_0;
3565 mcp->in_mb = MBX_3|MBX_2|MBX_0;
3566 mcp->tov = 30;
3567 mcp->flags = 0;
3568 rval = qla2x00_mailbox_command(vha, mcp);
3569 if (rval != QLA_SUCCESS) {
3570 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3571 vha->host_no, rval, mcp->mb[0]));
3572 } else {
3573 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3574 *data = mcp->mb[3] << 16 | mcp->mb[2];
3575 }
3576
3577 return rval;
3578}
3579
3580int
3581qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3582{
3583 int rval;
3584 mbx_cmd_t mc;
3585 mbx_cmd_t *mcp = &mc;
3586
3587 if (!IS_FWI2_CAPABLE(vha->hw))
3588 return QLA_FUNCTION_FAILED;
3589
3590 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3591
3592 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3593 mcp->mb[1] = LSW(risc_addr);
3594 mcp->mb[2] = LSW(data);
3595 mcp->mb[3] = MSW(data);
3596 mcp->mb[8] = MSW(risc_addr);
3597 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
3598 mcp->in_mb = MBX_0;
3599 mcp->tov = 30;
3600 mcp->flags = 0;
3601 rval = qla2x00_mailbox_command(vha, mcp);
3602 if (rval != QLA_SUCCESS) {
3603 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3604 vha->host_no, rval, mcp->mb[0]));
3605 } else {
3606 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3607 }
3608
3609 return rval;
3610}
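
The qla_mbx.c hunks all follow the same mailbox-command pattern: load the command and its arguments into mcp->mb[], declare which mailbox registers are written and read back through the out_mb/in_mb bitmasks, issue qla2x00_mailbox_command(), and on success pull results out of mcp->mb[] (qla2x00_read_ram_word() above is a compact example). Below is a rough user-space sketch of that bitmask convention; the struct, the opcode value, and the fake "firmware" are simplified stand-ins, not the driver's real mbx_cmd_t or mailbox semantics.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's mailbox register bit masks. */
#define MBX_0 (1u << 0)
#define MBX_1 (1u << 1)
#define MBX_2 (1u << 2)
#define MBX_3 (1u << 3)
#define MBX_8 (1u << 8)

struct mbx_cmd {
	uint16_t mb[32];   /* mailbox registers */
	uint32_t out_mb;   /* registers the host loads before issuing */
	uint32_t in_mb;    /* registers captured back from firmware */
};

/* Pretend firmware: return a fabricated 32-bit word in mb[2]/mb[3]. */
static int issue_mailbox_command(struct mbx_cmd *mcp)
{
	uint32_t addr = (uint32_t)mcp->mb[8] << 16 | mcp->mb[1];
	uint32_t data = addr ^ 0xdeadbeef;   /* fake read result */

	if (mcp->in_mb & MBX_2)
		mcp->mb[2] = (uint16_t)data;
	if (mcp->in_mb & MBX_3)
		mcp->mb[3] = (uint16_t)(data >> 16);
	mcp->mb[0] = 0x4000;                 /* pretend "command complete" */
	return 0;
}

int main(void)
{
	struct mbx_cmd mc = { { 0 } };
	uint32_t risc_addr = 0x00100000, data;

	mc.mb[0] = 0x000f;                      /* opcode (arbitrary demo value) */
	mc.mb[1] = (uint16_t)risc_addr;         /* LSW of the address */
	mc.mb[8] = (uint16_t)(risc_addr >> 16); /* MSW of the address */
	mc.out_mb = MBX_8 | MBX_1 | MBX_0;
	mc.in_mb = MBX_3 | MBX_2 | MBX_0;

	if (issue_mailbox_command(&mc) == 0) {
		data = (uint32_t)mc.mb[3] << 16 | mc.mb[2];
		printf("word at 0x%08x = 0x%08x\n", risc_addr, data);
	}
	return 0;
}
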
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 51716c7e3008..650bcef08f2a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
398 398
399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
400 400
401 memset(vha->req_ques, 0, sizeof(vha->req_ques)); 401 vha->req = base_vha->req;
402 vha->req_ques[0] = ha->req_q_map[0]->id; 402 host->can_queue = base_vha->req->length + 128;
403 host->can_queue = ha->req_q_map[0]->length + 128;
404 host->this_id = 255; 403 host->this_id = 255;
405 host->cmd_per_lun = 3; 404 host->cmd_per_lun = 3;
406 host->max_cmd_len = MAX_CMDSZ; 405 host->max_cmd_len = MAX_CMDSZ;
@@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
515 514
516/* Delete all queues for a given vhost */ 515/* Delete all queues for a given vhost */
517int 516int
518qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) 517qla25xx_delete_queues(struct scsi_qla_host *vha)
519{ 518{
520 int cnt, ret = 0; 519 int cnt, ret = 0;
521 struct req_que *req = NULL; 520 struct req_que *req = NULL;
522 struct rsp_que *rsp = NULL; 521 struct rsp_que *rsp = NULL;
523 struct qla_hw_data *ha = vha->hw; 522 struct qla_hw_data *ha = vha->hw;
524 523
525 if (que_no) { 524 /* Delete request queues */
526 /* Delete request queue */ 525 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
527 req = ha->req_q_map[que_no]; 526 req = ha->req_q_map[cnt];
528 if (req) { 527 if (req) {
529 rsp = req->rsp;
530 ret = qla25xx_delete_req_que(vha, req); 528 ret = qla25xx_delete_req_que(vha, req);
531 if (ret != QLA_SUCCESS) { 529 if (ret != QLA_SUCCESS) {
532 qla_printk(KERN_WARNING, ha, 530 qla_printk(KERN_WARNING, ha,
533 "Couldn't delete req que %d\n", req->id); 531 "Couldn't delete req que %d\n",
532 req->id);
534 return ret; 533 return ret;
535 } 534 }
536 /* Delete associated response queue */
537 if (rsp) {
538 ret = qla25xx_delete_rsp_que(vha, rsp);
539 if (ret != QLA_SUCCESS) {
540 qla_printk(KERN_WARNING, ha,
541 "Couldn't delete rsp que %d\n",
542 rsp->id);
543 return ret;
544 }
545 }
546 } 535 }
547 } else { /* delete all queues of this host */ 536 }
548 for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { 537
549 /* Delete request queues */ 538 /* Delete response queues */
550 req = ha->req_q_map[vha->req_ques[cnt]]; 539 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
551 if (req && req->id) { 540 rsp = ha->rsp_q_map[cnt];
552 rsp = req->rsp; 541 if (rsp) {
553 ret = qla25xx_delete_req_que(vha, req); 542 ret = qla25xx_delete_rsp_que(vha, rsp);
554 if (ret != QLA_SUCCESS) { 543 if (ret != QLA_SUCCESS) {
555 qla_printk(KERN_WARNING, ha, 544 qla_printk(KERN_WARNING, ha,
556 "Couldn't delete req que %d\n", 545 "Couldn't delete rsp que %d\n",
557 vha->req_ques[cnt]); 546 rsp->id);
558 return ret; 547 return ret;
559 }
560 vha->req_ques[cnt] = ha->req_q_map[0]->id;
561 /* Delete associated response queue */
562 if (rsp && rsp->id) {
563 ret = qla25xx_delete_rsp_que(vha, rsp);
564 if (ret != QLA_SUCCESS) {
565 qla_printk(KERN_WARNING, ha,
566 "Couldn't delete rsp que %d\n",
567 rsp->id);
568 return ret;
569 }
570 }
571 } 548 }
572 } 549 }
573 } 550 }
574 qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
575 vha->vp_idx);
576 return ret; 551 return ret;
577} 552}
578 553
579int 554int
580qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, 555qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
581 uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) 556 uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
582{ 557{
583 int ret = 0; 558 int ret = 0;
584 struct req_que *req = NULL; 559 struct req_que *req = NULL;
585 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 560 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
586 uint16_t que_id = 0; 561 uint16_t que_id = 0;
587 device_reg_t __iomem *reg; 562 device_reg_t __iomem *reg;
563 uint32_t cnt;
588 564
589 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 565 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
590 if (req == NULL) { 566 if (req == NULL) {
@@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 } 580 }
605 581
606 mutex_lock(&ha->vport_lock); 582 mutex_lock(&ha->vport_lock);
607 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 583 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
608 if (que_id >= ha->max_queues) { 584 if (que_id >= ha->max_req_queues) {
609 mutex_unlock(&ha->vport_lock); 585 mutex_unlock(&ha->vport_lock);
610 qla_printk(KERN_INFO, ha, "No resources to create " 586 qla_printk(KERN_INFO, ha, "No resources to create "
611 "additional request queue\n"); 587 "additional request queue\n");
@@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
617 req->vp_idx = vp_idx; 593 req->vp_idx = vp_idx;
618 req->qos = qos; 594 req->qos = qos;
619 595
620 if (ha->rsp_q_map[rsp_que]) { 596 if (rsp_que < 0)
597 req->rsp = NULL;
598 else
621 req->rsp = ha->rsp_q_map[rsp_que]; 599 req->rsp = ha->rsp_q_map[rsp_que];
622 req->rsp->req = req;
623 }
624 /* Use alternate PCI bus number */ 600 /* Use alternate PCI bus number */
625 if (MSB(req->rid)) 601 if (MSB(req->rid))
626 options |= BIT_4; 602 options |= BIT_4;
@@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
628 if (LSB(req->rid)) 604 if (LSB(req->rid))
629 options |= BIT_5; 605 options |= BIT_5;
630 req->options = options; 606 req->options = options;
607
608 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
609 req->outstanding_cmds[cnt] = NULL;
610 req->current_outstanding_cmd = 1;
611
631 req->ring_ptr = req->ring; 612 req->ring_ptr = req->ring;
632 req->ring_index = 0; 613 req->ring_index = 0;
633 req->cnt = req->length; 614 req->cnt = req->length;
634 req->id = que_id; 615 req->id = que_id;
635 reg = ISP_QUE_REG(ha, que_id); 616 reg = ISP_QUE_REG(ha, que_id);
636 req->req_q_in = &reg->isp25mq.req_q_in;
637 req->req_q_out = &reg->isp25mq.req_q_out;
638 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 617 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
639 mutex_unlock(&ha->vport_lock); 618 mutex_unlock(&ha->vport_lock);
640 619
@@ -654,10 +633,19 @@ que_failed:
654 return 0; 633 return 0;
655} 634}
656 635
636static void qla_do_work(struct work_struct *work)
637{
638 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
639 struct scsi_qla_host *vha;
640
641 vha = qla25xx_get_host(rsp);
642 qla24xx_process_response_queue(vha, rsp);
643}
644
657/* create response queue */ 645/* create response queue */
658int 646int
659qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, 647qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
660 uint8_t vp_idx, uint16_t rid) 648 uint8_t vp_idx, uint16_t rid, int req)
661{ 649{
662 int ret = 0; 650 int ret = 0;
663 struct rsp_que *rsp = NULL; 651 struct rsp_que *rsp = NULL;
@@ -672,7 +660,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
672 goto que_failed; 660 goto que_failed;
673 } 661 }
674 662
675 rsp->length = RESPONSE_ENTRY_CNT_2300; 663 rsp->length = RESPONSE_ENTRY_CNT_MQ;
676 rsp->ring = dma_alloc_coherent(&ha->pdev->dev, 664 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
677 (rsp->length + 1) * sizeof(response_t), 665 (rsp->length + 1) * sizeof(response_t),
678 &rsp->dma, GFP_KERNEL); 666 &rsp->dma, GFP_KERNEL);
@@ -683,8 +671,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
683 } 671 }
684 672
685 mutex_lock(&ha->vport_lock); 673 mutex_lock(&ha->vport_lock);
686 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 674 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
687 if (que_id >= ha->max_queues) { 675 if (que_id >= ha->max_rsp_queues) {
688 mutex_unlock(&ha->vport_lock); 676 mutex_unlock(&ha->vport_lock);
689 qla_printk(KERN_INFO, ha, "No resources to create " 677 qla_printk(KERN_INFO, ha, "No resources to create "
690 "additional response queue\n"); 678 "additional response queue\n");
@@ -708,8 +696,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
708 if (LSB(rsp->rid)) 696 if (LSB(rsp->rid))
709 options |= BIT_5; 697 options |= BIT_5;
710 rsp->options = options; 698 rsp->options = options;
711 rsp->ring_ptr = rsp->ring;
712 rsp->ring_index = 0;
713 rsp->id = que_id; 699 rsp->id = que_id;
714 reg = ISP_QUE_REG(ha, que_id); 700 reg = ISP_QUE_REG(ha, que_id);
715 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 701 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
@@ -728,9 +714,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 mutex_unlock(&ha->vport_lock); 714 mutex_unlock(&ha->vport_lock);
729 goto que_failed; 715 goto que_failed;
730 } 716 }
717 if (req >= 0)
718 rsp->req = ha->req_q_map[req];
719 else
720 rsp->req = NULL;
731 721
732 qla2x00_init_response_q_entries(rsp); 722 qla2x00_init_response_q_entries(rsp);
733 723 if (rsp->hw->wq)
724 INIT_WORK(&rsp->q_work, qla_do_work);
734 return rsp->id; 725 return rsp->id;
735 726
736que_failed: 727que_failed:
@@ -744,14 +735,16 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
744 uint16_t options = 0; 735 uint16_t options = 0;
745 uint8_t ret = 0; 736 uint8_t ret = 0;
746 struct qla_hw_data *ha = vha->hw; 737 struct qla_hw_data *ha = vha->hw;
738 struct rsp_que *rsp;
747 739
748 options |= BIT_1; 740 options |= BIT_1;
749 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); 741 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
750 if (!ret) { 742 if (!ret) {
751 qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); 743 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
752 return ret; 744 return ret;
753 } else 745 } else
754 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); 746 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
747 rsp = ha->rsp_q_map[ret];
755 748
756 options = 0; 749 options = 0;
757 if (qos & BIT_7) 750 if (qos & BIT_7)
@@ -759,10 +752,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
759 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, 752 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
760 qos & ~BIT_7); 753 qos & ~BIT_7);
761 if (ret) { 754 if (ret) {
762 vha->req_ques[0] = ret; 755 vha->req = ha->req_q_map[ret];
763 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); 756 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
764 } else 757 } else
765 qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); 758 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
759 rsp->req = ha->req_q_map[ret];
766 760
767 return ret; 761 return ret;
768} 762}
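
The qla_mid.c changes pair with the earlier qla25xx_msix_rsp_q() hunk: instead of draining the ring under the hardware lock in the interrupt handler, each response queue gets a work item (qla_do_work) that the MSI-X handler pushes to a specific CPU with queue_work_on(rsp->id - 1, ha->wq, ...), which is the CPU-affinity mode selected by ql2xmultique_tag. The sketch below shows only that hand-off using the standard kernel workqueue API; all demo_* names are hypothetical and the real driver structures carry much more state.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

/* Illustrative only: a response queue whose completion processing is
 * deferred to a work item queued on a particular CPU. */
struct demo_rsp_que {
	int id;                     /* 1-based queue number */
	struct work_struct q_work;  /* per-queue deferred work */
};

static struct workqueue_struct *demo_wq;
static struct demo_rsp_que demo_rsp = { .id = 1 };

static void demo_do_work(struct work_struct *work)
{
	struct demo_rsp_que *rsp = container_of(work, struct demo_rsp_que, q_work);

	/* In the driver, this is where the response ring for this queue
	 * would be drained (qla24xx_process_response_queue()). */
	pr_info("demo: draining rsp queue %d on CPU %d\n",
	    rsp->id, raw_smp_processor_id());
}

/* Stand-in for the MSI-X handler: push the queue to "its" CPU. */
static void demo_msix_rsp_q(struct demo_rsp_que *rsp)
{
	queue_work_on(rsp->id - 1, demo_wq, &rsp->q_work);
}

static int __init demo_init(void)
{
	demo_wq = create_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_rsp.q_work, demo_do_work);
	demo_msix_rsp_q(&demo_rsp);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
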
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e4fdcdad80d0..dcf011679c8b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -77,6 +77,14 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
77MODULE_PARM_DESC(ql2xmaxqdepth, 77MODULE_PARM_DESC(ql2xmaxqdepth,
78 "Maximum queue depth to report for target devices."); 78 "Maximum queue depth to report for target devices.");
79 79
80int ql2xqfulltracking = 1;
81module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfulltracking,
83 "Controls whether the driver tracks queue full status "
84 "returns and dynamically adjusts a scsi device's queue "
85 "depth. Default is 1, perform tracking. Set to 0 to "
86 "disable dynamic tracking and adjustment of queue depth.");
87
80int ql2xqfullrampup = 120; 88int ql2xqfullrampup = 120;
81module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); 89module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfullrampup, 90MODULE_PARM_DESC(ql2xqfullrampup,
@@ -96,6 +104,23 @@ MODULE_PARM_DESC(ql2xmaxqueues,
96 "Enables MQ settings " 104 "Enables MQ settings "
97 "Default is 1 for single queue. Set it to number \ 105 "Default is 1 for single queue. Set it to number \
98 of queues in MQ mode."); 106 of queues in MQ mode.");
107
108int ql2xmultique_tag;
109module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
110MODULE_PARM_DESC(ql2xmultique_tag,
111 "Enables CPU affinity settings for the driver "
112 "Default is 0 for no affinity of request and response IO. "
113 "Set it to 1 to turn on the cpu affinity.");
114
115int ql2xfwloadbin;
116module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
117MODULE_PARM_DESC(ql2xfwloadbin,
118 "Option to specify location from which to load ISP firmware:\n"
119 " 2 -- load firmware via the request_firmware() (hotplug)\n"
120 " interface.\n"
121 " 1 -- load firmware from flash.\n"
122 " 0 -- use default semantics.\n");
123
99/* 124/*
100 * SCSI host template entry points 125 * SCSI host template entry points
101 */ 126 */
@@ -187,7 +212,7 @@ static void qla2x00_sp_free_dma(srb_t *);
187/* -------------------------------------------------------------------------- */ 212/* -------------------------------------------------------------------------- */
188static int qla2x00_alloc_queues(struct qla_hw_data *ha) 213static int qla2x00_alloc_queues(struct qla_hw_data *ha)
189{ 214{
190 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, 215 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
191 GFP_KERNEL); 216 GFP_KERNEL);
192 if (!ha->req_q_map) { 217 if (!ha->req_q_map) {
193 qla_printk(KERN_WARNING, ha, 218 qla_printk(KERN_WARNING, ha,
@@ -195,7 +220,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
195 goto fail_req_map; 220 goto fail_req_map;
196 } 221 }
197 222
198 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, 223 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
199 GFP_KERNEL); 224 GFP_KERNEL);
200 if (!ha->rsp_q_map) { 225 if (!ha->rsp_q_map) {
201 qla_printk(KERN_WARNING, ha, 226 qla_printk(KERN_WARNING, ha,
@@ -213,16 +238,8 @@ fail_req_map:
213 return -ENOMEM; 238 return -ENOMEM;
214} 239}
215 240
216static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, 241static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
217 struct rsp_que *rsp)
218{ 242{
219 if (rsp && rsp->ring)
220 dma_free_coherent(&ha->pdev->dev,
221 (rsp->length + 1) * sizeof(response_t),
222 rsp->ring, rsp->dma);
223
224 kfree(rsp);
225 rsp = NULL;
226 if (req && req->ring) 243 if (req && req->ring)
227 dma_free_coherent(&ha->pdev->dev, 244 dma_free_coherent(&ha->pdev->dev,
228 (req->length + 1) * sizeof(request_t), 245 (req->length + 1) * sizeof(request_t),
@@ -232,22 +249,77 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
232 req = NULL; 249 req = NULL;
233} 250}
234 251
252static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
253{
254 if (rsp && rsp->ring)
255 dma_free_coherent(&ha->pdev->dev,
256 (rsp->length + 1) * sizeof(response_t),
257 rsp->ring, rsp->dma);
258
259 kfree(rsp);
260 rsp = NULL;
261}
262
235static void qla2x00_free_queues(struct qla_hw_data *ha) 263static void qla2x00_free_queues(struct qla_hw_data *ha)
236{ 264{
237 struct req_que *req; 265 struct req_que *req;
238 struct rsp_que *rsp; 266 struct rsp_que *rsp;
239 int cnt; 267 int cnt;
240 268
241 for (cnt = 0; cnt < ha->max_queues; cnt++) { 269 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
242 rsp = ha->rsp_q_map[cnt];
243 req = ha->req_q_map[cnt]; 270 req = ha->req_q_map[cnt];
244 qla2x00_free_que(ha, req, rsp); 271 qla2x00_free_req_que(ha, req);
272 }
273 kfree(ha->req_q_map);
274 ha->req_q_map = NULL;
275
276 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
277 rsp = ha->rsp_q_map[cnt];
278 qla2x00_free_rsp_que(ha, rsp);
245 } 279 }
246 kfree(ha->rsp_q_map); 280 kfree(ha->rsp_q_map);
247 ha->rsp_q_map = NULL; 281 ha->rsp_q_map = NULL;
282}
248 283
249 kfree(ha->req_q_map); 284static int qla25xx_setup_mode(struct scsi_qla_host *vha)
250 ha->req_q_map = NULL; 285{
286 uint16_t options = 0;
287 int ques, req, ret;
288 struct qla_hw_data *ha = vha->hw;
289
290 if (ql2xmultique_tag) {
291 /* CPU affinity mode */
292 ha->wq = create_workqueue("qla2xxx_wq");
293 /* create a request queue for IO */
294 options |= BIT_7;
295 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
296 QLA_DEFAULT_QUE_QOS);
297 if (!req) {
298 qla_printk(KERN_WARNING, ha,
299 "Can't create request queue\n");
300 goto fail;
301 }
302 vha->req = ha->req_q_map[req];
303 options |= BIT_1;
304 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
305 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
306 if (!ret) {
307 qla_printk(KERN_WARNING, ha,
308 "Response Queue create failed\n");
309 goto fail2;
310 }
311 }
312 DEBUG2(qla_printk(KERN_INFO, ha,
313 "CPU affinity mode enabled, no. of response"
314 " queues:%d, no. of request queues:%d\n",
315 ha->max_rsp_queues, ha->max_req_queues));
316 }
317 return 0;
318fail2:
319 qla25xx_delete_queues(vha);
320fail:
321 ha->mqenable = 0;
322 return 1;
251} 323}
252 324
253static char * 325static char *
@@ -387,7 +459,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
387 459
388 sp->fcport = fcport; 460 sp->fcport = fcport;
389 sp->cmd = cmd; 461 sp->cmd = cmd;
390 sp->que = ha->req_q_map[0];
391 sp->flags = 0; 462 sp->flags = 0;
392 CMD_SP(cmd) = (void *)sp; 463 CMD_SP(cmd) = (void *)sp;
393 cmd->scsi_done = done; 464 cmd->scsi_done = done;
@@ -612,7 +683,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
612void 683void
613qla2x00_abort_fcport_cmds(fc_port_t *fcport) 684qla2x00_abort_fcport_cmds(fc_port_t *fcport)
614{ 685{
615 int cnt, que, id; 686 int cnt;
616 unsigned long flags; 687 unsigned long flags;
617 srb_t *sp; 688 srb_t *sp;
618 scsi_qla_host_t *vha = fcport->vha; 689 scsi_qla_host_t *vha = fcport->vha;
@@ -620,32 +691,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
620 struct req_que *req; 691 struct req_que *req;
621 692
622 spin_lock_irqsave(&ha->hardware_lock, flags); 693 spin_lock_irqsave(&ha->hardware_lock, flags);
623 for (que = 0; que < QLA_MAX_HOST_QUES; que++) { 694 req = vha->req;
624 id = vha->req_ques[que]; 695 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
625 req = ha->req_q_map[id]; 696 sp = req->outstanding_cmds[cnt];
626 if (!req) 697 if (!sp)
698 continue;
699 if (sp->fcport != fcport)
627 continue; 700 continue;
628 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
629 sp = req->outstanding_cmds[cnt];
630 if (!sp)
631 continue;
632 if (sp->fcport != fcport)
633 continue;
634 701
635 spin_unlock_irqrestore(&ha->hardware_lock, flags); 702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
636 if (ha->isp_ops->abort_command(vha, sp, req)) { 703 if (ha->isp_ops->abort_command(sp)) {
704 DEBUG2(qla_printk(KERN_WARNING, ha,
705 "Abort failed -- %lx\n",
706 sp->cmd->serial_number));
707 } else {
708 if (qla2x00_eh_wait_on_command(sp->cmd) !=
709 QLA_SUCCESS)
637 DEBUG2(qla_printk(KERN_WARNING, ha, 710 DEBUG2(qla_printk(KERN_WARNING, ha,
638 "Abort failed -- %lx\n", 711 "Abort failed while waiting -- %lx\n",
639 sp->cmd->serial_number)); 712 sp->cmd->serial_number));
640 } else {
641 if (qla2x00_eh_wait_on_command(sp->cmd) !=
642 QLA_SUCCESS)
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed while waiting -- %lx\n",
645 sp->cmd->serial_number));
646 }
647 spin_lock_irqsave(&ha->hardware_lock, flags);
648 } 713 }
714 spin_lock_irqsave(&ha->hardware_lock, flags);
649 } 715 }
650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 716 spin_unlock_irqrestore(&ha->hardware_lock, flags);
651} 717}
@@ -693,7 +759,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
693 unsigned long flags; 759 unsigned long flags;
694 int wait = 0; 760 int wait = 0;
695 struct qla_hw_data *ha = vha->hw; 761 struct qla_hw_data *ha = vha->hw;
696 struct req_que *req; 762 struct req_que *req = vha->req;
697 srb_t *spt; 763 srb_t *spt;
698 764
699 qla2x00_block_error_handler(cmd); 765 qla2x00_block_error_handler(cmd);
@@ -709,7 +775,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
709 spt = (srb_t *) CMD_SP(cmd); 775 spt = (srb_t *) CMD_SP(cmd);
710 if (!spt) 776 if (!spt)
711 return SUCCESS; 777 return SUCCESS;
712 req = spt->que;
713 778
714 /* Check active list for command command. */ 779 /* Check active list for command command. */
715 spin_lock_irqsave(&ha->hardware_lock, flags); 780 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -726,7 +791,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
726 " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 791 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
727 792
728 spin_unlock_irqrestore(&ha->hardware_lock, flags); 793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
729 if (ha->isp_ops->abort_command(vha, sp, req)) { 794 if (ha->isp_ops->abort_command(sp)) {
730 DEBUG2(printk("%s(%ld): abort_command " 795 DEBUG2(printk("%s(%ld): abort_command "
731 "mbx failed.\n", __func__, vha->host_no)); 796 "mbx failed.\n", __func__, vha->host_no));
732 ret = FAILED; 797 ret = FAILED;
@@ -777,7 +842,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
777 return status; 842 return status;
778 843
779 spin_lock_irqsave(&ha->hardware_lock, flags); 844 spin_lock_irqsave(&ha->hardware_lock, flags);
780 req = sp->que; 845 req = vha->req;
781 for (cnt = 1; status == QLA_SUCCESS && 846 for (cnt = 1; status == QLA_SUCCESS &&
782 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 847 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
783 sp = req->outstanding_cmds[cnt]; 848 sp = req->outstanding_cmds[cnt];
@@ -820,7 +885,7 @@ static char *reset_errors[] = {
820 885
821static int 886static int
822__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 887__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
823 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 888 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
824{ 889{
825 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 890 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
826 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 891 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -841,7 +906,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
841 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 906 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
842 goto eh_reset_failed; 907 goto eh_reset_failed;
843 err = 2; 908 err = 2;
844 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 909 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
910 != QLA_SUCCESS)
845 goto eh_reset_failed; 911 goto eh_reset_failed;
846 err = 3; 912 err = 3;
847 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 913 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -996,6 +1062,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
996 if (qla2x00_vp_abort_isp(vha)) 1062 if (qla2x00_vp_abort_isp(vha))
997 goto eh_host_reset_lock; 1063 goto eh_host_reset_lock;
998 } else { 1064 } else {
1065 if (ha->wq)
1066 flush_workqueue(ha->wq);
1067
999 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1068 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1000 if (qla2x00_abort_isp(base_vha)) { 1069 if (qla2x00_abort_isp(base_vha)) {
1001 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1070 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -1037,7 +1106,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1037 struct fc_port *fcport; 1106 struct fc_port *fcport;
1038 struct qla_hw_data *ha = vha->hw; 1107 struct qla_hw_data *ha = vha->hw;
1039 1108
1040 if (ha->flags.enable_lip_full_login && !vha->vp_idx) { 1109 if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
1110 !IS_QLA81XX(ha)) {
1041 ret = qla2x00_full_login_lip(vha); 1111 ret = qla2x00_full_login_lip(vha);
1042 if (ret != QLA_SUCCESS) { 1112 if (ret != QLA_SUCCESS) {
1043 DEBUG2_3(printk("%s(%ld): failed: " 1113 DEBUG2_3(printk("%s(%ld): failed: "
@@ -1064,7 +1134,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1064 if (fcport->port_type != FCT_TARGET) 1134 if (fcport->port_type != FCT_TARGET)
1065 continue; 1135 continue;
1066 1136
1067 ret = ha->isp_ops->target_reset(fcport, 0); 1137 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1068 if (ret != QLA_SUCCESS) { 1138 if (ret != QLA_SUCCESS) {
1069 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1139 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1070 "target_reset=%d d_id=%x.\n", __func__, 1140 "target_reset=%d d_id=%x.\n", __func__,
@@ -1088,7 +1158,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1088 struct req_que *req; 1158 struct req_que *req;
1089 1159
1090 spin_lock_irqsave(&ha->hardware_lock, flags); 1160 spin_lock_irqsave(&ha->hardware_lock, flags);
1091 for (que = 0; que < ha->max_queues; que++) { 1161 for (que = 0; que < ha->max_req_queues; que++) {
1092 req = ha->req_q_map[que]; 1162 req = ha->req_q_map[que];
1093 if (!req) 1163 if (!req)
1094 continue; 1164 continue;
@@ -1123,7 +1193,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1123 scsi_qla_host_t *vha = shost_priv(sdev->host); 1193 scsi_qla_host_t *vha = shost_priv(sdev->host);
1124 struct qla_hw_data *ha = vha->hw; 1194 struct qla_hw_data *ha = vha->hw;
1125 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1195 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1126 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 1196 struct req_que *req = vha->req;
1127 1197
1128 if (sdev->tagged_supported) 1198 if (sdev->tagged_supported)
1129 scsi_activate_tcq(sdev, req->max_q_depth); 1199 scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1511,6 +1581,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1511 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1581 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1512 break; 1582 break;
1513 } 1583 }
1584
1585 /* Get adapter physical port no from interrupt pin register. */
1586 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1587 if (ha->port_no & 1)
1588 ha->flags.port0 = 1;
1589 else
1590 ha->flags.port0 = 0;
1514} 1591}
1515 1592
1516static int 1593static int
@@ -1518,6 +1595,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1518{ 1595{
1519 resource_size_t pio; 1596 resource_size_t pio;
1520 uint16_t msix; 1597 uint16_t msix;
1598 int cpus;
1521 1599
1522 if (pci_request_selected_regions(ha->pdev, ha->bars, 1600 if (pci_request_selected_regions(ha->pdev, ha->bars,
1523 QLA2XXX_DRIVER_NAME)) { 1601 QLA2XXX_DRIVER_NAME)) {
@@ -1571,8 +1649,9 @@ skip_pio:
1571 } 1649 }
1572 1650
1573 /* Determine queue resources */ 1651 /* Determine queue resources */
1574 ha->max_queues = 1; 1652 ha->max_req_queues = ha->max_rsp_queues = 1;
1575 if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1653 if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
1654 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1576 goto mqiobase_exit; 1655 goto mqiobase_exit;
1577 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1656 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1578 pci_resource_len(ha->pdev, 3)); 1657 pci_resource_len(ha->pdev, 3));
@@ -1582,18 +1661,24 @@ skip_pio:
1582 ha->msix_count = msix; 1661 ha->msix_count = msix;
1583 /* Max queues are bounded by available msix vectors */ 1662 /* Max queues are bounded by available msix vectors */
1584 /* queue 0 uses two msix vectors */ 1663 /* queue 0 uses two msix vectors */
1585 if (ha->msix_count - 1 < ql2xmaxqueues) 1664 if (ql2xmultique_tag) {
1586 ha->max_queues = ha->msix_count - 1; 1665 cpus = num_online_cpus();
1587 else if (ql2xmaxqueues > QLA_MQ_SIZE) 1666 ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
1588 ha->max_queues = QLA_MQ_SIZE; 1667 (cpus + 1) : (ha->msix_count - 1);
1589 else 1668 ha->max_req_queues = 2;
1590 ha->max_queues = ql2xmaxqueues; 1669 } else if (ql2xmaxqueues > 1) {
1670 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1671 QLA_MQ_SIZE : ql2xmaxqueues;
1672 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1673 " of request queues:%d\n", ha->max_req_queues));
1674 }
1591 qla_printk(KERN_INFO, ha, 1675 qla_printk(KERN_INFO, ha,
1592 "MSI-X vector count: %d\n", msix); 1676 "MSI-X vector count: %d\n", msix);
1593 } 1677 } else
1678 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1594 1679
1595mqiobase_exit: 1680mqiobase_exit:
1596 ha->msix_count = ha->max_queues + 1; 1681 ha->msix_count = ha->max_rsp_queues + 1;
1597 return (0); 1682 return (0);
1598 1683
1599iospace_error_exit: 1684iospace_error_exit:
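In the multiqueue sizing above, queue 0 still consumes two MSI-X vectors, so when ql2xmultique_tag is set the response-queue count is derived from the vector budget and the online CPU count, and msix_count is finally trimmed to max_rsp_queues + 1. A standalone rerun of that arithmetic with assumed numbers (16 vectors, 4 CPUs; neither value comes from the patch):

#include <stdio.h>

int main(void)
{
        int msix_count = 16, cpus = 4;          /* assumed example values */
        int max_req_queues = 2;
        /* same expression the patch uses to size response queues */
        int max_rsp_queues = (msix_count - 1 - cpus) ? (cpus + 1)
                                                     : (msix_count - 1);
        msix_count = max_rsp_queues + 1;        /* vectors actually requested */
        printf("req=%d rsp=%d msix=%d\n",       /* prints req=2 rsp=5 msix=6 */
               max_req_queues, max_rsp_queues, msix_count);
        return 0;
}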
@@ -1605,6 +1690,9 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
1605{ 1690{
1606 scsi_qla_host_t *vha = shost_priv(shost); 1691 scsi_qla_host_t *vha = shost_priv(shost);
1607 1692
1693 if (vha->hw->flags.running_gold_fw)
1694 return;
1695
1608 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1696 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1609 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1697 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1610 set_bit(RSCN_UPDATE, &vha->dpc_flags); 1698 set_bit(RSCN_UPDATE, &vha->dpc_flags);
@@ -1768,6 +1856,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1768 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 1856 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
1769 ha->gid_list_info_size = 8; 1857 ha->gid_list_info_size = 8;
1770 ha->optrom_size = OPTROM_SIZE_81XX; 1858 ha->optrom_size = OPTROM_SIZE_81XX;
1859 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1771 ha->isp_ops = &qla81xx_isp_ops; 1860 ha->isp_ops = &qla81xx_isp_ops;
1772 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 1861 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
1773 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 1862 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
@@ -1803,14 +1892,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1803 1892
1804 ret = -ENOMEM; 1893 ret = -ENOMEM;
1805 qla2x00_mem_free(ha); 1894 qla2x00_mem_free(ha);
1806 qla2x00_free_que(ha, req, rsp); 1895 qla2x00_free_req_que(ha, req);
1896 qla2x00_free_rsp_que(ha, rsp);
1807 goto probe_hw_failed; 1897 goto probe_hw_failed;
1808 } 1898 }
1809 1899
1810 pci_set_drvdata(pdev, base_vha); 1900 pci_set_drvdata(pdev, base_vha);
1811 1901
1812 host = base_vha->host; 1902 host = base_vha->host;
1813 base_vha->req_ques[0] = req->id; 1903 base_vha->req = req;
1814 host->can_queue = req->length + 128; 1904 host->can_queue = req->length + 128;
1815 if (IS_QLA2XXX_MIDTYPE(ha)) 1905 if (IS_QLA2XXX_MIDTYPE(ha))
1816 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 1906 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1841,7 +1931,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1841 } 1931 }
1842 ha->rsp_q_map[0] = rsp; 1932 ha->rsp_q_map[0] = rsp;
1843 ha->req_q_map[0] = req; 1933 ha->req_q_map[0] = req;
1844 1934 rsp->req = req;
1935 req->rsp = rsp;
1936 set_bit(0, ha->req_qid_map);
1937 set_bit(0, ha->rsp_qid_map);
1845 /* FWI2-capable only. */ 1938 /* FWI2-capable only. */
1846 req->req_q_in = &ha->iobase->isp24.req_q_in; 1939 req->req_q_in = &ha->iobase->isp24.req_q_in;
1847 req->req_q_out = &ha->iobase->isp24.req_q_out; 1940 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1866,6 +1959,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1866 goto probe_failed; 1959 goto probe_failed;
1867 } 1960 }
1868 1961
1962 if (ha->mqenable)
1963 if (qla25xx_setup_mode(base_vha))
1964 qla_printk(KERN_WARNING, ha,
1965 "Can't create queues, falling back to single"
1966 " queue mode\n");
1967
1968 if (ha->flags.running_gold_fw)
1969 goto skip_dpc;
1970
1869 /* 1971 /*
1870 * Startup the kernel thread for this host adapter 1972 * Startup the kernel thread for this host adapter
1871 */ 1973 */
@@ -1878,6 +1980,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1878 goto probe_failed; 1980 goto probe_failed;
1879 } 1981 }
1880 1982
1983skip_dpc:
1881 list_add_tail(&base_vha->list, &ha->vp_list); 1984 list_add_tail(&base_vha->list, &ha->vp_list);
1882 base_vha->host->irq = ha->pdev->irq; 1985 base_vha->host->irq = ha->pdev->irq;
1883 1986
@@ -1917,8 +2020,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1917 return 0; 2020 return 0;
1918 2021
1919probe_init_failed: 2022probe_init_failed:
1920 qla2x00_free_que(ha, req, rsp); 2023 qla2x00_free_req_que(ha, req);
1921 ha->max_queues = 0; 2024 qla2x00_free_rsp_que(ha, rsp);
2025 ha->max_req_queues = ha->max_rsp_queues = 0;
1922 2026
1923probe_failed: 2027probe_failed:
1924 if (base_vha->timer_active) 2028 if (base_vha->timer_active)
@@ -1976,6 +2080,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
1976 2080
1977 base_vha->flags.online = 0; 2081 base_vha->flags.online = 0;
1978 2082
2083 /* Flush the work queue and remove it */
2084 if (ha->wq) {
2085 flush_workqueue(ha->wq);
2086 destroy_workqueue(ha->wq);
2087 ha->wq = NULL;
2088 }
2089
1979 /* Kill the kernel thread for this host */ 2090 /* Kill the kernel thread for this host */
1980 if (ha->dpc_thread) { 2091 if (ha->dpc_thread) {
1981 struct task_struct *t = ha->dpc_thread; 2092 struct task_struct *t = ha->dpc_thread;
@@ -2017,6 +2128,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2017{ 2128{
2018 struct qla_hw_data *ha = vha->hw; 2129 struct qla_hw_data *ha = vha->hw;
2019 2130
2131 qla25xx_delete_queues(vha);
2132
2020 if (ha->flags.fce_enabled) 2133 if (ha->flags.fce_enabled)
2021 qla2x00_disable_fce_trace(vha, NULL, NULL); 2134 qla2x00_disable_fce_trace(vha, NULL, NULL);
2022 2135
@@ -2329,6 +2442,14 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2329 vfree(ha->fw_dump); 2442 vfree(ha->fw_dump);
2330 } 2443 }
2331 2444
2445 if (ha->dcbx_tlv)
2446 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2447 ha->dcbx_tlv, ha->dcbx_tlv_dma);
2448
2449 if (ha->xgmac_data)
2450 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2451 ha->xgmac_data, ha->xgmac_data_dma);
2452
2332 if (ha->sns_cmd) 2453 if (ha->sns_cmd)
2333 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2454 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2334 ha->sns_cmd, ha->sns_cmd_dma); 2455 ha->sns_cmd, ha->sns_cmd_dma);
@@ -2412,6 +2533,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2412 INIT_LIST_HEAD(&vha->work_list); 2533 INIT_LIST_HEAD(&vha->work_list);
2413 INIT_LIST_HEAD(&vha->list); 2534 INIT_LIST_HEAD(&vha->list);
2414 2535
2536 spin_lock_init(&vha->work_lock);
2537
2415 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 2538 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2416 return vha; 2539 return vha;
2417 2540
@@ -2420,13 +2543,11 @@ fail:
2420} 2543}
2421 2544
2422static struct qla_work_evt * 2545static struct qla_work_evt *
2423qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type, 2546qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2424 int locked)
2425{ 2547{
2426 struct qla_work_evt *e; 2548 struct qla_work_evt *e;
2427 2549
2428 e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC: 2550 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2429 GFP_KERNEL);
2430 if (!e) 2551 if (!e)
2431 return NULL; 2552 return NULL;
2432 2553
@@ -2437,17 +2558,15 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
2437} 2558}
2438 2559
2439static int 2560static int
2440qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked) 2561qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
2441{ 2562{
2442 unsigned long uninitialized_var(flags); 2563 unsigned long flags;
2443 struct qla_hw_data *ha = vha->hw;
2444 2564
2445 if (!locked) 2565 spin_lock_irqsave(&vha->work_lock, flags);
2446 spin_lock_irqsave(&ha->hardware_lock, flags);
2447 list_add_tail(&e->list, &vha->work_list); 2566 list_add_tail(&e->list, &vha->work_list);
2567 spin_unlock_irqrestore(&vha->work_lock, flags);
2448 qla2xxx_wake_dpc(vha); 2568 qla2xxx_wake_dpc(vha);
2449 if (!locked) 2569
2450 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2451 return QLA_SUCCESS; 2570 return QLA_SUCCESS;
2452} 2571}
2453 2572
@@ -2457,13 +2576,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2457{ 2576{
2458 struct qla_work_evt *e; 2577 struct qla_work_evt *e;
2459 2578
2460 e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1); 2579 e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
2461 if (!e) 2580 if (!e)
2462 return QLA_FUNCTION_FAILED; 2581 return QLA_FUNCTION_FAILED;
2463 2582
2464 e->u.aen.code = code; 2583 e->u.aen.code = code;
2465 e->u.aen.data = data; 2584 e->u.aen.data = data;
2466 return qla2x00_post_work(vha, e, 1); 2585 return qla2x00_post_work(vha, e);
2467} 2586}
2468 2587
2469int 2588int
@@ -2471,25 +2590,27 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
2471{ 2590{
2472 struct qla_work_evt *e; 2591 struct qla_work_evt *e;
2473 2592
2474 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); 2593 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
2475 if (!e) 2594 if (!e)
2476 return QLA_FUNCTION_FAILED; 2595 return QLA_FUNCTION_FAILED;
2477 2596
2478 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 2597 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
2479 return qla2x00_post_work(vha, e, 1); 2598 return qla2x00_post_work(vha, e);
2480} 2599}
2481 2600
2482static void 2601static void
2483qla2x00_do_work(struct scsi_qla_host *vha) 2602qla2x00_do_work(struct scsi_qla_host *vha)
2484{ 2603{
2485 struct qla_work_evt *e; 2604 struct qla_work_evt *e, *tmp;
2486 struct qla_hw_data *ha = vha->hw; 2605 unsigned long flags;
2606 LIST_HEAD(work);
2487 2607
2488 spin_lock_irq(&ha->hardware_lock); 2608 spin_lock_irqsave(&vha->work_lock, flags);
2489 while (!list_empty(&vha->work_list)) { 2609 list_splice_init(&vha->work_list, &work);
2490 e = list_entry(vha->work_list.next, struct qla_work_evt, list); 2610 spin_unlock_irqrestore(&vha->work_lock, flags);
2611
2612 list_for_each_entry_safe(e, tmp, &work, list) {
2491 list_del_init(&e->list); 2613 list_del_init(&e->list);
2492 spin_unlock_irq(&ha->hardware_lock);
2493 2614
2494 switch (e->type) { 2615 switch (e->type) {
2495 case QLA_EVT_AEN: 2616 case QLA_EVT_AEN:
@@ -2502,10 +2623,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
2502 } 2623 }
2503 if (e->flags & QLA_EVT_FLAG_FREE) 2624 if (e->flags & QLA_EVT_FLAG_FREE)
2504 kfree(e); 2625 kfree(e);
2505 spin_lock_irq(&ha->hardware_lock);
2506 } 2626 }
2507 spin_unlock_irq(&ha->hardware_lock);
2508} 2627}
2628
2509/* Relogins all the fcports of a vport 2629/* Relogins all the fcports of a vport
2510 * Context: dpc thread 2630 * Context: dpc thread
2511 */ 2631 */
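The qla_os.c hunks above take event handling off ha->hardware_lock: producers queue onto vha->work_list under the new vha->work_lock, and qla2x00_do_work() splices the whole list onto a private one under that lock, then processes it with no lock held. A minimal standalone sketch of the same splice-then-process pattern, using pthreads and hypothetical names rather than the driver's structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct evt { struct evt *next; int type; };

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static struct evt *work_head, **work_tail = &work_head;

/* producer: append an event while holding only work_lock */
static void post_work(int type)
{
        struct evt *e = calloc(1, sizeof(*e));
        if (!e)
                return;
        e->type = type;
        pthread_mutex_lock(&work_lock);
        *work_tail = e;
        work_tail = &e->next;
        pthread_mutex_unlock(&work_lock);
}

/* consumer: steal the whole list under the lock, then run without it */
static void do_work(void)
{
        pthread_mutex_lock(&work_lock);
        struct evt *batch = work_head;          /* splice in O(1) */
        work_head = NULL;
        work_tail = &work_head;
        pthread_mutex_unlock(&work_lock);

        while (batch) {                         /* no lock held while handling */
                struct evt *e = batch;
                batch = e->next;
                printf("handling event %d\n", e->type);
                free(e);
        }
}

int main(void)
{
        post_work(1);
        post_work(2);
        do_work();
        return 0;
}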
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 152ecfc26cd2..6260505dceb5 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
219 wait_cnt = NVR_WAIT_CNT; 219 wait_cnt = NVR_WAIT_CNT;
220 do { 220 do {
221 if (!--wait_cnt) { 221 if (!--wait_cnt) {
222 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 222 DEBUG9_10(qla_printk(KERN_WARNING, ha,
223 __func__, vha->host_no)); 223 "NVRAM didn't go ready...\n"));
224 break; 224 break;
225 } 225 }
226 NVRAM_DELAY(); 226 NVRAM_DELAY();
@@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
349 wait_cnt = NVR_WAIT_CNT; 349 wait_cnt = NVR_WAIT_CNT;
350 do { 350 do {
351 if (!--wait_cnt) { 351 if (!--wait_cnt) {
352 DEBUG9_10(qla_printk( 352 DEBUG9_10(qla_printk(KERN_WARNING, ha,
353 "NVRAM didn't go ready...\n")); 353 "NVRAM didn't go ready...\n"));
354 break; 354 break;
355 } 355 }
@@ -408,7 +408,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
408 wait_cnt = NVR_WAIT_CNT; 408 wait_cnt = NVR_WAIT_CNT;
409 do { 409 do {
410 if (!--wait_cnt) { 410 if (!--wait_cnt) {
411 DEBUG9_10(qla_printk("NVRAM didn't go ready...\n")); 411 DEBUG9_10(qla_printk(KERN_WARNING, ha,
412 "NVRAM didn't go ready...\n"));
412 break; 413 break;
413 } 414 }
414 NVRAM_DELAY(); 415 NVRAM_DELAY();
@@ -701,32 +702,35 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
701 break; 702 break;
702 case FLT_REG_VPD_0: 703 case FLT_REG_VPD_0:
703 ha->flt_region_vpd_nvram = start; 704 ha->flt_region_vpd_nvram = start;
704 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 705 if (ha->flags.port0)
705 ha->flt_region_vpd = start; 706 ha->flt_region_vpd = start;
706 break; 707 break;
707 case FLT_REG_VPD_1: 708 case FLT_REG_VPD_1:
708 if (PCI_FUNC(ha->pdev->devfn) & 1) 709 if (!ha->flags.port0)
709 ha->flt_region_vpd = start; 710 ha->flt_region_vpd = start;
710 break; 711 break;
711 case FLT_REG_NVRAM_0: 712 case FLT_REG_NVRAM_0:
712 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 713 if (ha->flags.port0)
713 ha->flt_region_nvram = start; 714 ha->flt_region_nvram = start;
714 break; 715 break;
715 case FLT_REG_NVRAM_1: 716 case FLT_REG_NVRAM_1:
716 if (PCI_FUNC(ha->pdev->devfn) & 1) 717 if (!ha->flags.port0)
717 ha->flt_region_nvram = start; 718 ha->flt_region_nvram = start;
718 break; 719 break;
719 case FLT_REG_FDT: 720 case FLT_REG_FDT:
720 ha->flt_region_fdt = start; 721 ha->flt_region_fdt = start;
721 break; 722 break;
722 case FLT_REG_NPIV_CONF_0: 723 case FLT_REG_NPIV_CONF_0:
723 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 724 if (ha->flags.port0)
724 ha->flt_region_npiv_conf = start; 725 ha->flt_region_npiv_conf = start;
725 break; 726 break;
726 case FLT_REG_NPIV_CONF_1: 727 case FLT_REG_NPIV_CONF_1:
727 if (PCI_FUNC(ha->pdev->devfn) & 1) 728 if (!ha->flags.port0)
728 ha->flt_region_npiv_conf = start; 729 ha->flt_region_npiv_conf = start;
729 break; 730 break;
731 case FLT_REG_GOLD_FW:
732 ha->flt_region_gold_fw = start;
733 break;
730 } 734 }
731 } 735 }
732 goto done; 736 goto done;
@@ -744,12 +748,12 @@ no_flash_data:
744 ha->flt_region_fw = def_fw[def]; 748 ha->flt_region_fw = def_fw[def];
745 ha->flt_region_boot = def_boot[def]; 749 ha->flt_region_boot = def_boot[def];
746 ha->flt_region_vpd_nvram = def_vpd_nvram[def]; 750 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
747 ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 751 ha->flt_region_vpd = ha->flags.port0 ?
748 def_vpd0[def]: def_vpd1[def]; 752 def_vpd0[def]: def_vpd1[def];
749 ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 753 ha->flt_region_nvram = ha->flags.port0 ?
750 def_nvram0[def]: def_nvram1[def]; 754 def_nvram0[def]: def_nvram1[def];
751 ha->flt_region_fdt = def_fdt[def]; 755 ha->flt_region_fdt = def_fdt[def];
752 ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 756 ha->flt_region_npiv_conf = ha->flags.port0 ?
753 def_npiv_conf0[def]: def_npiv_conf1[def]; 757 def_npiv_conf0[def]: def_npiv_conf1[def];
754done: 758done:
755 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 759 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
@@ -924,6 +928,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
924 struct fc_vport_identifiers vid; 928 struct fc_vport_identifiers vid;
925 struct fc_vport *vport; 929 struct fc_vport *vport;
926 930
931 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
932
927 flags = le16_to_cpu(entry->flags); 933 flags = le16_to_cpu(entry->flags);
928 if (flags == 0xffff) 934 if (flags == 0xffff)
929 continue; 935 continue;
@@ -937,9 +943,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
937 vid.port_name = wwn_to_u64(entry->port_name); 943 vid.port_name = wwn_to_u64(entry->port_name);
938 vid.node_name = wwn_to_u64(entry->node_name); 944 vid.node_name = wwn_to_u64(entry->node_name);
939 945
940 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); 946 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
941
942 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
943 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 947 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
944 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), 948 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
945 entry->q_qos, entry->f_qos)); 949 entry->q_qos, entry->f_qos));
@@ -955,7 +959,6 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
955 } 959 }
956done: 960done:
957 kfree(data); 961 kfree(data);
958 ha->npiv_info = NULL;
959} 962}
960 963
961static int 964static int
@@ -1079,8 +1082,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1079 0xff0000) | ((fdata >> 16) & 0xff)); 1082 0xff0000) | ((fdata >> 16) & 0xff));
1080 ret = qla24xx_erase_sector(vha, fdata); 1083 ret = qla24xx_erase_sector(vha, fdata);
1081 if (ret != QLA_SUCCESS) { 1084 if (ret != QLA_SUCCESS) {
1082 DEBUG9(qla_printk("Unable to erase sector: " 1085 DEBUG9(qla_printk(KERN_WARNING, ha,
1083 "address=%x.\n", faddr)); 1086 "Unable to erase sector: address=%x.\n",
1087 faddr));
1084 break; 1088 break;
1085 } 1089 }
1086 } 1090 }
@@ -1240,8 +1244,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1240 ret = qla24xx_write_flash_dword(ha, 1244 ret = qla24xx_write_flash_dword(ha,
1241 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); 1245 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1242 if (ret != QLA_SUCCESS) { 1246 if (ret != QLA_SUCCESS) {
1243 DEBUG9(qla_printk("Unable to program nvram address=%x " 1247 DEBUG9(qla_printk(KERN_WARNING, ha,
1244 "data=%x.\n", naddr, *dwptr)); 1248 "Unable to program nvram address=%x data=%x.\n",
1249 naddr, *dwptr));
1245 break; 1250 break;
1246 } 1251 }
1247 } 1252 }
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 19d1afc3a343..b63feaf43126 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k1" 10#define QLA2XXX_VERSION "8.03.01-k3"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 166417a6afba..2de5f3ad640b 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1225 * @starget: SCSI target pointer 1225 * @starget: SCSI target pointer
1226 * @lun: SCSI Logical Unit Number 1226 * @lun: SCSI Logical Unit Number
1227 * 1227 *
1228 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1228 * Description: Looks up the scsi_device with the specified @lun for a given
1229 * for a given host. The returned scsi_device has an additional reference that 1229 * @starget. The returned scsi_device has an additional reference that
1230 * needs to be released with scsi_device_put once you're done with it. 1230 * needs to be released with scsi_device_put once you're done with it.
1231 **/ 1231 **/
1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 213123b0486b..41a21772df12 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -887,7 +887,7 @@ static int resp_start_stop(struct scsi_cmnd * scp,
887static sector_t get_sdebug_capacity(void) 887static sector_t get_sdebug_capacity(void)
888{ 888{
889 if (scsi_debug_virtual_gb > 0) 889 if (scsi_debug_virtual_gb > 0)
890 return 2048 * 1024 * scsi_debug_virtual_gb; 890 return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
891 else 891 else
892 return sdebug_store_sectors; 892 return sdebug_store_sectors;
893} 893}
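The scsi_debug change is a 32-bit overflow fix: 2048 * 1024 sectors per gigabyte times scsi_debug_virtual_gb is evaluated in int, so the product overflows before it is widened to sector_t once the parameter reaches 1024 (1 TiB); casting one operand first forces the multiply into 64 bits. A small standalone illustration, where the uint32_t variant stands in for the old 32-bit arithmetic:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

typedef uint64_t sector_t;                      /* assumed 64-bit sector_t */

int main(void)
{
        int virtual_gb = 2048;                  /* illustrative value, 2 TiB */
        /* 32-bit arithmetic first, widened afterwards: the product wraps to 0 */
        uint32_t wrapped = 2048u * 1024u * (uint32_t)virtual_gb;
        /* widen one operand before multiplying, as the patch does */
        sector_t ok = 2048 * 1024 * (sector_t)virtual_gb;
        printf("wrapped=%" PRIu32 " ok=%" PRIu64 "\n", wrapped, ok);
        return 0;
}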
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 0c2c73be1974..a1689353d7fd 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
641/** 641/**
642 * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recovery 642 * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recovery
643 * @scmd: SCSI command structure to restore 643 * @scmd: SCSI command structure to restore
644 * @ses: saved information from a corresponding call to scsi_prep_eh_cmnd 644 * @ses: saved information from a corresponding call to scsi_eh_prep_cmnd
645 * 645 *
646 * Undo any damage done by above scsi_prep_eh_cmnd(). 646 * Undo any damage done by above scsi_eh_prep_cmnd().
647 */ 647 */
648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) 648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
649{ 649{
@@ -1451,28 +1451,21 @@ static void eh_lock_door_done(struct request *req, int uptodate)
1451 * @sdev: SCSI device to prevent medium removal 1451 * @sdev: SCSI device to prevent medium removal
1452 * 1452 *
1453 * Locking: 1453 * Locking:
1454 * We must be called from process context; scsi_allocate_request() 1454 * We must be called from process context.
1455 * may sleep.
1456 * 1455 *
1457 * Notes: 1456 * Notes:
1458 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the 1457 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
1459 * head of the devices request queue, and continue. 1458 * head of the devices request queue, and continue.
1460 *
1461 * Bugs:
1462 * scsi_allocate_request() may sleep waiting for existing requests to
1463 * be processed. However, since we haven't kicked off any request
1464 * processing for this host, this may deadlock.
1465 *
1466 * If scsi_allocate_request() fails for what ever reason, we
1467 * completely forget to lock the door.
1468 */ 1459 */
1469static void scsi_eh_lock_door(struct scsi_device *sdev) 1460static void scsi_eh_lock_door(struct scsi_device *sdev)
1470{ 1461{
1471 struct request *req; 1462 struct request *req;
1472 1463
1464 /*
1465 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
1466 * request becomes available
1467 */
1473 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); 1468 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
1474 if (!req)
1475 return;
1476 1469
1477 req->cmd[0] = ALLOW_MEDIUM_REMOVAL; 1470 req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1478 req->cmd[1] = 0; 1471 req->cmd[1] = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dd3f9d2b99fd..30f3275e119e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2412,20 +2412,18 @@ int
2412scsi_internal_device_unblock(struct scsi_device *sdev) 2412scsi_internal_device_unblock(struct scsi_device *sdev)
2413{ 2413{
2414 struct request_queue *q = sdev->request_queue; 2414 struct request_queue *q = sdev->request_queue;
2415 int err;
2416 unsigned long flags; 2415 unsigned long flags;
2417 2416
2418 /* 2417 /*
2419 * Try to transition the scsi device to SDEV_RUNNING 2418 * Try to transition the scsi device to SDEV_RUNNING
2420 * and goose the device queue if successful. 2419 * and goose the device queue if successful.
2421 */ 2420 */
2422 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2421 if (sdev->sdev_state == SDEV_BLOCK)
2423 if (err) { 2422 sdev->sdev_state = SDEV_RUNNING;
2424 err = scsi_device_set_state(sdev, SDEV_CREATED); 2423 else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2425 2424 sdev->sdev_state = SDEV_CREATED;
2426 if (err) 2425 else
2427 return err; 2426 return -EINVAL;
2428 }
2429 2427
2430 spin_lock_irqsave(q->queue_lock, flags); 2428 spin_lock_irqsave(q->queue_lock, flags);
2431 blk_start_queue(q); 2429 blk_start_queue(q);
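The scsi_internal_device_unblock() rewrite drops the scsi_device_set_state() round trip and permits exactly two transitions: SDEV_BLOCK becomes SDEV_RUNNING and SDEV_CREATED_BLOCK becomes SDEV_CREATED; any other starting state now fails with -EINVAL instead of being coerced. A small standalone model of that transition table (illustrative only, not the kernel's types):

#include <stdio.h>

enum sdev_state { SDEV_CREATED, SDEV_RUNNING, SDEV_BLOCK, SDEV_CREATED_BLOCK };

/* only two source states are legal for an unblock */
static int unblock(enum sdev_state *state)
{
        if (*state == SDEV_BLOCK)
                *state = SDEV_RUNNING;
        else if (*state == SDEV_CREATED_BLOCK)
                *state = SDEV_CREATED;
        else
                return -1;                      /* -EINVAL in the kernel */
        return 0;
}

int main(void)
{
        enum sdev_state s = SDEV_BLOCK;
        printf("unblock from SDEV_BLOCK -> %s\n",
               unblock(&s) == 0 && s == SDEV_RUNNING ? "SDEV_RUNNING" : "error");
        return 0;
}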
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e2b50d8f57a8..c44783801402 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns,
115 "REPORT LUNS maximum number of LUNS received (should be" 115 "REPORT LUNS maximum number of LUNS received (should be"
116 " between 1 and 16384)"); 116 " between 1 and 16384)");
117 117
118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3; 118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
119 119
120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); 120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
121MODULE_PARM_DESC(inq_timeout, 121MODULE_PARM_DESC(inq_timeout,
122 "Timeout (in seconds) waiting for devices to answer INQUIRY." 122 "Timeout (in seconds) waiting for devices to answer INQUIRY."
123 " Default is 5. Some non-compliant devices need more."); 123 " Default is 20. Some devices may need more; most need less.");
124 124
125/* This lock protects only this list */ 125/* This lock protects only this list */
126static DEFINE_SPINLOCK(async_scan_lock); 126static DEFINE_SPINLOCK(async_scan_lock);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0a2ce7b6325c..f3e664628d7a 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,7 +37,6 @@
37#define ISCSI_TRANSPORT_VERSION "2.0-870" 37#define ISCSI_TRANSPORT_VERSION "2.0-870"
38 38
39struct iscsi_internal { 39struct iscsi_internal {
40 int daemon_pid;
41 struct scsi_transport_template t; 40 struct scsi_transport_template t;
42 struct iscsi_transport *iscsi_transport; 41 struct iscsi_transport *iscsi_transport;
43 struct list_head list; 42 struct list_head list;
@@ -938,23 +937,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
938} 937}
939 938
940static int 939static int
941iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp) 940iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
942{ 941{
943 return netlink_broadcast(nls, skb, 0, 1, gfp); 942 return nlmsg_multicast(nls, skb, 0, group, gfp);
944}
945
946static int
947iscsi_unicast_skb(struct sk_buff *skb, int pid)
948{
949 int rc;
950
951 rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
952 if (rc < 0) {
953 printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc);
954 return rc;
955 }
956
957 return 0;
958} 943}
959 944
960int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 945int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
@@ -980,7 +965,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
980 return -ENOMEM; 965 return -ENOMEM;
981 } 966 }
982 967
983 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 968 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
984 ev = NLMSG_DATA(nlh); 969 ev = NLMSG_DATA(nlh);
985 memset(ev, 0, sizeof(*ev)); 970 memset(ev, 0, sizeof(*ev));
986 ev->transport_handle = iscsi_handle(conn->transport); 971 ev->transport_handle = iscsi_handle(conn->transport);
@@ -991,10 +976,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
991 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 976 memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
992 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); 977 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
993 978
994 return iscsi_unicast_skb(skb, priv->daemon_pid); 979 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
995} 980}
996EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 981EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
997 982
983int iscsi_offload_mesg(struct Scsi_Host *shost,
984 struct iscsi_transport *transport, uint32_t type,
985 char *data, uint16_t data_size)
986{
987 struct nlmsghdr *nlh;
988 struct sk_buff *skb;
989 struct iscsi_uevent *ev;
990 int len = NLMSG_SPACE(sizeof(*ev) + data_size);
991
992 skb = alloc_skb(len, GFP_NOIO);
993 if (!skb) {
994 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
995 return -ENOMEM;
996 }
997
998 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
999 ev = NLMSG_DATA(nlh);
1000 memset(ev, 0, sizeof(*ev));
1001 ev->type = type;
1002 ev->transport_handle = iscsi_handle(transport);
1003 switch (type) {
1004 case ISCSI_KEVENT_PATH_REQ:
1005 ev->r.req_path.host_no = shost->host_no;
1006 break;
1007 case ISCSI_KEVENT_IF_DOWN:
1008 ev->r.notify_if_down.host_no = shost->host_no;
1009 break;
1010 }
1011
1012 memcpy((char *)ev + sizeof(*ev), data, data_size);
1013
1014 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
1015}
1016EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
1017
998void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) 1018void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
999{ 1019{
1000 struct nlmsghdr *nlh; 1020 struct nlmsghdr *nlh;
@@ -1014,7 +1034,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1014 return; 1034 return;
1015 } 1035 }
1016 1036
1017 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1037 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1018 ev = NLMSG_DATA(nlh); 1038 ev = NLMSG_DATA(nlh);
1019 ev->transport_handle = iscsi_handle(conn->transport); 1039 ev->transport_handle = iscsi_handle(conn->transport);
1020 ev->type = ISCSI_KEVENT_CONN_ERROR; 1040 ev->type = ISCSI_KEVENT_CONN_ERROR;
@@ -1022,7 +1042,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1022 ev->r.connerror.cid = conn->cid; 1042 ev->r.connerror.cid = conn->cid;
1023 ev->r.connerror.sid = iscsi_conn_get_sid(conn); 1043 ev->r.connerror.sid = iscsi_conn_get_sid(conn);
1024 1044
1025 iscsi_broadcast_skb(skb, GFP_ATOMIC); 1045 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
1026 1046
1027 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", 1047 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
1028 error); 1048 error);
@@ -1030,8 +1050,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1030EXPORT_SYMBOL_GPL(iscsi_conn_error_event); 1050EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
1031 1051
1032static int 1052static int
1033iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, 1053iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
1034 void *payload, int size) 1054 void *payload, int size)
1035{ 1055{
1036 struct sk_buff *skb; 1056 struct sk_buff *skb;
1037 struct nlmsghdr *nlh; 1057 struct nlmsghdr *nlh;
@@ -1045,10 +1065,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
1045 return -ENOMEM; 1065 return -ENOMEM;
1046 } 1066 }
1047 1067
1048 nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0); 1068 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
1049 nlh->nlmsg_flags = flags; 1069 nlh->nlmsg_flags = flags;
1050 memcpy(NLMSG_DATA(nlh), payload, size); 1070 memcpy(NLMSG_DATA(nlh), payload, size);
1051 return iscsi_unicast_skb(skb, pid); 1071 return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
1052} 1072}
1053 1073
1054static int 1074static int
@@ -1085,7 +1105,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1085 return -ENOMEM; 1105 return -ENOMEM;
1086 } 1106 }
1087 1107
1088 nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0, 1108 nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
1089 (len - sizeof(*nlhstat)), 0); 1109 (len - sizeof(*nlhstat)), 0);
1090 evstat = NLMSG_DATA(nlhstat); 1110 evstat = NLMSG_DATA(nlhstat);
1091 memset(evstat, 0, sizeof(*evstat)); 1111 memset(evstat, 0, sizeof(*evstat));
@@ -1109,7 +1129,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1109 skb_trim(skbstat, NLMSG_ALIGN(actual_size)); 1129 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
1110 nlhstat->nlmsg_len = actual_size; 1130 nlhstat->nlmsg_len = actual_size;
1111 1131
1112 err = iscsi_unicast_skb(skbstat, priv->daemon_pid); 1132 err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
1133 GFP_ATOMIC);
1113 } while (err < 0 && err != -ECONNREFUSED); 1134 } while (err < 0 && err != -ECONNREFUSED);
1114 1135
1115 return err; 1136 return err;
@@ -1143,7 +1164,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1143 return -ENOMEM; 1164 return -ENOMEM;
1144 } 1165 }
1145 1166
1146 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1167 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1147 ev = NLMSG_DATA(nlh); 1168 ev = NLMSG_DATA(nlh);
1148 ev->transport_handle = iscsi_handle(session->transport); 1169 ev->transport_handle = iscsi_handle(session->transport);
1149 1170
@@ -1172,7 +1193,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1172 * this will occur if the daemon is not up, so we just warn 1193 * this will occur if the daemon is not up, so we just warn
1173 * the user and when the daemon is restarted it will handle it 1194 * the user and when the daemon is restarted it will handle it
1174 */ 1195 */
1175 rc = iscsi_broadcast_skb(skb, GFP_KERNEL); 1196 rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
1176 if (rc == -ESRCH) 1197 if (rc == -ESRCH)
1177 iscsi_cls_session_printk(KERN_ERR, session, 1198 iscsi_cls_session_printk(KERN_ERR, session,
1178 "Cannot notify userspace of session " 1199 "Cannot notify userspace of session "
@@ -1268,26 +1289,54 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1268 return err; 1289 return err;
1269} 1290}
1270 1291
1292static int iscsi_if_ep_connect(struct iscsi_transport *transport,
1293 struct iscsi_uevent *ev, int msg_type)
1294{
1295 struct iscsi_endpoint *ep;
1296 struct sockaddr *dst_addr;
1297 struct Scsi_Host *shost = NULL;
1298 int non_blocking, err = 0;
1299
1300 if (!transport->ep_connect)
1301 return -EINVAL;
1302
1303 if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
1304 shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
1305 if (!shost) {
1306 printk(KERN_ERR "ep connect failed. Could not find "
1307 "host no %u\n",
1308 ev->u.ep_connect_through_host.host_no);
1309 return -ENODEV;
1310 }
1311 non_blocking = ev->u.ep_connect_through_host.non_blocking;
1312 } else
1313 non_blocking = ev->u.ep_connect.non_blocking;
1314
1315 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1316 ep = transport->ep_connect(shost, dst_addr, non_blocking);
1317 if (IS_ERR(ep)) {
1318 err = PTR_ERR(ep);
1319 goto release_host;
1320 }
1321
1322 ev->r.ep_connect_ret.handle = ep->id;
1323release_host:
1324 if (shost)
1325 scsi_host_put(shost);
1326 return err;
1327}
1328
1271static int 1329static int
1272iscsi_if_transport_ep(struct iscsi_transport *transport, 1330iscsi_if_transport_ep(struct iscsi_transport *transport,
1273 struct iscsi_uevent *ev, int msg_type) 1331 struct iscsi_uevent *ev, int msg_type)
1274{ 1332{
1275 struct iscsi_endpoint *ep; 1333 struct iscsi_endpoint *ep;
1276 struct sockaddr *dst_addr;
1277 int rc = 0; 1334 int rc = 0;
1278 1335
1279 switch (msg_type) { 1336 switch (msg_type) {
1337 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1280 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1338 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1281 if (!transport->ep_connect) 1339 rc = iscsi_if_ep_connect(transport, ev, msg_type);
1282 return -EINVAL;
1283
1284 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1285 ep = transport->ep_connect(dst_addr,
1286 ev->u.ep_connect.non_blocking);
1287 if (IS_ERR(ep))
1288 return PTR_ERR(ep);
1289
1290 ev->r.ep_connect_ret.handle = ep->id;
1291 break; 1340 break;
1292 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1341 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1293 if (!transport->ep_poll) 1342 if (!transport->ep_poll)
@@ -1365,7 +1414,31 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1365} 1414}
1366 1415
1367static int 1416static int
1368iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1417iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1418{
1419 struct Scsi_Host *shost;
1420 struct iscsi_path *params;
1421 int err;
1422
1423 if (!transport->set_path)
1424 return -ENOSYS;
1425
1426 shost = scsi_host_lookup(ev->u.set_path.host_no);
1427 if (!shost) {
1428 printk(KERN_ERR "set path could not find host no %u\n",
1429 ev->u.set_path.host_no);
1430 return -ENODEV;
1431 }
1432
1433 params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
1434 err = transport->set_path(shost, params);
1435
1436 scsi_host_put(shost);
1437 return err;
1438}
1439
1440static int
1441iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1369{ 1442{
1370 int err = 0; 1443 int err = 0;
1371 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 1444 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
@@ -1375,6 +1448,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1375 struct iscsi_cls_conn *conn; 1448 struct iscsi_cls_conn *conn;
1376 struct iscsi_endpoint *ep = NULL; 1449 struct iscsi_endpoint *ep = NULL;
1377 1450
1451 if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
1452 *group = ISCSI_NL_GRP_UIP;
1453 else
1454 *group = ISCSI_NL_GRP_ISCSID;
1455
1378 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 1456 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
1379 if (!priv) 1457 if (!priv)
1380 return -EINVAL; 1458 return -EINVAL;
@@ -1383,8 +1461,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1383 if (!try_module_get(transport->owner)) 1461 if (!try_module_get(transport->owner))
1384 return -EINVAL; 1462 return -EINVAL;
1385 1463
1386 priv->daemon_pid = NETLINK_CREDS(skb)->pid;
1387
1388 switch (nlh->nlmsg_type) { 1464 switch (nlh->nlmsg_type) {
1389 case ISCSI_UEVENT_CREATE_SESSION: 1465 case ISCSI_UEVENT_CREATE_SESSION:
1390 err = iscsi_if_create_session(priv, ep, ev, 1466 err = iscsi_if_create_session(priv, ep, ev,
@@ -1469,6 +1545,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1469 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1545 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1470 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1546 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1471 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: 1547 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
1548 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1472 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type); 1549 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
1473 break; 1550 break;
1474 case ISCSI_UEVENT_TGT_DSCVR: 1551 case ISCSI_UEVENT_TGT_DSCVR:
@@ -1477,6 +1554,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1477 case ISCSI_UEVENT_SET_HOST_PARAM: 1554 case ISCSI_UEVENT_SET_HOST_PARAM:
1478 err = iscsi_set_host_param(transport, ev); 1555 err = iscsi_set_host_param(transport, ev);
1479 break; 1556 break;
1557 case ISCSI_UEVENT_PATH_UPDATE:
1558 err = iscsi_set_path(transport, ev);
1559 break;
1480 default: 1560 default:
1481 err = -ENOSYS; 1561 err = -ENOSYS;
1482 break; 1562 break;
@@ -1499,6 +1579,7 @@ iscsi_if_rx(struct sk_buff *skb)
1499 uint32_t rlen; 1579 uint32_t rlen;
1500 struct nlmsghdr *nlh; 1580 struct nlmsghdr *nlh;
1501 struct iscsi_uevent *ev; 1581 struct iscsi_uevent *ev;
1582 uint32_t group;
1502 1583
1503 nlh = nlmsg_hdr(skb); 1584 nlh = nlmsg_hdr(skb);
1504 if (nlh->nlmsg_len < sizeof(*nlh) || 1585 if (nlh->nlmsg_len < sizeof(*nlh) ||
@@ -1511,7 +1592,7 @@ iscsi_if_rx(struct sk_buff *skb)
1511 if (rlen > skb->len) 1592 if (rlen > skb->len)
1512 rlen = skb->len; 1593 rlen = skb->len;
1513 1594
1514 err = iscsi_if_recv_msg(skb, nlh); 1595 err = iscsi_if_recv_msg(skb, nlh, &group);
1515 if (err) { 1596 if (err) {
1516 ev->type = ISCSI_KEVENT_IF_ERROR; 1597 ev->type = ISCSI_KEVENT_IF_ERROR;
1517 ev->iferror = err; 1598 ev->iferror = err;
@@ -1525,8 +1606,7 @@ iscsi_if_rx(struct sk_buff *skb)
1525 */ 1606 */
1526 if (ev->type == ISCSI_UEVENT_GET_STATS && !err) 1607 if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
1527 break; 1608 break;
1528 err = iscsi_if_send_reply( 1609 err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
1529 NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
1530 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); 1610 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
1531 } while (err < 0 && err != -ECONNREFUSED); 1611 } while (err < 0 && err != -ECONNREFUSED);
1532 skb_pull(skb, rlen); 1612 skb_pull(skb, rlen);
@@ -1774,7 +1854,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
1774 if (!priv) 1854 if (!priv)
1775 return NULL; 1855 return NULL;
1776 INIT_LIST_HEAD(&priv->list); 1856 INIT_LIST_HEAD(&priv->list);
1777 priv->daemon_pid = -1;
1778 priv->iscsi_transport = tt; 1857 priv->iscsi_transport = tt;
1779 priv->t.user_scan = iscsi_user_scan; 1858 priv->t.user_scan = iscsi_user_scan;
1780 priv->t.create_work_queue = 1; 1859 priv->t.create_work_queue = 1;
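With the daemon_pid tracking gone, every notification above is addressed to a netlink multicast group rather than a single PID: iscsid traffic goes to ISCSI_NL_GRP_ISCSID and the new offload/path messages to ISCSI_NL_GRP_UIP, so a listener only has to join the group it cares about. A sketch of the receiving side; the protocol and group numbers below are assumptions mirroring linux/netlink.h and iscsi_if.h, not taken from this diff:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK          270
#endif
#ifndef NETLINK_ISCSI
#define NETLINK_ISCSI        8   /* assumed, per <linux/netlink.h> */
#endif
#define ISCSI_NL_GRP_ISCSID  1   /* assumed to match iscsi_if.h */

int main(void)
{
        int grp = ISCSI_NL_GRP_ISCSID;
        struct sockaddr_nl src = { .nl_family = AF_NETLINK };
        char buf[8192];

        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ISCSI);
        if (fd < 0) { perror("socket"); return 1; }
        if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0) {
                perror("bind"); return 1;
        }
        /* join the multicast group; no PID registration step any more */
        if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                       &grp, sizeof(grp)) < 0) {
                perror("setsockopt"); return 1;
        }
        ssize_t n = recv(fd, buf, sizeof(buf), 0);   /* blocks for the next event */
        if (n < 0) { perror("recv"); return 1; }
        printf("received %zd bytes of iSCSI netlink events\n", n);
        return 0;
}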
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bcf3bd40bbd5..878b17a9af30 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1902,24 +1902,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1902 index = sdkp->index; 1902 index = sdkp->index;
1903 dev = &sdp->sdev_gendev; 1903 dev = &sdp->sdev_gendev;
1904 1904
1905 if (!sdp->request_queue->rq_timeout) {
1906 if (sdp->type != TYPE_MOD)
1907 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1908 else
1909 blk_queue_rq_timeout(sdp->request_queue,
1910 SD_MOD_TIMEOUT);
1911 }
1912
1913 device_initialize(&sdkp->dev);
1914 sdkp->dev.parent = &sdp->sdev_gendev;
1915 sdkp->dev.class = &sd_disk_class;
1916 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
1917
1918 if (device_add(&sdkp->dev))
1919 goto out_free_index;
1920
1921 get_device(&sdp->sdev_gendev);
1922
1923 if (index < SD_MAX_DISKS) { 1905 if (index < SD_MAX_DISKS) {
1924 gd->major = sd_major((index & 0xf0) >> 4); 1906 gd->major = sd_major((index & 0xf0) >> 4);
1925 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1907 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
@@ -1954,11 +1936,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1954 1936
1955 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 1937 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1956 sdp->removable ? "removable " : ""); 1938 sdp->removable ? "removable " : "");
1957
1958 return;
1959
1960 out_free_index:
1961 ida_remove(&sd_index_ida, index);
1962} 1939}
1963 1940
1964/** 1941/**
@@ -2026,6 +2003,24 @@ static int sd_probe(struct device *dev)
2026 sdkp->openers = 0; 2003 sdkp->openers = 0;
2027 sdkp->previous_state = 1; 2004 sdkp->previous_state = 1;
2028 2005
2006 if (!sdp->request_queue->rq_timeout) {
2007 if (sdp->type != TYPE_MOD)
2008 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
2009 else
2010 blk_queue_rq_timeout(sdp->request_queue,
2011 SD_MOD_TIMEOUT);
2012 }
2013
2014 device_initialize(&sdkp->dev);
2015 sdkp->dev.parent = &sdp->sdev_gendev;
2016 sdkp->dev.class = &sd_disk_class;
2017 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
2018
2019 if (device_add(&sdkp->dev))
2020 goto out_free_index;
2021
2022 get_device(&sdp->sdev_gendev);
2023
2029 async_schedule(sd_probe_async, sdkp); 2024 async_schedule(sd_probe_async, sdkp);
2030 2025
2031 return 0; 2026 return 0;
@@ -2055,8 +2050,10 @@ static int sd_probe(struct device *dev)
2055 **/ 2050 **/
2056static int sd_remove(struct device *dev) 2051static int sd_remove(struct device *dev)
2057{ 2052{
2058 struct scsi_disk *sdkp = dev_get_drvdata(dev); 2053 struct scsi_disk *sdkp;
2059 2054
2055 async_synchronize_full();
2056 sdkp = dev_get_drvdata(dev);
2060 device_del(&sdkp->dev); 2057 device_del(&sdkp->dev);
2061 del_gendisk(sdkp->disk); 2058 del_gendisk(sdkp->disk);
2062 sd_shutdown(dev); 2059 sd_shutdown(dev);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 89bd438e1fe3..b33d04250bbc 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2964 !(STp->use_pf & PF_TESTED)) { 2964 !(STp->use_pf & PF_TESTED)) {
2965 /* Try the other possible state of Page Format if not 2965 /* Try the other possible state of Page Format if not
2966 already tried */ 2966 already tried */
2967 STp->use_pf = !STp->use_pf | PF_TESTED; 2967 STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
2968 st_release_request(SRpnt); 2968 st_release_request(SRpnt);
2969 SRpnt = NULL; 2969 SRpnt = NULL;
2970 return st_int_ioctl(STp, cmd_in, arg); 2970 return st_int_ioctl(STp, cmd_in, arg);
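The st.c change fixes the Page Format retry toggle: with USE_PF and PF_TESTED taken to be bits 0 and 1, the old expression !STp->use_pf | PF_TESTED cannot turn USE_PF back on once PF_TESTED is set, because the logical NOT of any non-zero value is 0; XORing with USE_PF flips only that bit. A quick standalone table of all four states (the flag values are assumed here, not quoted from st.c):

#include <stdio.h>

#define USE_PF    1   /* assumed bit layout for illustration */
#define PF_TESTED 2

int main(void)
{
        int states[] = { 0, USE_PF, PF_TESTED, USE_PF | PF_TESTED };
        for (int i = 0; i < 4; i++) {
                int use_pf = states[i];
                int old_expr = !use_pf | PF_TESTED;            /* pre-patch */
                int new_expr = (use_pf ^ USE_PF) | PF_TESTED;  /* patched   */
                printf("use_pf=%d  old->%d  new->%d\n", use_pf, old_expr, new_expr);
        }
        /* the two differ only for use_pf == PF_TESTED: old keeps USE_PF off */
        return 0;
}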
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 583966ec8266..45374d66d26a 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -737,11 +737,14 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
737 struct sym_hcb *np = sym_get_hcb(sdev->host); 737 struct sym_hcb *np = sym_get_hcb(sdev->host);
738 struct sym_tcb *tp = &np->target[sdev->id]; 738 struct sym_tcb *tp = &np->target[sdev->id];
739 struct sym_lcb *lp; 739 struct sym_lcb *lp;
740 unsigned long flags;
741 int error;
740 742
741 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) 743 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
742 return -ENXIO; 744 return -ENXIO;
743 745
744 tp->starget = sdev->sdev_target; 746 spin_lock_irqsave(np->s.host->host_lock, flags);
747
745 /* 748 /*
746 * Fail the device init if the device is flagged NOSCAN at BOOT in 749 * Fail the device init if the device is flagged NOSCAN at BOOT in
747 * the NVRAM. This may speed up boot and maintain coherency with 750 * the NVRAM. This may speed up boot and maintain coherency with
@@ -753,26 +756,37 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
753 756
754 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { 757 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
755 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; 758 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
756 starget_printk(KERN_INFO, tp->starget, 759 starget_printk(KERN_INFO, sdev->sdev_target,
757 "Scan at boot disabled in NVRAM\n"); 760 "Scan at boot disabled in NVRAM\n");
758 return -ENXIO; 761 error = -ENXIO;
762 goto out;
759 } 763 }
760 764
761 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { 765 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
762 if (sdev->lun != 0) 766 if (sdev->lun != 0) {
763 return -ENXIO; 767 error = -ENXIO;
764 starget_printk(KERN_INFO, tp->starget, 768 goto out;
769 }
770 starget_printk(KERN_INFO, sdev->sdev_target,
765 "Multiple LUNs disabled in NVRAM\n"); 771 "Multiple LUNs disabled in NVRAM\n");
766 } 772 }
767 773
768 lp = sym_alloc_lcb(np, sdev->id, sdev->lun); 774 lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
769 if (!lp) 775 if (!lp) {
770 return -ENOMEM; 776 error = -ENOMEM;
777 goto out;
778 }
779 if (tp->nlcb == 1)
780 tp->starget = sdev->sdev_target;
771 781
772 spi_min_period(tp->starget) = tp->usr_period; 782 spi_min_period(tp->starget) = tp->usr_period;
773 spi_max_width(tp->starget) = tp->usr_width; 783 spi_max_width(tp->starget) = tp->usr_width;
774 784
775 return 0; 785 error = 0;
786out:
787 spin_unlock_irqrestore(np->s.host->host_lock, flags);
788
789 return error;
776} 790}
777 791
778/* 792/*
@@ -819,12 +833,34 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
819static void sym53c8xx_slave_destroy(struct scsi_device *sdev) 833static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
820{ 834{
821 struct sym_hcb *np = sym_get_hcb(sdev->host); 835 struct sym_hcb *np = sym_get_hcb(sdev->host);
822 struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); 836 struct sym_tcb *tp = &np->target[sdev->id];
837 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
838 unsigned long flags;
839
840 spin_lock_irqsave(np->s.host->host_lock, flags);
841
842 if (lp->busy_itlq || lp->busy_itl) {
843 /*
844 * This really shouldn't happen, but we can't return an error
845 * so let's try to stop all on-going I/O.
846 */
847 starget_printk(KERN_WARNING, tp->starget,
848 "Removing busy LCB (%d)\n", sdev->lun);
849 sym_reset_scsi_bus(np, 1);
850 }
823 851
824 if (lp->itlq_tbl) 852 if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
825 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); 853 /*
826 kfree(lp->cb_tags); 854 * It was the last unit for this target.
827 sym_mfree_dma(lp, sizeof(*lp), "LCB"); 855 */
856 tp->head.sval = 0;
857 tp->head.wval = np->rv_scntl3;
858 tp->head.uval = 0;
859 tp->tgoal.check_nego = 1;
860 tp->starget = NULL;
861 }
862
863 spin_unlock_irqrestore(np->s.host->host_lock, flags);
828} 864}
829 865
830/* 866/*
@@ -890,6 +926,8 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
890 if (!((uc->target >> t) & 1)) 926 if (!((uc->target >> t) & 1))
891 continue; 927 continue;
892 tp = &np->target[t]; 928 tp = &np->target[t];
929 if (!tp->nlcb)
930 continue;
893 931
894 switch (uc->cmd) { 932 switch (uc->cmd) {
895 933
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ffa70d1ed182..69ad4945c936 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1896,6 +1896,15 @@ void sym_start_up(struct Scsi_Host *shost, int reason)
1896 tp->head.sval = 0; 1896 tp->head.sval = 0;
1897 tp->head.wval = np->rv_scntl3; 1897 tp->head.wval = np->rv_scntl3;
1898 tp->head.uval = 0; 1898 tp->head.uval = 0;
1899 if (tp->lun0p)
1900 tp->lun0p->to_clear = 0;
1901 if (tp->lunmp) {
1902 int ln;
1903
1904 for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
1905 if (tp->lunmp[ln])
1906 tp->lunmp[ln]->to_clear = 0;
1907 }
1899 } 1908 }
1900 1909
1901 /* 1910 /*
@@ -4988,7 +4997,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
4988 */ 4997 */
4989 if (ln && !tp->lunmp) { 4998 if (ln && !tp->lunmp) {
4990 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), 4999 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
4991 GFP_KERNEL); 5000 GFP_ATOMIC);
4992 if (!tp->lunmp) 5001 if (!tp->lunmp)
4993 goto fail; 5002 goto fail;
4994 } 5003 }
@@ -5008,6 +5017,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
5008 tp->lun0p = lp; 5017 tp->lun0p = lp;
5009 tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); 5018 tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
5010 } 5019 }
5020 tp->nlcb++;
5011 5021
5012 /* 5022 /*
5013 * Let the itl task point to error handling. 5023 * Let the itl task point to error handling.
@@ -5085,6 +5095,43 @@ fail:
5085} 5095}
5086 5096
5087/* 5097/*
5098 * Lun control block deallocation. Returns the number of valid remaining LCBs
5099 * for the target.
5100 */
5101int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
5102{
5103 struct sym_tcb *tp = &np->target[tn];
5104 struct sym_lcb *lp = sym_lp(tp, ln);
5105
5106 tp->nlcb--;
5107
5108 if (ln) {
5109 if (!tp->nlcb) {
5110 kfree(tp->lunmp);
5111 sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
5112 tp->lunmp = NULL;
5113 tp->luntbl = NULL;
5114 tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
5115 } else {
5116 tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
5117 tp->lunmp[ln] = NULL;
5118 }
5119 } else {
5120 tp->lun0p = NULL;
5121 tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
5122 }
5123
5124 if (lp->itlq_tbl) {
5125 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
5126 kfree(lp->cb_tags);
5127 }
5128
5129 sym_mfree_dma(lp, sizeof(*lp), "LCB");
5130
5131 return tp->nlcb;
5132}
5133
5134/*
5088 * Queue a SCSI IO to the controller. 5135 * Queue a SCSI IO to the controller.
5089 */ 5136 */
5090int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 5137int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 9ebc8706b6bf..053e63c86822 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -401,6 +401,7 @@ struct sym_tcb {
401 * An array of bus addresses is used on reselection. 401 * An array of bus addresses is used on reselection.
402 */ 402 */
403 u32 *luntbl; /* LCBs bus address table */ 403 u32 *luntbl; /* LCBs bus address table */
404 int nlcb; /* Number of valid LCBs (including LUN #0) */
404 405
405 /* 406 /*
406 * LUN table used by the C code. 407 * LUN table used by the C code.
@@ -1065,6 +1066,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int
1065struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); 1066struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
1066void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); 1067void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
1067struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln); 1068struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1069int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1068int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); 1070int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
1069int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out); 1071int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
1070int sym_reset_scsi_target(struct sym_hcb *np, int target); 1072int sym_reset_scsi_target(struct sym_hcb *np, int target);
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index b1512c4bb8c7..24667eedc023 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -175,10 +175,4 @@ int exofs_async_op(struct osd_request *or,
175 175
176int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr); 176int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr);
177 177
178int osd_req_read_kern(struct osd_request *or,
179 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
180
181int osd_req_write_kern(struct osd_request *or,
182 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
183
184#endif /*ifndef __EXOFS_COM_H__*/ 178#endif /*ifndef __EXOFS_COM_H__*/
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index ba8d9fab4693..77d0a295eb1c 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -59,10 +59,9 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
59 struct inode *inode) 59 struct inode *inode)
60{ 60{
61 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; 61 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
62 struct request_queue *req_q = sbi->s_dev->scsi_device->request_queue;
63 62
64 pcol->sbi = sbi; 63 pcol->sbi = sbi;
65 pcol->req_q = req_q; 64 pcol->req_q = osd_request_queue(sbi->s_dev);
66 pcol->inode = inode; 65 pcol->inode = inode;
67 pcol->expected_pages = expected_pages; 66 pcol->expected_pages = expected_pages;
68 67
@@ -266,7 +265,7 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
266 goto err; 265 goto err;
267 } 266 }
268 267
269 osd_req_read(or, &obj, pcol->bio, i_start); 268 osd_req_read(or, &obj, i_start, pcol->bio, pcol->length);
270 269
271 if (is_sync) { 270 if (is_sync) {
272 exofs_sync_op(or, pcol->sbi->s_timeout, oi->i_cred); 271 exofs_sync_op(or, pcol->sbi->s_timeout, oi->i_cred);
@@ -522,7 +521,8 @@ static int write_exec(struct page_collect *pcol)
522 521
523 *pcol_copy = *pcol; 522 *pcol_copy = *pcol;
524 523
525 osd_req_write(or, &obj, pcol_copy->bio, i_start); 524 pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
525 osd_req_write(or, &obj, i_start, pcol_copy->bio, pcol_copy->length);
526 ret = exofs_async_op(or, writepages_done, pcol_copy, oi->i_cred); 526 ret = exofs_async_op(or, writepages_done, pcol_copy, oi->i_cred);
527 if (unlikely(ret)) { 527 if (unlikely(ret)) {
528 EXOFS_ERR("write_exec: exofs_async_op() Faild\n"); 528 EXOFS_ERR("write_exec: exofs_async_op() Faild\n");
diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c
index 06ca92672eb5..b3d2ccb87aaa 100644
--- a/fs/exofs/osd.c
+++ b/fs/exofs/osd.c
@@ -125,29 +125,3 @@ int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr)
125 125
126 return -EIO; 126 return -EIO;
127} 127}
128
129int osd_req_read_kern(struct osd_request *or,
130 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
131{
132 struct request_queue *req_q = or->osd_dev->scsi_device->request_queue;
133 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
134
135 if (!bio)
136 return -ENOMEM;
137
138 osd_req_read(or, obj, bio, offset);
139 return 0;
140}
141
142int osd_req_write_kern(struct osd_request *or,
143 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
144{
145 struct request_queue *req_q = or->osd_dev->scsi_device->request_queue;
146 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
147
148 if (!bio)
149 return -ENOMEM;
150
151 osd_req_write(or, obj, bio, offset);
152 return 0;
153}
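With these exofs-local helpers removed, the equivalent functionality lives in libosd as osd_req_{read,write}_kern() (their prototypes appear in the osd_initiator.h hunk further down). A hedged sketch of what the relocated write-side helper plausibly looks like, rebuilt on the new osd_request_queue() accessor and the length-taking osd_req_write(); this is inferred from the deleted exofs copy above, not quoted from drivers/scsi/osd/:

#include <linux/bio.h>
#include <scsi/osd_initiator.h>

int osd_req_write_kern(struct osd_request *or,
		       const struct osd_obj_id *obj, u64 offset,
		       void *buff, u64 len)
{
	/* Map the flat kernel buffer into a bio against the device queue. */
	struct request_queue *req_q = osd_request_queue(or->osd_dev);
	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* New signature: offset comes before the bio, length is explicit. */
	osd_req_write(or, obj, offset, bio, len);
	return 0;
}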
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index cfe4fe1b7132..60e8934d10b5 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -79,6 +79,7 @@
79#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ 79#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
80#define ETH_P_TIPC 0x88CA /* TIPC */ 80#define ETH_P_TIPC 0x88CA /* TIPC */
81#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ 81#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
82#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
82#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ 83#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
83 84
84/* 85/*
diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h
index 0627a9ae6347..3d138c1fcf8a 100644
--- a/include/scsi/fc/fc_fip.h
+++ b/include/scsi/fc/fc_fip.h
@@ -22,13 +22,6 @@
22 * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf 22 * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
23 */ 23 */
24 24
25/*
26 * The FIP ethertype eventually goes in net/if_ether.h.
27 */
28#ifndef ETH_P_FIP
29#define ETH_P_FIP 0x8914 /* FIP Ethertype */
30#endif
31
32#define FIP_DEF_PRI 128 /* default selection priority */ 25#define FIP_DEF_PRI 128 /* default selection priority */
33#define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */ 26#define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */
34#define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */ 27#define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index d0ed5226f8c4..4426f00da5ff 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -22,6 +22,11 @@
22#define ISCSI_IF_H 22#define ISCSI_IF_H
23 23
24#include <scsi/iscsi_proto.h> 24#include <scsi/iscsi_proto.h>
25#include <linux/in.h>
26#include <linux/in6.h>
27
28#define ISCSI_NL_GRP_ISCSID 1
29#define ISCSI_NL_GRP_UIP 2
25 30
26#define UEVENT_BASE 10 31#define UEVENT_BASE 10
27#define KEVENT_BASE 100 32#define KEVENT_BASE 100
@@ -50,7 +55,10 @@ enum iscsi_uevent_e {
50 ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15, 55 ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
51 ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16, 56 ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
52 ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17, 57 ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
53 ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18, 58 ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
59 ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST = UEVENT_BASE + 19,
60
61 ISCSI_UEVENT_PATH_UPDATE = UEVENT_BASE + 20,
54 62
55 /* up events */ 63 /* up events */
56 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, 64 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@@ -59,6 +67,9 @@ enum iscsi_uevent_e {
59 ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4, 67 ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4,
60 ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5, 68 ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5,
61 ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6, 69 ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6,
70
71 ISCSI_KEVENT_PATH_REQ = KEVENT_BASE + 7,
72 ISCSI_KEVENT_IF_DOWN = KEVENT_BASE + 8,
62}; 73};
63 74
64enum iscsi_tgt_dscvr { 75enum iscsi_tgt_dscvr {
@@ -131,6 +142,10 @@ struct iscsi_uevent {
131 struct msg_transport_connect { 142 struct msg_transport_connect {
132 uint32_t non_blocking; 143 uint32_t non_blocking;
133 } ep_connect; 144 } ep_connect;
145 struct msg_transport_connect_through_host {
146 uint32_t host_no;
147 uint32_t non_blocking;
148 } ep_connect_through_host;
134 struct msg_transport_poll { 149 struct msg_transport_poll {
135 uint64_t ep_handle; 150 uint64_t ep_handle;
136 uint32_t timeout_ms; 151 uint32_t timeout_ms;
@@ -154,6 +169,9 @@ struct iscsi_uevent {
154 uint32_t param; /* enum iscsi_host_param */ 169 uint32_t param; /* enum iscsi_host_param */
155 uint32_t len; 170 uint32_t len;
156 } set_host_param; 171 } set_host_param;
172 struct msg_set_path {
173 uint32_t host_no;
174 } set_path;
157 } u; 175 } u;
158 union { 176 union {
159 /* messages k -> u */ 177 /* messages k -> u */
@@ -187,10 +205,39 @@ struct iscsi_uevent {
187 struct msg_transport_connect_ret { 205 struct msg_transport_connect_ret {
188 uint64_t handle; 206 uint64_t handle;
189 } ep_connect_ret; 207 } ep_connect_ret;
208 struct msg_req_path {
209 uint32_t host_no;
210 } req_path;
211 struct msg_notify_if_down {
212 uint32_t host_no;
213 } notify_if_down;
190 } r; 214 } r;
191} __attribute__ ((aligned (sizeof(uint64_t)))); 215} __attribute__ ((aligned (sizeof(uint64_t))));
192 216
193/* 217/*
218 * To keep the struct iscsi_uevent size the same for userspace code
219 * compatibility, the main structure for ISCSI_UEVENT_PATH_UPDATE and
220 * ISCSI_KEVENT_PATH_REQ is defined separately and comes after the
221 * struct iscsi_uevent in the NETLINK_ISCSI message.
222 */
223struct iscsi_path {
224 uint64_t handle;
225 uint8_t mac_addr[6];
226 uint8_t mac_addr_old[6];
227 uint32_t ip_addr_len; /* 4 or 16 */
228 union {
229 struct in_addr v4_addr;
230 struct in6_addr v6_addr;
231 } src;
232 union {
233 struct in_addr v4_addr;
234 struct in6_addr v6_addr;
235 } dst;
236 uint16_t vlan_id;
237 uint16_t pmtu;
238} __attribute__ ((aligned (sizeof(uint64_t))));
239
240/*
194 * Common error codes 241 * Common error codes
195 */ 242 */
196enum iscsi_err { 243enum iscsi_err {
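Because struct iscsi_path is appended after struct iscsi_uevent rather than folded into the union, existing userspace continues to see the old iscsi_uevent size. A hedged sketch of how a NETLINK_ISCSI consumer might locate the path payload of an ISCSI_KEVENT_PATH_REQ message; the helper name and error handling are illustrative:

#include <linux/netlink.h>
#include <scsi/iscsi_if.h>

/* Sketch only: pull the iscsi_path block that trails the fixed-size
 * iscsi_uevent out of a NETLINK_ISCSI message payload. */
static struct iscsi_path *example_get_path(struct nlmsghdr *nlh)
{
	struct iscsi_uevent *ev = (struct iscsi_uevent *)NLMSG_DATA(nlh);

	if (ev->type != ISCSI_KEVENT_PATH_REQ)
		return NULL;

	/* The path description starts right after the uevent header, so
	 * consumers that stop reading at sizeof(*ev) are unaffected. */
	return (struct iscsi_path *)((char *)ev + sizeof(*ev));
}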
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 45f9cc642c46..ebdd9f4cf070 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -679,6 +679,7 @@ struct fc_lport {
679 unsigned int e_d_tov; 679 unsigned int e_d_tov;
680 unsigned int r_a_tov; 680 unsigned int r_a_tov;
681 u8 max_retry_count; 681 u8 max_retry_count;
682 u8 max_rport_retry_count;
682 u16 link_speed; 683 u16 link_speed;
683 u16 link_supported_speeds; 684 u16 link_supported_speeds;
684 u16 lro_xid; /* max xid for fcoe lro */ 685 u16 lro_xid; /* max xid for fcoe lro */
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 0289f5745fb9..196525cd402f 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -82,9 +82,12 @@ enum {
82 82
83 83
84enum { 84enum {
85 ISCSI_TASK_FREE,
85 ISCSI_TASK_COMPLETED, 86 ISCSI_TASK_COMPLETED,
86 ISCSI_TASK_PENDING, 87 ISCSI_TASK_PENDING,
87 ISCSI_TASK_RUNNING, 88 ISCSI_TASK_RUNNING,
89 ISCSI_TASK_ABRT_TMF, /* aborted due to TMF */
90 ISCSI_TASK_ABRT_SESS_RECOV, /* aborted due to session recovery */
88}; 91};
89 92
90struct iscsi_r2t_info { 93struct iscsi_r2t_info {
@@ -181,9 +184,7 @@ struct iscsi_conn {
181 184
182 /* xmit */ 185 /* xmit */
183 struct list_head mgmtqueue; /* mgmt (control) xmit queue */ 186 struct list_head mgmtqueue; /* mgmt (control) xmit queue */
184 struct list_head mgmt_run_list; /* list of control tasks */ 187 struct list_head cmdqueue; /* data-path cmd queue */
185 struct list_head xmitqueue; /* data-path cmd queue */
186 struct list_head run_list; /* list of cmds in progress */
187 struct list_head requeue; /* tasks needing another run */ 188 struct list_head requeue; /* tasks needing another run */
188 struct work_struct xmitwork; /* per-conn. xmit workqueue */ 189 struct work_struct xmitwork; /* per-conn. xmit workqueue */
189 unsigned long suspend_tx; /* suspend Tx */ 190 unsigned long suspend_tx; /* suspend Tx */
@@ -406,6 +407,7 @@ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
406 char *, int); 407 char *, int);
407extern int iscsi_verify_itt(struct iscsi_conn *, itt_t); 408extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
408extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t); 409extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
410extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
409extern void iscsi_requeue_task(struct iscsi_task *task); 411extern void iscsi_requeue_task(struct iscsi_task *task);
410extern void iscsi_put_task(struct iscsi_task *task); 412extern void iscsi_put_task(struct iscsi_task *task);
411extern void __iscsi_get_task(struct iscsi_task *task); 413extern void __iscsi_get_task(struct iscsi_task *task);
diff --git a/include/scsi/osd_attributes.h b/include/scsi/osd_attributes.h
index f888a6fda073..56e920ade326 100644
--- a/include/scsi/osd_attributes.h
+++ b/include/scsi/osd_attributes.h
@@ -29,6 +29,7 @@ enum {
29 OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1, 29 OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1,
30 OSD_APAGE_PARTITION_QUOTAS = OSD_APAGE_PARTITION_FIRST + 2, 30 OSD_APAGE_PARTITION_QUOTAS = OSD_APAGE_PARTITION_FIRST + 2,
31 OSD_APAGE_PARTITION_TIMESTAMP = OSD_APAGE_PARTITION_FIRST + 3, 31 OSD_APAGE_PARTITION_TIMESTAMP = OSD_APAGE_PARTITION_FIRST + 3,
32 OSD_APAGE_PARTITION_ATTR_ACCESS = OSD_APAGE_PARTITION_FIRST + 4,
32 OSD_APAGE_PARTITION_SECURITY = OSD_APAGE_PARTITION_FIRST + 5, 33 OSD_APAGE_PARTITION_SECURITY = OSD_APAGE_PARTITION_FIRST + 5,
33 OSD_APAGE_PARTITION_LAST = 0x5FFFFFFF, 34 OSD_APAGE_PARTITION_LAST = 0x5FFFFFFF,
34 35
@@ -51,7 +52,9 @@ enum {
51 OSD_APAGE_RESERVED_TYPE_LAST = 0xEFFFFFFF, 52 OSD_APAGE_RESERVED_TYPE_LAST = 0xEFFFFFFF,
52 53
53 OSD_APAGE_COMMON_FIRST = 0xF0000000, 54 OSD_APAGE_COMMON_FIRST = 0xF0000000,
54 OSD_APAGE_COMMON_LAST = 0xFFFFFFFE, 55 OSD_APAGE_COMMON_LAST = 0xFFFFFFFD,
56
57 OSD_APAGE_CURRENT_COMMAND = 0xFFFFFFFE,
55 58
56 OSD_APAGE_REQUEST_ALL = 0xFFFFFFFF, 59 OSD_APAGE_REQUEST_ALL = 0xFFFFFFFF,
57}; 60};
@@ -106,10 +109,30 @@ enum {
106 OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7, /* 4 */ 109 OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7, /* 4 */
107 OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8, /* variable */ 110 OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8, /* variable */
108 OSD_ATTR_RI_OSD_NAME = 0x9, /* variable */ 111 OSD_ATTR_RI_OSD_NAME = 0x9, /* variable */
112 OSD_ATTR_RI_MAX_CDB_CONTINUATION_LEN = 0xA, /* 4 */
109 OSD_ATTR_RI_TOTAL_CAPACITY = 0x80, /* 8 */ 113 OSD_ATTR_RI_TOTAL_CAPACITY = 0x80, /* 8 */
110 OSD_ATTR_RI_USED_CAPACITY = 0x81, /* 8 */ 114 OSD_ATTR_RI_USED_CAPACITY = 0x81, /* 8 */
111 OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0, /* 8 */ 115 OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0, /* 8 */
112 OSD_ATTR_RI_CLOCK = 0x100, /* 6 */ 116 OSD_ATTR_RI_CLOCK = 0x100, /* 6 */
117 OARI_DEFAULT_ISOLATION_METHOD = 0X110, /* 1 */
118 OARI_SUPPORTED_ISOLATION_METHODS = 0X111, /* 32 */
119
120 OARI_DATA_ATOMICITY_GUARANTEE = 0X120, /* 8 */
121 OARI_DATA_ATOMICITY_ALIGNMENT = 0X121, /* 8 */
122 OARI_ATTRIBUTES_ATOMICITY_GUARANTEE = 0X122, /* 8 */
123 OARI_DATA_ATTRIBUTES_ATOMICITY_MULTIPLIER = 0X123, /* 1 */
124
125 OARI_MAXIMUM_SNAPSHOTS_COUNT = 0X1C1, /* 0 or 4 */
126 OARI_MAXIMUM_CLONES_COUNT = 0X1C2, /* 0 or 4 */
127 OARI_MAXIMUM_BRANCH_DEPTH = 0X1CC, /* 0 or 4 */
128 OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_FIRST = 0X200, /* 0 or 4 */
129 OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_LAST = 0X2ff, /* 0 or 4 */
130 OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_FIRST = 0X300, /* 0 or 4 */
131 OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_LAST = 0X30F, /* 0 or 4 */
132 OARI_SUPPORT_FOR_DUPLICATED_OBJECT_FREEZING = 0X310, /* 0 or 4 */
133 OARI_SUPPORT_FOR_SNAPSHOT_REFRESHING = 0X311, /* 0 or 1 */
134 OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_FIRST = 0X7000001,/* 0 or 4 */
135 OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_LAST = 0X700FFFF,/* 0 or 4 */
113}; 136};
114/* Root_Information_attributes_page does not have a get_page structure */ 137/* Root_Information_attributes_page does not have a get_page structure */
115 138
@@ -120,7 +143,15 @@ enum {
120 OSD_ATTR_PI_PARTITION_ID = 0x1, /* 8 */ 143 OSD_ATTR_PI_PARTITION_ID = 0x1, /* 8 */
121 OSD_ATTR_PI_USERNAME = 0x9, /* variable */ 144 OSD_ATTR_PI_USERNAME = 0x9, /* variable */
122 OSD_ATTR_PI_USED_CAPACITY = 0x81, /* 8 */ 145 OSD_ATTR_PI_USED_CAPACITY = 0x81, /* 8 */
146 OSD_ATTR_PI_USED_CAPACITY_INCREMENT = 0x84, /* 0 or 8 */
123 OSD_ATTR_PI_NUMBER_OF_OBJECTS = 0xC1, /* 8 */ 147 OSD_ATTR_PI_NUMBER_OF_OBJECTS = 0xC1, /* 8 */
148
149 OSD_ATTR_PI_ACTUAL_DATA_SPACE = 0xD1, /* 0 or 8 */
150 OSD_ATTR_PI_RESERVED_DATA_SPACE = 0xD2, /* 0 or 8 */
151 OSD_ATTR_PI_DEFAULT_SNAPSHOT_DUPLICATION_METHOD = 0x200,/* 0 or 4 */
152 OSD_ATTR_PI_DEFAULT_CLONE_DUPLICATION_METHOD = 0x201,/* 0 or 4 */
153 OSD_ATTR_PI_DEFAULT_SP_TIME_OF_DUPLICATION = 0x300,/* 0 or 4 */
154 OSD_ATTR_PI_DEFAULT_CLONE_TIME_OF_DUPLICATION = 0x301,/* 0 or 4 */
124}; 155};
125/* Partition Information attributes page does not have a get_page structure */ 156/* Partition Information attributes page does not have a get_page structure */
126 157
@@ -131,6 +162,7 @@ enum {
131 OSD_ATTR_CI_PARTITION_ID = 0x1, /* 8 */ 162 OSD_ATTR_CI_PARTITION_ID = 0x1, /* 8 */
132 OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2, /* 8 */ 163 OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2, /* 8 */
133 OSD_ATTR_CI_USERNAME = 0x9, /* variable */ 164 OSD_ATTR_CI_USERNAME = 0x9, /* variable */
165 OSD_ATTR_CI_COLLECTION_TYPE = 0xA, /* 1 */
134 OSD_ATTR_CI_USED_CAPACITY = 0x81, /* 8 */ 166 OSD_ATTR_CI_USED_CAPACITY = 0x81, /* 8 */
135}; 167};
136/* Collection Information attributes page does not have a get_page structure */ 168/* Collection Information attributes page does not have a get_page structure */
@@ -144,6 +176,8 @@ enum {
144 OSD_ATTR_OI_USERNAME = 0x9, /* variable */ 176 OSD_ATTR_OI_USERNAME = 0x9, /* variable */
145 OSD_ATTR_OI_USED_CAPACITY = 0x81, /* 8 */ 177 OSD_ATTR_OI_USED_CAPACITY = 0x81, /* 8 */
146 OSD_ATTR_OI_LOGICAL_LENGTH = 0x82, /* 8 */ 178 OSD_ATTR_OI_LOGICAL_LENGTH = 0x82, /* 8 */
179 SD_ATTR_OI_ACTUAL_DATA_SPACE = 0XD1, /* 0 OR 8 */
180 SD_ATTR_OI_RESERVED_DATA_SPACE = 0XD2, /* 0 OR 8 */
147}; 181};
148/* Object Information attributes page does not have a get_page structure */ 182/* Object Information attributes page does not have a get_page structure */
149 183
@@ -248,7 +282,18 @@ struct object_timestamps_attributes_page {
248 struct osd_timestamp data_modified_time; 282 struct osd_timestamp data_modified_time;
249} __packed; 283} __packed;
250 284
251/* 7.1.2.19 Collections attributes page */ 285/* OSD2r05: 7.1.3.19 Attributes Access attributes page
286 * (OSD_APAGE_PARTITION_ATTR_ACCESS)
287 *
288 * each attribute is of the form below. Total array length is deduced
289 * from the attribute's length
290 * (See allowed_attributes_access of the struct osd_cap_object_descriptor)
291 */
292struct attributes_access_attr {
293 struct osd_attributes_list_attrid attr_list[0];
294} __packed;
295
296/* OSD2r05: 7.1.2.21 Collections attributes page */
252/* TBD */ 297/* TBD */
253 298
254/* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */ 299/* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */
@@ -324,4 +369,29 @@ struct object_security_attributes_page {
324 __be32 policy_access_tag; 369 __be32 policy_access_tag;
325} __packed; 370} __packed;
326 371
372/* OSD2r05: 7.1.3.31 Current Command attributes page
373 * (OSD_APAGE_CURRENT_COMMAND)
374 */
375enum {
376 OSD_ATTR_CC_RESPONSE_INTEGRITY_CHECK_VALUE = 0x1, /* 32 */
377 OSD_ATTR_CC_OBJECT_TYPE = 0x2, /* 1 */
378 OSD_ATTR_CC_PARTITION_ID = 0x3, /* 8 */
379 OSD_ATTR_CC_OBJECT_ID = 0x4, /* 8 */
380 OSD_ATTR_CC_STARTING_BYTE_ADDRESS_OF_APPEND = 0x5, /* 8 */
381 OSD_ATTR_CC_CHANGE_IN_USED_CAPACITY = 0x6, /* 8 */
382};
383
384/*TBD: osdv1_current_command_attributes_page */
385
386struct osdv2_current_command_attributes_page {
387 struct osd_attr_page_header hdr; /* id=0xFFFFFFFE, size=0x44 */
388 u8 response_integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
389 u8 object_type;
390 u8 reserved[3];
391 __be64 partition_id;
392 __be64 object_id;
393 __be64 starting_byte_address_of_append;
394 __be64 change_in_used_capacity;
395};
396
327#endif /*ndef __OSD_ATTRIBUTES_H__*/ 397#endif /*ndef __OSD_ATTRIBUTES_H__*/
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index b24d9616eb46..02bd9f716357 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -18,6 +18,7 @@
18#include "osd_types.h" 18#include "osd_types.h"
19 19
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <scsi/scsi_device.h>
21 22
22/* Note: "NI" in comments below means "Not Implemented yet" */ 23/* Note: "NI" in comments below means "Not Implemented yet" */
23 24
@@ -47,6 +48,7 @@ enum osd_std_version {
47 */ 48 */
48struct osd_dev { 49struct osd_dev {
49 struct scsi_device *scsi_device; 50 struct scsi_device *scsi_device;
51 struct file *file;
50 unsigned def_timeout; 52 unsigned def_timeout;
51 53
52#ifdef OSD_VER1_SUPPORT 54#ifdef OSD_VER1_SUPPORT
@@ -69,6 +71,10 @@ void osd_dev_fini(struct osd_dev *od);
69 71
70/* some hi level device operations */ 72/* some hi level device operations */
71int osd_auto_detect_ver(struct osd_dev *od, void *caps); /* GFP_KERNEL */ 73int osd_auto_detect_ver(struct osd_dev *od, void *caps); /* GFP_KERNEL */
74static inline struct request_queue *osd_request_queue(struct osd_dev *od)
75{
76 return od->scsi_device->request_queue;
77}
72 78
73/* we might want to use function vector in the future */ 79/* we might want to use function vector in the future */
74static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v) 80static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v)
@@ -363,7 +369,9 @@ void osd_req_create_object(struct osd_request *or, struct osd_obj_id *);
363void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *); 369void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *);
364 370
365void osd_req_write(struct osd_request *or, 371void osd_req_write(struct osd_request *or,
366 const struct osd_obj_id *, struct bio *data_out, u64 offset); 372 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
373int osd_req_write_kern(struct osd_request *or,
374 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
367void osd_req_append(struct osd_request *or, 375void osd_req_append(struct osd_request *or,
368 const struct osd_obj_id *, struct bio *data_out);/* NI */ 376 const struct osd_obj_id *, struct bio *data_out);/* NI */
369void osd_req_create_write(struct osd_request *or, 377void osd_req_create_write(struct osd_request *or,
@@ -378,7 +386,9 @@ void osd_req_flush_object(struct osd_request *or,
378 /*V2*/ u64 offset, /*V2*/ u64 len); 386 /*V2*/ u64 offset, /*V2*/ u64 len);
379 387
380void osd_req_read(struct osd_request *or, 388void osd_req_read(struct osd_request *or,
381 const struct osd_obj_id *, struct bio *data_in, u64 offset); 389 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
390int osd_req_read_kern(struct osd_request *or,
391 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
382 392
383/* 393/*
384 * Root/Partition/Collection/Object Attributes commands 394 * Root/Partition/Collection/Object Attributes commands
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
index 62b2ab8c69d4..2cc8e8b1cc19 100644
--- a/include/scsi/osd_protocol.h
+++ b/include/scsi/osd_protocol.h
@@ -303,7 +303,15 @@ enum osd_service_actions {
303 OSD_ACT_V2(REMOVE_MEMBER_OBJECTS, 0x21) 303 OSD_ACT_V2(REMOVE_MEMBER_OBJECTS, 0x21)
304 OSD_ACT_V2(GET_MEMBER_ATTRIBUTES, 0x22) 304 OSD_ACT_V2(GET_MEMBER_ATTRIBUTES, 0x22)
305 OSD_ACT_V2(SET_MEMBER_ATTRIBUTES, 0x23) 305 OSD_ACT_V2(SET_MEMBER_ATTRIBUTES, 0x23)
306
307 OSD_ACT_V2(CREATE_CLONE, 0x28)
308 OSD_ACT_V2(CREATE_SNAPSHOT, 0x29)
309 OSD_ACT_V2(DETACH_CLONE, 0x2A)
310 OSD_ACT_V2(REFRESH_SNAPSHOT_CLONE, 0x2B)
311 OSD_ACT_V2(RESTORE_PARTITION_FROM_SNAPSHOT, 0x2C)
312
306 OSD_ACT_V2(READ_MAP, 0x31) 313 OSD_ACT_V2(READ_MAP, 0x31)
314 OSD_ACT_V2(READ_MAPS_COMPARE, 0x32)
307 315
308 OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND, 0x8F7E, 0x8F7C) 316 OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND, 0x8F7E, 0x8F7C)
309 OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT, 0x8F7F, 0x8F7D) 317 OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT, 0x8F7F, 0x8F7D)
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 457588e1119b..349c7f30720d 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -126,12 +126,14 @@ struct iscsi_transport {
126 int *index, int *age); 126 int *index, int *age);
127 127
128 void (*session_recovery_timedout) (struct iscsi_cls_session *session); 128 void (*session_recovery_timedout) (struct iscsi_cls_session *session);
129 struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr, 129 struct iscsi_endpoint *(*ep_connect) (struct Scsi_Host *shost,
130 struct sockaddr *dst_addr,
130 int non_blocking); 131 int non_blocking);
131 int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms); 132 int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
132 void (*ep_disconnect) (struct iscsi_endpoint *ep); 133 void (*ep_disconnect) (struct iscsi_endpoint *ep);
133 int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type, 134 int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
134 uint32_t enable, struct sockaddr *dst_addr); 135 uint32_t enable, struct sockaddr *dst_addr);
136 int (*set_path) (struct Scsi_Host *shost, struct iscsi_path *params);
135}; 137};
136 138
137/* 139/*
@@ -148,6 +150,10 @@ extern void iscsi_conn_error_event(struct iscsi_cls_conn *conn,
148extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 150extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
149 char *data, uint32_t data_size); 151 char *data, uint32_t data_size);
150 152
153extern int iscsi_offload_mesg(struct Scsi_Host *shost,
154 struct iscsi_transport *transport, uint32_t type,
155 char *data, uint16_t data_size);
156
151struct iscsi_cls_conn { 157struct iscsi_cls_conn {
152 struct list_head conn_list; /* item in connlist */ 158 struct list_head conn_list; /* item in connlist */
153 void *dd_data; /* LLD private data */ 159 void *dd_data; /* LLD private data */
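ep_connect() now receives the Scsi_Host so an offload transport can tie the endpoint to a specific HBA, and the new set_path() hook delivers the routing information userspace sends via ISCSI_UEVENT_PATH_UPDATE. A hedged outline of how an offload driver might wire up both callbacks; everything named example_* is invented for illustration:

#include <linux/err.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>

static struct iscsi_endpoint *
example_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		   int non_blocking)
{
	struct iscsi_endpoint *ep;

	if (!shost)		/* connects are now host-scoped */
		return ERR_PTR(-EINVAL);

	ep = iscsi_create_endpoint(0);	/* dd_size: driver-private bytes */
	if (!ep)
		return ERR_PTR(-ENOMEM);
	/* ... resolve dst_addr and start the offloaded connection ... */
	return ep;
}

static int example_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
{
	/* React to ISCSI_UEVENT_PATH_UPDATE: program the new MACs, VLAN
	 * and MTU into the adapter. Body omitted in this sketch. */
	return 0;
}

static struct iscsi_transport example_transport = {
	/* ... */
	.ep_connect	= example_ep_connect,
	.set_path	= example_set_path,
	/* ... */
};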