aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas47
-rw-r--r--drivers/s390/scsi/zfcp_fc.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c3
-rw-r--r--drivers/s390/scsi/zfcp_unit.c4
-rw-r--r--drivers/scsi/bfa/bfa.h48
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim.h22
-rw-r--r--drivers/scsi/bfa/bfa_core.c178
-rw-r--r--drivers/scsi/bfa/bfa_cs.h24
-rw-r--r--drivers/scsi/bfa/bfa_defs.h54
-rw-r--r--drivers/scsi/bfa/bfa_defs_fcs.h48
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h82
-rw-r--r--drivers/scsi/bfa/bfa_drv.c6
-rw-r--r--drivers/scsi/bfa/bfa_fc.h30
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c196
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c351
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h18
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c335
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h43
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c34
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c468
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c198
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c16
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c22
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c397
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h108
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c90
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c167
-rw-r--r--drivers/scsi/bfa/bfa_modules.h6
-rw-r--r--drivers/scsi/bfa/bfa_os_inc.h82
-rw-r--r--drivers/scsi/bfa/bfa_port.c40
-rw-r--r--drivers/scsi/bfa/bfa_svc.c504
-rw-r--r--drivers/scsi/bfa/bfa_svc.h41
-rw-r--r--drivers/scsi/bfa/bfad.c73
-rw-r--r--drivers/scsi/bfa/bfad_attr.c38
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c4
-rw-r--r--drivers/scsi/bfa/bfad_drv.h5
-rw-r--r--drivers/scsi/bfa/bfad_im.c71
-rw-r--r--drivers/scsi/bfa/bfi.h58
-rw-r--r--drivers/scsi/bfa/bfi_ms.h50
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c18
-rw-r--r--drivers/scsi/fcoe/libfcoe.c2
-rw-r--r--drivers/scsi/gdth.c8
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/libfc/fc_disc.c5
-rw-r--r--drivers/scsi/libfc/fc_fcp.c24
-rw-r--r--drivers/scsi/libfc/fc_lport.c12
-rw-r--r--drivers/scsi/libfc/fc_rport.c4
-rw-r--r--drivers/scsi/lpfc/lpfc.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c439
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c437
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h41
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h167
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c102
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c458
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h9
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c126
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h7
-rw-r--r--drivers/scsi/osd/osd_initiator.c244
-rw-r--r--drivers/scsi/pmcraid.c129
-rw-r--r--drivers/scsi/pmcraid.h23
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c125
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c207
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c101
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h20
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c10
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c10
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c16
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c11
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c89
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c109
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c3
-rw-r--r--drivers/scsi/sd.c23
-rw-r--r--drivers/scsi/sr_ioctl.c9
-rw-r--r--include/scsi/libfc.h2
-rw-r--r--include/scsi/osd_initiator.h16
-rw-r--r--include/scsi/osd_protocol.h42
-rw-r--r--include/scsi/osd_types.h5
98 files changed, 4160 insertions, 3006 deletions
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 30023568805..00301ed9c37 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,50 @@
11 Release Date : Thur. May 03, 2010 09:12:45 PST 2009 -
2 (emaild-id:megaraidlinux@lsi.com)
3 Bo Yang
4
52 Current Version : 00.00.04.31-rc1
63 Older Version : 00.00.04.17.1-rc1
7
81. Add the Online Controller Reset (OCR) to the Driver.
9 OCR is the new feature for megaraid_sas driver which
10 will allow the fw to do the chip reset which will not
11 affact the OS behavious.
12
13 To add the OCR support, driver need to do:
14 a). reset the controller chips -- Xscale and Gen2 which
15 will change the function calls and add the reset function
16 related to this two chips.
17
18 b). during the reset, driver will store the pending cmds
19 which not returned by FW to driver's pending queue. Driver
20 will re-issue those pending cmds again to FW after the OCR
21 finished.
22
23 c). In driver's timeout routine, driver will report to
24 OS as reset. Also driver's queue routine will block the
25 cmds until the OCR finished.
26
27 d). in Driver's ISR routine, if driver get the FW state as
28 state change, FW in Failure status and FW support online controller
29 reset (OCR), driver will start to do the controller reset.
30
31 e). In driver's IOCTL routine, the application cmds will wait for the
32 OCR to finish, then issue the cmds to FW.
33
34 f). Before driver kill adapter, driver will do last chance of
35 OCR to see if driver can bring back the FW.
36
372. Add the support update flag to the driver to tell LSI megaraid_sas
38 application which driver will support the device update. So application
39 will not need to do the device update after application add/del the device
40 from the system.
413. In driver's timeout routine, driver will do three time reset if fw is in
42 failed state. Driver will kill adapter if can't bring back FW after the
43 this three times reset.
444. Add the input parameter max_sectors to 1MB support to our GEN2 controller.
45 customer can use the input paramenter max_sectors to add 1MB support to GEN2
46 controller.
47
11 Release Date : Thur. Oct 29, 2009 09:12:45 PST 2009 - 481 Release Date : Thur. Oct 29, 2009 09:12:45 PST 2009 -
2 (emaild-id:megaraidlinux@lsi.com) 49 (emaild-id:megaraidlinux@lsi.com)
3 Bo Yang 50 Bo Yang
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 938d5036016..b464ae01086 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -270,7 +270,7 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
270 if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) { 270 if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
271 sense = (char *) &fcp_rsp[1]; 271 sense = (char *) &fcp_rsp[1];
272 if (rsp_flags & FCP_RSP_LEN_VAL) 272 if (rsp_flags & FCP_RSP_LEN_VAL)
273 sense += fcp_rsp->ext.fr_sns_len; 273 sense += fcp_rsp->ext.fr_rsp_len;
274 sense_len = min(fcp_rsp->ext.fr_sns_len, 274 sense_len = min(fcp_rsp->ext.fr_sns_len,
275 (u32) SCSI_SENSE_BUFFERSIZE); 275 (u32) SCSI_SENSE_BUFFERSIZE);
276 memcpy(scsi->sense_buffer, sense, sense_len); 276 memcpy(scsi->sense_buffer, sense, sense_len);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index beaf0916cea..be031745714 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -532,9 +532,6 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
532 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 532 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
533 adapter->hydra_version = 0; 533 adapter->hydra_version = 0;
534 534
535 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
536 &adapter->status);
537
538 zfcp_fsf_link_down_info_eval(req, 535 zfcp_fsf_link_down_info_eval(req,
539 &qtcb->header.fsf_status_qual.link_down_info); 536 &qtcb->header.fsf_status_qual.link_down_info);
540 break; 537 break;
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 1119c535a66..20796ebc33c 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -142,6 +142,8 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
142 return -ENOMEM; 142 return -ENOMEM;
143 } 143 }
144 144
145 get_device(&port->dev);
146
145 if (device_register(&unit->dev)) { 147 if (device_register(&unit->dev)) {
146 put_device(&unit->dev); 148 put_device(&unit->dev);
147 return -ENOMEM; 149 return -ENOMEM;
@@ -152,8 +154,6 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
152 return -EINVAL; 154 return -EINVAL;
153 } 155 }
154 156
155 get_device(&port->dev);
156
157 write_lock_irq(&port->unit_list_lock); 157 write_lock_irq(&port->unit_list_lock);
158 list_add_tail(&unit->list, &port->unit_list); 158 list_add_tail(&unit->list, &port->unit_list);
159 write_unlock_irq(&port->unit_list_lock); 159 write_unlock_irq(&port->unit_list_lock);
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index ceaac65a91f..ff2bd07161f 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -29,13 +29,13 @@ struct bfa_s;
29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); 29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
30typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); 30typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
31 31
32/** 32/*
33 * Interrupt message handlers 33 * Interrupt message handlers
34 */ 34 */
35void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); 35void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
36void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); 36void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
37 37
38/** 38/*
39 * Request and response queue related defines 39 * Request and response queue related defines
40 */ 40 */
41#define BFA_REQQ_NELEMS_MIN (4) 41#define BFA_REQQ_NELEMS_MIN (4)
@@ -58,9 +58,9 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
58#define bfa_reqq_produce(__bfa, __reqq) do { \ 58#define bfa_reqq_produce(__bfa, __reqq) do { \
59 (__bfa)->iocfc.req_cq_pi[__reqq]++; \ 59 (__bfa)->iocfc.req_cq_pi[__reqq]++; \
60 (__bfa)->iocfc.req_cq_pi[__reqq] &= \ 60 (__bfa)->iocfc.req_cq_pi[__reqq] &= \
61 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ 61 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
62 bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \ 62 writel((__bfa)->iocfc.req_cq_pi[__reqq], \
63 (__bfa)->iocfc.req_cq_pi[__reqq]); \ 63 (__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \
64 mmiowb(); \ 64 mmiowb(); \
65 } while (0) 65 } while (0)
66 66
@@ -76,7 +76,7 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
76 (__index) &= ((__size) - 1); \ 76 (__index) &= ((__size) - 1); \
77} while (0) 77} while (0)
78 78
79/** 79/*
80 * Queue element to wait for room in request queue. FIFO order is 80 * Queue element to wait for room in request queue. FIFO order is
81 * maintained when fullfilling requests. 81 * maintained when fullfilling requests.
82 */ 82 */
@@ -86,7 +86,7 @@ struct bfa_reqq_wait_s {
86 void *cbarg; 86 void *cbarg;
87}; 87};
88 88
89/** 89/*
90 * Circular queue usage assignments 90 * Circular queue usage assignments
91 */ 91 */
92enum { 92enum {
@@ -113,7 +113,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
113 113
114#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq]) 114#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
115 115
116/** 116/*
117 * static inline void 117 * static inline void
118 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe) 118 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
119 */ 119 */
@@ -130,7 +130,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
130#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe) 130#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
131 131
132 132
133/** 133/*
134 * Generic BFA callback element. 134 * Generic BFA callback element.
135 */ 135 */
136struct bfa_cb_qe_s { 136struct bfa_cb_qe_s {
@@ -163,7 +163,7 @@ struct bfa_cb_qe_s {
163 } while (0) 163 } while (0)
164 164
165 165
166/** 166/*
167 * PCI devices supported by the current BFA 167 * PCI devices supported by the current BFA
168 */ 168 */
169struct bfa_pciid_s { 169struct bfa_pciid_s {
@@ -173,7 +173,7 @@ struct bfa_pciid_s {
173 173
174extern char bfa_version[]; 174extern char bfa_version[];
175 175
176/** 176/*
177 * BFA memory resources 177 * BFA memory resources
178 */ 178 */
179enum bfa_mem_type { 179enum bfa_mem_type {
@@ -202,19 +202,19 @@ struct bfa_meminfo_s {
202 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp) 202 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
203 203
204struct bfa_iocfc_regs_s { 204struct bfa_iocfc_regs_s {
205 bfa_os_addr_t intr_status; 205 void __iomem *intr_status;
206 bfa_os_addr_t intr_mask; 206 void __iomem *intr_mask;
207 bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS]; 207 void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS];
208 bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS]; 208 void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS];
209 bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS]; 209 void __iomem *cpe_q_depth[BFI_IOC_MAX_CQS];
210 bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS]; 210 void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS];
211 bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS]; 211 void __iomem *rme_q_ci[BFI_IOC_MAX_CQS];
212 bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS]; 212 void __iomem *rme_q_pi[BFI_IOC_MAX_CQS];
213 bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS]; 213 void __iomem *rme_q_depth[BFI_IOC_MAX_CQS];
214 bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS]; 214 void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS];
215}; 215};
216 216
217/** 217/*
218 * MSIX vector handlers 218 * MSIX vector handlers
219 */ 219 */
220#define BFA_MSIX_MAX_VECTORS 22 220#define BFA_MSIX_MAX_VECTORS 22
@@ -224,7 +224,7 @@ struct bfa_msix_s {
224 bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS]; 224 bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
225}; 225};
226 226
227/** 227/*
228 * Chip specific interfaces 228 * Chip specific interfaces
229 */ 229 */
230struct bfa_hwif_s { 230struct bfa_hwif_s {
@@ -343,7 +343,7 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
343 struct bfi_pbc_vport_s *pbc_vport); 343 struct bfi_pbc_vport_s *pbc_vport);
344 344
345 345
346/** 346/*
347 *---------------------------------------------------------------------- 347 *----------------------------------------------------------------------
348 * BFA public interfaces 348 * BFA public interfaces
349 *---------------------------------------------------------------------- 349 *----------------------------------------------------------------------
diff --git a/drivers/scsi/bfa/bfa_cb_ioim.h b/drivers/scsi/bfa/bfa_cb_ioim.h
index a989a94c38d..6f021015f1f 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim.h
@@ -37,18 +37,18 @@ bfad_int_to_lun(u32 luno)
37 } lun; 37 } lun;
38 38
39 lun.bfa_lun = 0; 39 lun.bfa_lun = 0;
40 lun.scsi_lun[0] = bfa_os_htons(luno); 40 lun.scsi_lun[0] = cpu_to_be16(luno);
41 41
42 return lun.bfa_lun; 42 return lun.bfa_lun;
43} 43}
44 44
45/** 45/*
46 * Get LUN for the I/O request 46 * Get LUN for the I/O request
47 */ 47 */
48#define bfa_cb_ioim_get_lun(__dio) \ 48#define bfa_cb_ioim_get_lun(__dio) \
49 bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun) 49 bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
50 50
51/** 51/*
52 * Get CDB for the I/O request 52 * Get CDB for the I/O request
53 */ 53 */
54static inline u8 * 54static inline u8 *
@@ -59,7 +59,7 @@ bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
59 return (u8 *) cmnd->cmnd; 59 return (u8 *) cmnd->cmnd;
60} 60}
61 61
62/** 62/*
63 * Get I/O direction (read/write) for the I/O request 63 * Get I/O direction (read/write) for the I/O request
64 */ 64 */
65static inline enum fcp_iodir 65static inline enum fcp_iodir
@@ -77,7 +77,7 @@ bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
77 return FCP_IODIR_NONE; 77 return FCP_IODIR_NONE;
78} 78}
79 79
80/** 80/*
81 * Get IO size in bytes for the I/O request 81 * Get IO size in bytes for the I/O request
82 */ 82 */
83static inline u32 83static inline u32
@@ -88,7 +88,7 @@ bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
88 return scsi_bufflen(cmnd); 88 return scsi_bufflen(cmnd);
89} 89}
90 90
91/** 91/*
92 * Get timeout for the I/O request 92 * Get timeout for the I/O request
93 */ 93 */
94static inline u8 94static inline u8
@@ -104,7 +104,7 @@ bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
104 return 0; 104 return 0;
105} 105}
106 106
107/** 107/*
108 * Get Command Reference Number for the I/O request. 0 if none. 108 * Get Command Reference Number for the I/O request. 0 if none.
109 */ 109 */
110static inline u8 110static inline u8
@@ -113,7 +113,7 @@ bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
113 return 0; 113 return 0;
114} 114}
115 115
116/** 116/*
117 * Get SAM-3 priority for the I/O request. 0 is default. 117 * Get SAM-3 priority for the I/O request. 0 is default.
118 */ 118 */
119static inline u8 119static inline u8
@@ -122,7 +122,7 @@ bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
122 return 0; 122 return 0;
123} 123}
124 124
125/** 125/*
126 * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0). 126 * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
127 */ 127 */
128static inline u8 128static inline u8
@@ -148,7 +148,7 @@ bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
148 return task_attr; 148 return task_attr;
149} 149}
150 150
151/** 151/*
152 * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16). 152 * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
153 */ 153 */
154static inline u8 154static inline u8
@@ -159,7 +159,7 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
159 return cmnd->cmd_len; 159 return cmnd->cmd_len;
160} 160}
161 161
162/** 162/*
163 * Assign queue to be used for the I/O request. This value depends on whether 163 * Assign queue to be used for the I/O request. This value depends on whether
164 * the driver wants to use the queues via any specific algorithm. Currently, 164 * the driver wants to use the queues via any specific algorithm. Currently,
165 * this is not supported. 165 * this is not supported.
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index c2fa07f2485..2345f48dc57 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -21,11 +21,11 @@
21 21
22BFA_TRC_FILE(HAL, CORE); 22BFA_TRC_FILE(HAL, CORE);
23 23
24/** 24/*
25 * BFA IOC FC related definitions 25 * BFA IOC FC related definitions
26 */ 26 */
27 27
28/** 28/*
29 * IOC local definitions 29 * IOC local definitions
30 */ 30 */
31#define BFA_IOCFC_TOV 5000 /* msecs */ 31#define BFA_IOCFC_TOV 5000 /* msecs */
@@ -54,7 +54,7 @@ enum {
54#define DEF_CFG_NUM_SBOOT_TGTS 16 54#define DEF_CFG_NUM_SBOOT_TGTS 16
55#define DEF_CFG_NUM_SBOOT_LUNS 16 55#define DEF_CFG_NUM_SBOOT_LUNS 16
56 56
57/** 57/*
58 * forward declaration for IOC FC functions 58 * forward declaration for IOC FC functions
59 */ 59 */
60static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); 60static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
@@ -63,7 +63,7 @@ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
63static void bfa_iocfc_reset_cbfn(void *bfa_arg); 63static void bfa_iocfc_reset_cbfn(void *bfa_arg);
64static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; 64static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
65 65
66/** 66/*
67 * BFA Interrupt handling functions 67 * BFA Interrupt handling functions
68 */ 68 */
69static void 69static void
@@ -86,7 +86,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
86 86
87 waitq = bfa_reqq(bfa, qid); 87 waitq = bfa_reqq(bfa, qid);
88 list_for_each_safe(qe, qen, waitq) { 88 list_for_each_safe(qe, qen, waitq) {
89 /** 89 /*
90 * Callback only as long as there is room in request queue 90 * Callback only as long as there is room in request queue
91 */ 91 */
92 if (bfa_reqq_full(bfa, qid)) 92 if (bfa_reqq_full(bfa, qid))
@@ -104,7 +104,7 @@ bfa_msix_all(struct bfa_s *bfa, int vec)
104 bfa_intx(bfa); 104 bfa_intx(bfa);
105} 105}
106 106
107/** 107/*
108 * hal_intr_api 108 * hal_intr_api
109 */ 109 */
110bfa_boolean_t 110bfa_boolean_t
@@ -113,15 +113,15 @@ bfa_intx(struct bfa_s *bfa)
113 u32 intr, qintr; 113 u32 intr, qintr;
114 int queue; 114 int queue;
115 115
116 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); 116 intr = readl(bfa->iocfc.bfa_regs.intr_status);
117 if (!intr) 117 if (!intr)
118 return BFA_FALSE; 118 return BFA_FALSE;
119 119
120 /** 120 /*
121 * RME completion queue interrupt 121 * RME completion queue interrupt
122 */ 122 */
123 qintr = intr & __HFN_INT_RME_MASK; 123 qintr = intr & __HFN_INT_RME_MASK;
124 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); 124 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
125 125
126 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { 126 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
127 if (intr & (__HFN_INT_RME_Q0 << queue)) 127 if (intr & (__HFN_INT_RME_Q0 << queue))
@@ -131,11 +131,11 @@ bfa_intx(struct bfa_s *bfa)
131 if (!intr) 131 if (!intr)
132 return BFA_TRUE; 132 return BFA_TRUE;
133 133
134 /** 134 /*
135 * CPE completion queue interrupt 135 * CPE completion queue interrupt
136 */ 136 */
137 qintr = intr & __HFN_INT_CPE_MASK; 137 qintr = intr & __HFN_INT_CPE_MASK;
138 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); 138 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
139 139
140 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { 140 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
141 if (intr & (__HFN_INT_CPE_Q0 << queue)) 141 if (intr & (__HFN_INT_CPE_Q0 << queue))
@@ -153,13 +153,13 @@ bfa_intx(struct bfa_s *bfa)
153void 153void
154bfa_intx_enable(struct bfa_s *bfa) 154bfa_intx_enable(struct bfa_s *bfa)
155{ 155{
156 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask); 156 writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask);
157} 157}
158 158
159void 159void
160bfa_intx_disable(struct bfa_s *bfa) 160bfa_intx_disable(struct bfa_s *bfa)
161{ 161{
162 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); 162 writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
163} 163}
164 164
165void 165void
@@ -188,8 +188,8 @@ bfa_isr_enable(struct bfa_s *bfa)
188 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | 188 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
189 __HFN_INT_MBOX_LPU1); 189 __HFN_INT_MBOX_LPU1);
190 190
191 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask); 191 writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
192 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask); 192 writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
193 bfa->iocfc.intr_mask = ~intr_unmask; 193 bfa->iocfc.intr_mask = ~intr_unmask;
194 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); 194 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
195} 195}
@@ -198,7 +198,7 @@ void
198bfa_isr_disable(struct bfa_s *bfa) 198bfa_isr_disable(struct bfa_s *bfa)
199{ 199{
200 bfa_isr_mode_set(bfa, BFA_FALSE); 200 bfa_isr_mode_set(bfa, BFA_FALSE);
201 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); 201 writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
202 bfa_msix_uninstall(bfa); 202 bfa_msix_uninstall(bfa);
203} 203}
204 204
@@ -211,7 +211,7 @@ bfa_msix_reqq(struct bfa_s *bfa, int qid)
211 211
212 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); 212 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
213 213
214 /** 214 /*
215 * Resume any pending requests in the corresponding reqq. 215 * Resume any pending requests in the corresponding reqq.
216 */ 216 */
217 waitq = bfa_reqq(bfa, qid); 217 waitq = bfa_reqq(bfa, qid);
@@ -259,14 +259,14 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
259 } 259 }
260 } 260 }
261 261
262 /** 262 /*
263 * update CI 263 * update CI
264 */ 264 */
265 bfa_rspq_ci(bfa, qid) = pi; 265 bfa_rspq_ci(bfa, qid) = pi;
266 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi); 266 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
267 mmiowb(); 267 mmiowb();
268 268
269 /** 269 /*
270 * Resume any pending requests in the corresponding reqq. 270 * Resume any pending requests in the corresponding reqq.
271 */ 271 */
272 waitq = bfa_reqq(bfa, qid); 272 waitq = bfa_reqq(bfa, qid);
@@ -279,7 +279,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
279{ 279{
280 u32 intr, curr_value; 280 u32 intr, curr_value;
281 281
282 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); 282 intr = readl(bfa->iocfc.bfa_regs.intr_status);
283 283
284 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) 284 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
285 bfa_msix_lpu(bfa); 285 bfa_msix_lpu(bfa);
@@ -289,30 +289,30 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
289 289
290 if (intr) { 290 if (intr) {
291 if (intr & __HFN_INT_LL_HALT) { 291 if (intr & __HFN_INT_LL_HALT) {
292 /** 292 /*
293 * If LL_HALT bit is set then FW Init Halt LL Port 293 * If LL_HALT bit is set then FW Init Halt LL Port
294 * Register needs to be cleared as well so Interrupt 294 * Register needs to be cleared as well so Interrupt
295 * Status Register will be cleared. 295 * Status Register will be cleared.
296 */ 296 */
297 curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt); 297 curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
298 curr_value &= ~__FW_INIT_HALT_P; 298 curr_value &= ~__FW_INIT_HALT_P;
299 bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value); 299 writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
300 } 300 }
301 301
302 if (intr & __HFN_INT_ERR_PSS) { 302 if (intr & __HFN_INT_ERR_PSS) {
303 /** 303 /*
304 * ERR_PSS bit needs to be cleared as well in case 304 * ERR_PSS bit needs to be cleared as well in case
305 * interrups are shared so driver's interrupt handler is 305 * interrups are shared so driver's interrupt handler is
306 * still called eventhough it is already masked out. 306 * still called eventhough it is already masked out.
307 */ 307 */
308 curr_value = bfa_reg_read( 308 curr_value = readl(
309 bfa->ioc.ioc_regs.pss_err_status_reg); 309 bfa->ioc.ioc_regs.pss_err_status_reg);
310 curr_value &= __PSS_ERR_STATUS_SET; 310 curr_value &= __PSS_ERR_STATUS_SET;
311 bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg, 311 writel(curr_value,
312 curr_value); 312 bfa->ioc.ioc_regs.pss_err_status_reg);
313 } 313 }
314 314
315 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr); 315 writel(intr, bfa->iocfc.bfa_regs.intr_status);
316 bfa_msix_errint(bfa, intr); 316 bfa_msix_errint(bfa, intr);
317 } 317 }
318} 318}
@@ -323,11 +323,11 @@ bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
323 bfa_isrs[mc] = isr_func; 323 bfa_isrs[mc] = isr_func;
324} 324}
325 325
326/** 326/*
327 * BFA IOC FC related functions 327 * BFA IOC FC related functions
328 */ 328 */
329 329
330/** 330/*
331 * hal_ioc_pvt BFA IOC private functions 331 * hal_ioc_pvt BFA IOC private functions
332 */ 332 */
333 333
@@ -366,7 +366,7 @@ bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
366 BFA_CACHELINE_SZ); 366 BFA_CACHELINE_SZ);
367} 367}
368 368
369/** 369/*
370 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ 370 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
371 */ 371 */
372static void 372static void
@@ -384,14 +384,14 @@ bfa_iocfc_send_cfg(void *bfa_arg)
384 384
385 bfa_iocfc_reset_queues(bfa); 385 bfa_iocfc_reset_queues(bfa);
386 386
387 /** 387 /*
388 * initialize IOC configuration info 388 * initialize IOC configuration info
389 */ 389 */
390 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 390 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
391 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 391 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
392 392
393 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 393 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
394 /** 394 /*
395 * dma map REQ and RSP circular queues and shadow pointers 395 * dma map REQ and RSP circular queues and shadow pointers
396 */ 396 */
397 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 397 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
@@ -400,17 +400,17 @@ bfa_iocfc_send_cfg(void *bfa_arg)
400 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], 400 bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
401 iocfc->req_cq_shadow_ci[i].pa); 401 iocfc->req_cq_shadow_ci[i].pa);
402 cfg_info->req_cq_elems[i] = 402 cfg_info->req_cq_elems[i] =
403 bfa_os_htons(cfg->drvcfg.num_reqq_elems); 403 cpu_to_be16(cfg->drvcfg.num_reqq_elems);
404 404
405 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], 405 bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
406 iocfc->rsp_cq_ba[i].pa); 406 iocfc->rsp_cq_ba[i].pa);
407 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], 407 bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
408 iocfc->rsp_cq_shadow_pi[i].pa); 408 iocfc->rsp_cq_shadow_pi[i].pa);
409 cfg_info->rsp_cq_elems[i] = 409 cfg_info->rsp_cq_elems[i] =
410 bfa_os_htons(cfg->drvcfg.num_rspq_elems); 410 cpu_to_be16(cfg->drvcfg.num_rspq_elems);
411 } 411 }
412 412
413 /** 413 /*
414 * Enable interrupt coalescing if it is driver init path 414 * Enable interrupt coalescing if it is driver init path
415 * and not ioc disable/enable path. 415 * and not ioc disable/enable path.
416 */ 416 */
@@ -419,7 +419,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
419 419
420 iocfc->cfgdone = BFA_FALSE; 420 iocfc->cfgdone = BFA_FALSE;
421 421
422 /** 422 /*
423 * dma map IOC configuration itself 423 * dma map IOC configuration itself
424 */ 424 */
425 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, 425 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
@@ -440,9 +440,9 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
440 iocfc->bfa = bfa; 440 iocfc->bfa = bfa;
441 iocfc->action = BFA_IOCFC_ACT_NONE; 441 iocfc->action = BFA_IOCFC_ACT_NONE;
442 442
443 bfa_os_assign(iocfc->cfg, *cfg); 443 iocfc->cfg = *cfg;
444 444
445 /** 445 /*
446 * Initialize chip specific handlers. 446 * Initialize chip specific handlers.
447 */ 447 */
448 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { 448 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
@@ -503,13 +503,13 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
503 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 503 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
504 iocfc->req_cq_ba[i].kva = dm_kva; 504 iocfc->req_cq_ba[i].kva = dm_kva;
505 iocfc->req_cq_ba[i].pa = dm_pa; 505 iocfc->req_cq_ba[i].pa = dm_pa;
506 bfa_os_memset(dm_kva, 0, per_reqq_sz); 506 memset(dm_kva, 0, per_reqq_sz);
507 dm_kva += per_reqq_sz; 507 dm_kva += per_reqq_sz;
508 dm_pa += per_reqq_sz; 508 dm_pa += per_reqq_sz;
509 509
510 iocfc->rsp_cq_ba[i].kva = dm_kva; 510 iocfc->rsp_cq_ba[i].kva = dm_kva;
511 iocfc->rsp_cq_ba[i].pa = dm_pa; 511 iocfc->rsp_cq_ba[i].pa = dm_pa;
512 bfa_os_memset(dm_kva, 0, per_rspq_sz); 512 memset(dm_kva, 0, per_rspq_sz);
513 dm_kva += per_rspq_sz; 513 dm_kva += per_rspq_sz;
514 dm_pa += per_rspq_sz; 514 dm_pa += per_rspq_sz;
515 } 515 }
@@ -559,7 +559,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
559 } 559 }
560} 560}
561 561
562/** 562/*
563 * Start BFA submodules. 563 * Start BFA submodules.
564 */ 564 */
565static void 565static void
@@ -573,7 +573,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
573 hal_mods[i]->start(bfa); 573 hal_mods[i]->start(bfa);
574} 574}
575 575
576/** 576/*
577 * Disable BFA submodules. 577 * Disable BFA submodules.
578 */ 578 */
579static void 579static void
@@ -623,7 +623,7 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
623 complete(&bfad->disable_comp); 623 complete(&bfad->disable_comp);
624} 624}
625 625
626/** 626/*
627 * Update BFA configuration from firmware configuration. 627 * Update BFA configuration from firmware configuration.
628 */ 628 */
629static void 629static void
@@ -634,15 +634,15 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
634 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; 634 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
635 635
636 fwcfg->num_cqs = fwcfg->num_cqs; 636 fwcfg->num_cqs = fwcfg->num_cqs;
637 fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs); 637 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
638 fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs); 638 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
639 fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs); 639 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
640 fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs); 640 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
641 fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports); 641 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
642 642
643 iocfc->cfgdone = BFA_TRUE; 643 iocfc->cfgdone = BFA_TRUE;
644 644
645 /** 645 /*
646 * Configuration is complete - initialize/start submodules 646 * Configuration is complete - initialize/start submodules
647 */ 647 */
648 bfa_fcport_init(bfa); 648 bfa_fcport_init(bfa);
@@ -665,7 +665,7 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
665 } 665 }
666} 666}
667 667
668/** 668/*
669 * IOC enable request is complete 669 * IOC enable request is complete
670 */ 670 */
671static void 671static void
@@ -684,7 +684,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
684 bfa_iocfc_send_cfg(bfa); 684 bfa_iocfc_send_cfg(bfa);
685} 685}
686 686
687/** 687/*
688 * IOC disable request is complete 688 * IOC disable request is complete
689 */ 689 */
690static void 690static void
@@ -705,7 +705,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
705 } 705 }
706} 706}
707 707
708/** 708/*
709 * Notify sub-modules of hardware failure. 709 * Notify sub-modules of hardware failure.
710 */ 710 */
711static void 711static void
@@ -723,7 +723,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
723 bfa); 723 bfa);
724} 724}
725 725
726/** 726/*
727 * Actions on chip-reset completion. 727 * Actions on chip-reset completion.
728 */ 728 */
729static void 729static void
@@ -735,11 +735,11 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
735 bfa_isr_enable(bfa); 735 bfa_isr_enable(bfa);
736} 736}
737 737
738/** 738/*
739 * hal_ioc_public 739 * hal_ioc_public
740 */ 740 */
741 741
742/** 742/*
743 * Query IOC memory requirement information. 743 * Query IOC memory requirement information.
744 */ 744 */
745void 745void
@@ -754,7 +754,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); 754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
755} 755}
756 756
757/** 757/*
758 * Query IOC memory requirement information. 758 * Query IOC memory requirement information.
759 */ 759 */
760void 760void
@@ -772,7 +772,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
772 ioc->trcmod = bfa->trcmod; 772 ioc->trcmod = bfa->trcmod;
773 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); 773 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
774 774
775 /** 775 /*
776 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. 776 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
777 */ 777 */
778 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) 778 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
@@ -790,7 +790,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
790 INIT_LIST_HEAD(&bfa->reqq_waitq[i]); 790 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
791} 791}
792 792
793/** 793/*
794 * Query IOC memory requirement information. 794 * Query IOC memory requirement information.
795 */ 795 */
796void 796void
@@ -799,7 +799,7 @@ bfa_iocfc_detach(struct bfa_s *bfa)
799 bfa_ioc_detach(&bfa->ioc); 799 bfa_ioc_detach(&bfa->ioc);
800} 800}
801 801
802/** 802/*
803 * Query IOC memory requirement information. 803 * Query IOC memory requirement information.
804 */ 804 */
805void 805void
@@ -809,7 +809,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
809 bfa_ioc_enable(&bfa->ioc); 809 bfa_ioc_enable(&bfa->ioc);
810} 810}
811 811
812/** 812/*
813 * IOC start called from bfa_start(). Called to start IOC operations 813 * IOC start called from bfa_start(). Called to start IOC operations
814 * at driver instantiation for this instance. 814 * at driver instantiation for this instance.
815 */ 815 */
@@ -820,7 +820,7 @@ bfa_iocfc_start(struct bfa_s *bfa)
820 bfa_iocfc_start_submod(bfa); 820 bfa_iocfc_start_submod(bfa);
821} 821}
822 822
823/** 823/*
824 * IOC stop called from bfa_stop(). Called only when driver is unloaded 824 * IOC stop called from bfa_stop(). Called only when driver is unloaded
825 * for this instance. 825 * for this instance.
826 */ 826 */
@@ -876,12 +876,12 @@ bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
876 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; 876 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
877 877
878 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? 878 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
879 bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) : 879 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
880 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay); 880 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
881 881
882 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? 882 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
883 bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) : 883 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
884 bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency); 884 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
885 885
886 attr->config = iocfc->cfg; 886 attr->config = iocfc->cfg;
887} 887}
@@ -893,8 +893,8 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
893 struct bfi_iocfc_set_intr_req_s *m; 893 struct bfi_iocfc_set_intr_req_s *m;
894 894
895 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; 895 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
896 iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay); 896 iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
897 iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency); 897 iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
898 898
899 if (!bfa_iocfc_is_operational(bfa)) 899 if (!bfa_iocfc_is_operational(bfa))
900 return BFA_STATUS_OK; 900 return BFA_STATUS_OK;
@@ -924,7 +924,7 @@ bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
924 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); 924 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
925 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); 925 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
926} 926}
927/** 927/*
928 * Enable IOC after it is disabled. 928 * Enable IOC after it is disabled.
929 */ 929 */
930void 930void
@@ -953,7 +953,7 @@ bfa_iocfc_is_operational(struct bfa_s *bfa)
953 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; 953 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
954} 954}
955 955
956/** 956/*
957 * Return boot target port wwns -- read from boot information in flash. 957 * Return boot target port wwns -- read from boot information in flash.
958 */ 958 */
959void 959void
@@ -998,11 +998,11 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
998 return cfgrsp->pbc_cfg.nvports; 998 return cfgrsp->pbc_cfg.nvports;
999} 999}
1000 1000
1001/** 1001/*
1002 * hal_api 1002 * hal_api
1003 */ 1003 */
1004 1004
1005/** 1005/*
1006 * Use this function query the memory requirement of the BFA library. 1006 * Use this function query the memory requirement of the BFA library.
1007 * This function needs to be called before bfa_attach() to get the 1007 * This function needs to be called before bfa_attach() to get the
1008 * memory required of the BFA layer for a given driver configuration. 1008 * memory required of the BFA layer for a given driver configuration.
@@ -1038,7 +1038,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
1038 1038
1039 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1039 bfa_assert((cfg != NULL) && (meminfo != NULL));
1040 1040
1041 bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); 1041 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
1042 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = 1042 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
1043 BFA_MEM_TYPE_KVA; 1043 BFA_MEM_TYPE_KVA;
1044 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type = 1044 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
@@ -1055,7 +1055,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
1055 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 1055 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
1056} 1056}
1057 1057
1058/** 1058/*
1059 * Use this function to do attach the driver instance with the BFA 1059 * Use this function to do attach the driver instance with the BFA
1060 * library. This function will not trigger any HW initialization 1060 * library. This function will not trigger any HW initialization
1061 * process (which will be done in bfa_init() call) 1061 * process (which will be done in bfa_init() call)
@@ -1092,7 +1092,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1092 1092
1093 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1093 bfa_assert((cfg != NULL) && (meminfo != NULL));
1094 1094
1095 /** 1095 /*
1096 * initialize all memory pointers for iterative allocation 1096 * initialize all memory pointers for iterative allocation
1097 */ 1097 */
1098 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { 1098 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
@@ -1109,7 +1109,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1109 bfa_com_port_attach(bfa, meminfo); 1109 bfa_com_port_attach(bfa, meminfo);
1110} 1110}
1111 1111
1112/** 1112/*
1113 * Use this function to delete a BFA IOC. IOC should be stopped (by 1113 * Use this function to delete a BFA IOC. IOC should be stopped (by
1114 * calling bfa_stop()) before this function call. 1114 * calling bfa_stop()) before this function call.
1115 * 1115 *
@@ -1146,7 +1146,7 @@ bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
1146 bfa->plog = plog; 1146 bfa->plog = plog;
1147} 1147}
1148 1148
1149/** 1149/*
1150 * Initialize IOC. 1150 * Initialize IOC.
1151 * 1151 *
1152 * This function will return immediately, when the IOC initialization is 1152 * This function will return immediately, when the IOC initialization is
@@ -1169,7 +1169,7 @@ bfa_init(struct bfa_s *bfa)
1169 bfa_iocfc_init(bfa); 1169 bfa_iocfc_init(bfa);
1170} 1170}
1171 1171
1172/** 1172/*
1173 * Use this function initiate the IOC configuration setup. This function 1173 * Use this function initiate the IOC configuration setup. This function
1174 * will return immediately. 1174 * will return immediately.
1175 * 1175 *
@@ -1183,7 +1183,7 @@ bfa_start(struct bfa_s *bfa)
1183 bfa_iocfc_start(bfa); 1183 bfa_iocfc_start(bfa);
1184} 1184}
1185 1185
1186/** 1186/*
1187 * Use this function quiese the IOC. This function will return immediately, 1187 * Use this function quiese the IOC. This function will return immediately,
1188 * when the IOC is actually stopped, the bfad->comp will be set. 1188 * when the IOC is actually stopped, the bfad->comp will be set.
1189 * 1189 *
@@ -1243,7 +1243,7 @@ bfa_attach_fcs(struct bfa_s *bfa)
1243 bfa->fcs = BFA_TRUE; 1243 bfa->fcs = BFA_TRUE;
1244} 1244}
1245 1245
1246/** 1246/*
1247 * Periodic timer heart beat from driver 1247 * Periodic timer heart beat from driver
1248 */ 1248 */
1249void 1249void
@@ -1252,7 +1252,7 @@ bfa_timer_tick(struct bfa_s *bfa)
1252 bfa_timer_beat(&bfa->timer_mod); 1252 bfa_timer_beat(&bfa->timer_mod);
1253} 1253}
1254 1254
1255/** 1255/*
1256 * Return the list of PCI vendor/device id lists supported by this 1256 * Return the list of PCI vendor/device id lists supported by this
1257 * BFA instance. 1257 * BFA instance.
1258 */ 1258 */
@@ -1270,7 +1270,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1270 *pciids = __pciids; 1270 *pciids = __pciids;
1271} 1271}
1272 1272
1273/** 1273/*
1274 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled 1274 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
1275 * into BFA layer). The OS driver can then turn back and overwrite entries that 1275 * into BFA layer). The OS driver can then turn back and overwrite entries that
1276 * have been configured by the user. 1276 * have been configured by the user.
@@ -1328,7 +1328,7 @@ bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
1328 bfa_ioc_get_attr(&bfa->ioc, ioc_attr); 1328 bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
1329} 1329}
1330 1330
1331/** 1331/*
1332 * Retrieve firmware trace information on IOC failure. 1332 * Retrieve firmware trace information on IOC failure.
1333 */ 1333 */
1334bfa_status_t 1334bfa_status_t
@@ -1337,7 +1337,7 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
1337 return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen); 1337 return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
1338} 1338}
1339 1339
1340/** 1340/*
1341 * Clear the saved firmware trace information of an IOC. 1341 * Clear the saved firmware trace information of an IOC.
1342 */ 1342 */
1343void 1343void
@@ -1346,7 +1346,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa)
1346 bfa_ioc_debug_fwsave_clear(&bfa->ioc); 1346 bfa_ioc_debug_fwsave_clear(&bfa->ioc);
1347} 1347}
1348 1348
1349/** 1349/*
1350 * Fetch firmware trace data. 1350 * Fetch firmware trace data.
1351 * 1351 *
1352 * @param[in] bfa BFA instance 1352 * @param[in] bfa BFA instance
@@ -1362,7 +1362,7 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
1362 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); 1362 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
1363} 1363}
1364 1364
1365/** 1365/*
1366 * Dump firmware memory. 1366 * Dump firmware memory.
1367 * 1367 *
1368 * @param[in] bfa BFA instance 1368 * @param[in] bfa BFA instance
@@ -1378,7 +1378,7 @@ bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
1378{ 1378{
1379 return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen); 1379 return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
1380} 1380}
1381/** 1381/*
1382 * Reset hw semaphore & usage cnt regs and initialize. 1382 * Reset hw semaphore & usage cnt regs and initialize.
1383 */ 1383 */
1384void 1384void
@@ -1388,7 +1388,7 @@ bfa_chip_reset(struct bfa_s *bfa)
1388 bfa_ioc_pll_init(&bfa->ioc); 1388 bfa_ioc_pll_init(&bfa->ioc);
1389} 1389}
1390 1390
1391/** 1391/*
1392 * Fetch firmware statistics data. 1392 * Fetch firmware statistics data.
1393 * 1393 *
1394 * @param[in] bfa BFA instance 1394 * @param[in] bfa BFA instance
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 7260c74620f..99f242b9aa3 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfa_cs.h BFA common services 19 * bfa_cs.h BFA common services
20 */ 20 */
21 21
@@ -24,7 +24,7 @@
24 24
25#include "bfa_os_inc.h" 25#include "bfa_os_inc.h"
26 26
27/** 27/*
28 * BFA TRC 28 * BFA TRC
29 */ 29 */
30 30
@@ -73,7 +73,7 @@ enum {
73#define BFA_TRC_MOD_SH 10 73#define BFA_TRC_MOD_SH 10
74#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH) 74#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
75 75
76/** 76/*
77 * Define a new tracing file (module). Module should match one defined above. 77 * Define a new tracing file (module). Module should match one defined above.
78 */ 78 */
79#define BFA_TRC_FILE(__mod, __submod) \ 79#define BFA_TRC_FILE(__mod, __submod) \
@@ -155,7 +155,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
155#define bfa_trc_fp(_trcp, _data) 155#define bfa_trc_fp(_trcp, _data)
156#endif 156#endif
157 157
158/** 158/*
159 * @ BFA LOG interfaces 159 * @ BFA LOG interfaces
160 */ 160 */
161#define bfa_assert(__cond) do { \ 161#define bfa_assert(__cond) do { \
@@ -249,13 +249,13 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
249#define bfa_q_is_on_q(_q, _qe) \ 249#define bfa_q_is_on_q(_q, _qe) \
250 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe)) 250 bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
251 251
252/** 252/*
253 * @ BFA state machine interfaces 253 * @ BFA state machine interfaces
254 */ 254 */
255 255
256typedef void (*bfa_sm_t)(void *sm, int event); 256typedef void (*bfa_sm_t)(void *sm, int event);
257 257
258/** 258/*
259 * oc - object class eg. bfa_ioc 259 * oc - object class eg. bfa_ioc
260 * st - state, eg. reset 260 * st - state, eg. reset
261 * otype - object type, eg. struct bfa_ioc_s 261 * otype - object type, eg. struct bfa_ioc_s
@@ -269,7 +269,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
269#define bfa_sm_get_state(_sm) ((_sm)->sm) 269#define bfa_sm_get_state(_sm) ((_sm)->sm)
270#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) 270#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
271 271
272/** 272/*
273 * For converting from state machine function to state encoding. 273 * For converting from state machine function to state encoding.
274 */ 274 */
275struct bfa_sm_table_s { 275struct bfa_sm_table_s {
@@ -279,12 +279,12 @@ struct bfa_sm_table_s {
279}; 279};
280#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) 280#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
281 281
282/** 282/*
283 * State machine with entry actions. 283 * State machine with entry actions.
284 */ 284 */
285typedef void (*bfa_fsm_t)(void *fsm, int event); 285typedef void (*bfa_fsm_t)(void *fsm, int event);
286 286
287/** 287/*
288 * oc - object class eg. bfa_ioc 288 * oc - object class eg. bfa_ioc
289 * st - state, eg. reset 289 * st - state, eg. reset
290 * otype - object type, eg. struct bfa_ioc_s 290 * otype - object type, eg. struct bfa_ioc_s
@@ -314,7 +314,7 @@ bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
314 return smt[i].state; 314 return smt[i].state;
315} 315}
316 316
317/** 317/*
318 * @ Generic wait counter. 318 * @ Generic wait counter.
319 */ 319 */
320 320
@@ -340,7 +340,7 @@ bfa_wc_down(struct bfa_wc_s *wc)
340 wc->wc_resume(wc->wc_cbarg); 340 wc->wc_resume(wc->wc_cbarg);
341} 341}
342 342
343/** 343/*
344 * Initialize a waiting counter. 344 * Initialize a waiting counter.
345 */ 345 */
346static inline void 346static inline void
@@ -352,7 +352,7 @@ bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
352 bfa_wc_up(wc); 352 bfa_wc_up(wc);
353} 353}
354 354
355/** 355/*
356 * Wait for counter to reach zero 356 * Wait for counter to reach zero
357 */ 357 */
358static inline void 358static inline void
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index d49877ff514..4b5b9e35abb 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -24,7 +24,7 @@
24#define BFA_MFG_SERIALNUM_SIZE 11 24#define BFA_MFG_SERIALNUM_SIZE 11
25#define STRSZ(_n) (((_n) + 4) & ~3) 25#define STRSZ(_n) (((_n) + 4) & ~3)
26 26
27/** 27/*
28 * Manufacturing card type 28 * Manufacturing card type
29 */ 29 */
30enum { 30enum {
@@ -45,7 +45,7 @@ enum {
45 45
46#pragma pack(1) 46#pragma pack(1)
47 47
48/** 48/*
49 * Check if Mezz card 49 * Check if Mezz card
50 */ 50 */
51#define bfa_mfg_is_mezz(type) (( \ 51#define bfa_mfg_is_mezz(type) (( \
@@ -55,7 +55,7 @@ enum {
55 (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ 55 (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
56 (type) == BFA_MFG_TYPE_LIGHTNING)) 56 (type) == BFA_MFG_TYPE_LIGHTNING))
57 57
58/** 58/*
59 * Check if the card having old wwn/mac handling 59 * Check if the card having old wwn/mac handling
60 */ 60 */
61#define bfa_mfg_is_old_wwn_mac_model(type) (( \ 61#define bfa_mfg_is_old_wwn_mac_model(type) (( \
@@ -78,12 +78,12 @@ do { \
78 (m)[2] = t & 0xFF; \ 78 (m)[2] = t & 0xFF; \
79} while (0) 79} while (0)
80 80
81/** 81/*
82 * VPD data length 82 * VPD data length
83 */ 83 */
84#define BFA_MFG_VPD_LEN 512 84#define BFA_MFG_VPD_LEN 512
85 85
86/** 86/*
87 * VPD vendor tag 87 * VPD vendor tag
88 */ 88 */
89enum { 89enum {
@@ -97,7 +97,7 @@ enum {
97 BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */ 97 BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
98}; 98};
99 99
100/** 100/*
101 * All numerical fields are in big-endian format. 101 * All numerical fields are in big-endian format.
102 */ 102 */
103struct bfa_mfg_vpd_s { 103struct bfa_mfg_vpd_s {
@@ -112,7 +112,7 @@ struct bfa_mfg_vpd_s {
112 112
113#pragma pack() 113#pragma pack()
114 114
115/** 115/*
116 * Status return values 116 * Status return values
117 */ 117 */
118enum bfa_status { 118enum bfa_status {
@@ -167,11 +167,11 @@ enum bfa_boolean {
167#define BFA_STRING_32 32 167#define BFA_STRING_32 32
168#define BFA_VERSION_LEN 64 168#define BFA_VERSION_LEN 64
169 169
170/** 170/*
171 * ---------------------- adapter definitions ------------ 171 * ---------------------- adapter definitions ------------
172 */ 172 */
173 173
174/** 174/*
175 * BFA adapter level attributes. 175 * BFA adapter level attributes.
176 */ 176 */
177enum { 177enum {
@@ -215,7 +215,7 @@ struct bfa_adapter_attr_s {
215 u8 trunk_capable; 215 u8 trunk_capable;
216}; 216};
217 217
218/** 218/*
219 * ---------------------- IOC definitions ------------ 219 * ---------------------- IOC definitions ------------
220 */ 220 */
221 221
@@ -224,7 +224,7 @@ enum {
224 BFA_IOC_CHIP_REV_LEN = 8, 224 BFA_IOC_CHIP_REV_LEN = 8,
225}; 225};
226 226
227/** 227/*
228 * Driver and firmware versions. 228 * Driver and firmware versions.
229 */ 229 */
230struct bfa_ioc_driver_attr_s { 230struct bfa_ioc_driver_attr_s {
@@ -236,7 +236,7 @@ struct bfa_ioc_driver_attr_s {
236 char ob_ver[BFA_VERSION_LEN]; /* openboot version */ 236 char ob_ver[BFA_VERSION_LEN]; /* openboot version */
237}; 237};
238 238
239/** 239/*
240 * IOC PCI device attributes 240 * IOC PCI device attributes
241 */ 241 */
242struct bfa_ioc_pci_attr_s { 242struct bfa_ioc_pci_attr_s {
@@ -249,7 +249,7 @@ struct bfa_ioc_pci_attr_s {
249 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ 249 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
250}; 250};
251 251
252/** 252/*
253 * IOC states 253 * IOC states
254 */ 254 */
255enum bfa_ioc_state { 255enum bfa_ioc_state {
@@ -267,7 +267,7 @@ enum bfa_ioc_state {
267 BFA_IOC_ENABLING = 12, /* IOC is being enabled */ 267 BFA_IOC_ENABLING = 12, /* IOC is being enabled */
268}; 268};
269 269
270/** 270/*
271 * IOC firmware stats 271 * IOC firmware stats
272 */ 272 */
273struct bfa_fw_ioc_stats_s { 273struct bfa_fw_ioc_stats_s {
@@ -279,7 +279,7 @@ struct bfa_fw_ioc_stats_s {
279 u32 unknown_reqs; 279 u32 unknown_reqs;
280}; 280};
281 281
282/** 282/*
283 * IOC driver stats 283 * IOC driver stats
284 */ 284 */
285struct bfa_ioc_drv_stats_s { 285struct bfa_ioc_drv_stats_s {
@@ -296,7 +296,7 @@ struct bfa_ioc_drv_stats_s {
296 u32 enable_replies; 296 u32 enable_replies;
297}; 297};
298 298
299/** 299/*
300 * IOC statistics 300 * IOC statistics
301 */ 301 */
302struct bfa_ioc_stats_s { 302struct bfa_ioc_stats_s {
@@ -310,7 +310,7 @@ enum bfa_ioc_type_e {
310 BFA_IOC_TYPE_LL = 3, 310 BFA_IOC_TYPE_LL = 3,
311}; 311};
312 312
313/** 313/*
314 * IOC attributes returned in queries 314 * IOC attributes returned in queries
315 */ 315 */
316struct bfa_ioc_attr_s { 316struct bfa_ioc_attr_s {
@@ -323,11 +323,11 @@ struct bfa_ioc_attr_s {
323 u8 rsvd[7]; /* 64bit align */ 323 u8 rsvd[7]; /* 64bit align */
324}; 324};
325 325
326/** 326/*
327 * ---------------------- mfg definitions ------------ 327 * ---------------------- mfg definitions ------------
328 */ 328 */
329 329
330/** 330/*
331 * Checksum size 331 * Checksum size
332 */ 332 */
333#define BFA_MFG_CHKSUM_SIZE 16 333#define BFA_MFG_CHKSUM_SIZE 16
@@ -340,7 +340,7 @@ struct bfa_ioc_attr_s {
340 340
341#pragma pack(1) 341#pragma pack(1)
342 342
343/** 343/*
344 * All numerical fields are in big-endian format. 344 * All numerical fields are in big-endian format.
345 */ 345 */
346struct bfa_mfg_block_s { 346struct bfa_mfg_block_s {
@@ -373,11 +373,11 @@ struct bfa_mfg_block_s {
373 373
374#pragma pack() 374#pragma pack()
375 375
376/** 376/*
377 * ---------------------- pci definitions ------------ 377 * ---------------------- pci definitions ------------
378 */ 378 */
379 379
380/** 380/*
381 * PCI device and vendor ID information 381 * PCI device and vendor ID information
382 */ 382 */
383enum { 383enum {
@@ -392,14 +392,14 @@ enum {
392 ((devid) == BFA_PCI_DEVICE_ID_CT || \ 392 ((devid) == BFA_PCI_DEVICE_ID_CT || \
393 (devid) == BFA_PCI_DEVICE_ID_CT_FC) 393 (devid) == BFA_PCI_DEVICE_ID_CT_FC)
394 394
395/** 395/*
396 * PCI sub-system device and vendor ID information 396 * PCI sub-system device and vendor ID information
397 */ 397 */
398enum { 398enum {
399 BFA_PCI_FCOE_SSDEVICE_ID = 0x14, 399 BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
400}; 400};
401 401
402/** 402/*
403 * Maximum number of device address ranges mapped through different BAR(s) 403 * Maximum number of device address ranges mapped through different BAR(s)
404 */ 404 */
405#define BFA_PCI_ACCESS_RANGES 1 405#define BFA_PCI_ACCESS_RANGES 1
@@ -430,7 +430,7 @@ enum {
430#define BOOT_CFG_REV1 1 430#define BOOT_CFG_REV1 1
431#define BOOT_CFG_VLAN 1 431#define BOOT_CFG_VLAN 1
432 432
433/** 433/*
434 * Boot options setting. Boot options setting determines from where 434 * Boot options setting. Boot options setting determines from where
435 * to get the boot lun information 435 * to get the boot lun information
436 */ 436 */
@@ -442,7 +442,7 @@ enum bfa_boot_bootopt {
442}; 442};
443 443
444#pragma pack(1) 444#pragma pack(1)
445/** 445/*
446 * Boot lun information. 446 * Boot lun information.
447 */ 447 */
448struct bfa_boot_bootlun_s { 448struct bfa_boot_bootlun_s {
@@ -451,7 +451,7 @@ struct bfa_boot_bootlun_s {
451}; 451};
452#pragma pack() 452#pragma pack()
453 453
454/** 454/*
455 * BOOT boot configuraton 455 * BOOT boot configuraton
456 */ 456 */
457struct bfa_boot_pbc_s { 457struct bfa_boot_pbc_s {
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
index 96905d30182..191d34a58b9 100644
--- a/drivers/scsi/bfa/bfa_defs_fcs.h
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -21,7 +21,7 @@
21#include "bfa_fc.h" 21#include "bfa_fc.h"
22#include "bfa_defs_svc.h" 22#include "bfa_defs_svc.h"
23 23
24/** 24/*
25 * VF states 25 * VF states
26 */ 26 */
27enum bfa_vf_state { 27enum bfa_vf_state {
@@ -35,7 +35,7 @@ enum bfa_vf_state {
35 BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */ 35 BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
36}; 36};
37 37
38/** 38/*
39 * VF statistics 39 * VF statistics
40 */ 40 */
41struct bfa_vf_stats_s { 41struct bfa_vf_stats_s {
@@ -55,7 +55,7 @@ struct bfa_vf_stats_s {
55 u32 resvd; /* padding for 64 bit alignment */ 55 u32 resvd; /* padding for 64 bit alignment */
56}; 56};
57 57
58/** 58/*
59 * VF attributes returned in queries 59 * VF attributes returned in queries
60 */ 60 */
61struct bfa_vf_attr_s { 61struct bfa_vf_attr_s {
@@ -67,7 +67,7 @@ struct bfa_vf_attr_s {
67#define BFA_FCS_MAX_LPORTS 256 67#define BFA_FCS_MAX_LPORTS 256
68#define BFA_FCS_FABRIC_IPADDR_SZ 16 68#define BFA_FCS_FABRIC_IPADDR_SZ 16
69 69
70/** 70/*
71 * symbolic names for base port/virtual port 71 * symbolic names for base port/virtual port
72 */ 72 */
73#define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */ 73#define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */
@@ -75,7 +75,7 @@ struct bfa_lport_symname_s {
75 char symname[BFA_SYMNAME_MAXLEN]; 75 char symname[BFA_SYMNAME_MAXLEN];
76}; 76};
77 77
78/** 78/*
79* Roles of FCS port: 79* Roles of FCS port:
80 * - FCP IM and FCP TM roles cannot be enabled together for a FCS port 80 * - FCP IM and FCP TM roles cannot be enabled together for a FCS port
81 * - Create multiple ports if both IM and TM functions required. 81 * - Create multiple ports if both IM and TM functions required.
@@ -86,19 +86,19 @@ enum bfa_lport_role {
86 BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM, 86 BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM,
87}; 87};
88 88
89/** 89/*
90 * FCS port configuration. 90 * FCS port configuration.
91 */ 91 */
92struct bfa_lport_cfg_s { 92struct bfa_lport_cfg_s {
93 wwn_t pwwn; /* port wwn */ 93 wwn_t pwwn; /* port wwn */
94 wwn_t nwwn; /* node wwn */ 94 wwn_t nwwn; /* node wwn */
95 struct bfa_lport_symname_s sym_name; /* vm port symbolic name */ 95 struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
96 bfa_boolean_t preboot_vp; /* vport created from PBC */ 96 bfa_boolean_t preboot_vp; /* vport created from PBC */
97 enum bfa_lport_role roles; /* FCS port roles */ 97 enum bfa_lport_role roles; /* FCS port roles */
98 u8 tag[16]; /* opaque tag from application */ 98 u8 tag[16]; /* opaque tag from application */
99}; 99};
100 100
101/** 101/*
102 * FCS port states 102 * FCS port states
103 */ 103 */
104enum bfa_lport_state { 104enum bfa_lport_state {
@@ -108,7 +108,7 @@ enum bfa_lport_state {
108 BFA_LPORT_OFFLINE = 3, /* No login to fabric */ 108 BFA_LPORT_OFFLINE = 3, /* No login to fabric */
109}; 109};
110 110
111/** 111/*
112 * FCS port type. 112 * FCS port type.
113 */ 113 */
114enum bfa_lport_type { 114enum bfa_lport_type {
@@ -116,7 +116,7 @@ enum bfa_lport_type {
116 BFA_LPORT_TYPE_VIRTUAL, 116 BFA_LPORT_TYPE_VIRTUAL,
117}; 117};
118 118
119/** 119/*
120 * FCS port offline reason. 120 * FCS port offline reason.
121 */ 121 */
122enum bfa_lport_offline_reason { 122enum bfa_lport_offline_reason {
@@ -128,7 +128,7 @@ enum bfa_lport_offline_reason {
128 BFA_LPORT_OFFLINE_FAB_LOGOUT, 128 BFA_LPORT_OFFLINE_FAB_LOGOUT,
129}; 129};
130 130
131/** 131/*
132 * FCS lport info. 132 * FCS lport info.
133 */ 133 */
134struct bfa_lport_info_s { 134struct bfa_lport_info_s {
@@ -150,7 +150,7 @@ struct bfa_lport_info_s {
150 150
151}; 151};
152 152
153/** 153/*
154 * FCS port statistics 154 * FCS port statistics
155 */ 155 */
156struct bfa_lport_stats_s { 156struct bfa_lport_stats_s {
@@ -222,7 +222,7 @@ struct bfa_lport_stats_s {
222 * (max retry of plogi) */ 222 * (max retry of plogi) */
223}; 223};
224 224
225/** 225/*
226 * BFA port attribute returned in queries 226 * BFA port attribute returned in queries
227 */ 227 */
228struct bfa_lport_attr_s { 228struct bfa_lport_attr_s {
@@ -239,7 +239,7 @@ struct bfa_lport_attr_s {
239}; 239};
240 240
241 241
242/** 242/*
243 * VPORT states 243 * VPORT states
244 */ 244 */
245enum bfa_vport_state { 245enum bfa_vport_state {
@@ -258,7 +258,7 @@ enum bfa_vport_state {
258 BFA_FCS_VPORT_MAX_STATE, 258 BFA_FCS_VPORT_MAX_STATE,
259}; 259};
260 260
261/** 261/*
262 * vport statistics 262 * vport statistics
263 */ 263 */
264struct bfa_vport_stats_s { 264struct bfa_vport_stats_s {
@@ -296,7 +296,7 @@ struct bfa_vport_stats_s {
296 u32 rsvd; 296 u32 rsvd;
297}; 297};
298 298
299/** 299/*
300 * BFA vport attribute returned in queries 300 * BFA vport attribute returned in queries
301 */ 301 */
302struct bfa_vport_attr_s { 302struct bfa_vport_attr_s {
@@ -305,7 +305,7 @@ struct bfa_vport_attr_s {
305 u32 rsvd; 305 u32 rsvd;
306}; 306};
307 307
308/** 308/*
309 * FCS remote port states 309 * FCS remote port states
310 */ 310 */
311enum bfa_rport_state { 311enum bfa_rport_state {
@@ -321,7 +321,7 @@ enum bfa_rport_state {
321 BFA_RPORT_NSDISC = 9, /* re-discover rport */ 321 BFA_RPORT_NSDISC = 9, /* re-discover rport */
322}; 322};
323 323
324/** 324/*
325 * Rport Scsi Function : Initiator/Target. 325 * Rport Scsi Function : Initiator/Target.
326 */ 326 */
327enum bfa_rport_function { 327enum bfa_rport_function {
@@ -329,7 +329,7 @@ enum bfa_rport_function {
329 BFA_RPORT_TARGET = 0x02, /* SCSI Target */ 329 BFA_RPORT_TARGET = 0x02, /* SCSI Target */
330}; 330};
331 331
332/** 332/*
333 * port/node symbolic names for rport 333 * port/node symbolic names for rport
334 */ 334 */
335#define BFA_RPORT_SYMNAME_MAXLEN 255 335#define BFA_RPORT_SYMNAME_MAXLEN 255
@@ -337,7 +337,7 @@ struct bfa_rport_symname_s {
337 char symname[BFA_RPORT_SYMNAME_MAXLEN]; 337 char symname[BFA_RPORT_SYMNAME_MAXLEN];
338}; 338};
339 339
340/** 340/*
341 * FCS remote port statistics 341 * FCS remote port statistics
342 */ 342 */
343struct bfa_rport_stats_s { 343struct bfa_rport_stats_s {
@@ -374,7 +374,7 @@ struct bfa_rport_stats_s {
374 struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */ 374 struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */
375}; 375};
376 376
377/** 377/*
378 * FCS remote port attributes returned in queries 378 * FCS remote port attributes returned in queries
379 */ 379 */
380struct bfa_rport_attr_s { 380struct bfa_rport_attr_s {
@@ -411,7 +411,7 @@ struct bfa_rport_remote_link_stats_s {
411#define BFA_MAX_IO_INDEX 7 411#define BFA_MAX_IO_INDEX 7
412#define BFA_NO_IO_INDEX 9 412#define BFA_NO_IO_INDEX 9
413 413
414/** 414/*
415 * FCS itnim states 415 * FCS itnim states
416 */ 416 */
417enum bfa_itnim_state { 417enum bfa_itnim_state {
@@ -425,7 +425,7 @@ enum bfa_itnim_state {
425 BFA_ITNIM_INITIATIOR = 7, /* initiator */ 425 BFA_ITNIM_INITIATIOR = 7, /* initiator */
426}; 426};
427 427
428/** 428/*
429 * FCS remote port statistics 429 * FCS remote port statistics
430 */ 430 */
431struct bfa_itnim_stats_s { 431struct bfa_itnim_stats_s {
@@ -443,7 +443,7 @@ struct bfa_itnim_stats_s {
443 u32 rsvd; /* padding for 64 bit alignment */ 443 u32 rsvd; /* padding for 64 bit alignment */
444}; 444};
445 445
446/** 446/*
447 * FCS itnim attributes returned in queries 447 * FCS itnim attributes returned in queries
448 */ 448 */
449struct bfa_itnim_attr_s { 449struct bfa_itnim_attr_s {
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 56226fcf947..e24e9f7ca81 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -27,7 +27,7 @@
27#define BFA_IOCFCOE_INTR_DELAY 25 27#define BFA_IOCFCOE_INTR_DELAY 25
28#define BFA_IOCFCOE_INTR_LATENCY 5 28#define BFA_IOCFCOE_INTR_LATENCY 5
29 29
30/** 30/*
31 * Interrupt coalescing configuration. 31 * Interrupt coalescing configuration.
32 */ 32 */
33#pragma pack(1) 33#pragma pack(1)
@@ -38,7 +38,7 @@ struct bfa_iocfc_intr_attr_s {
38 u16 delay; /* delay in microseconds */ 38 u16 delay; /* delay in microseconds */
39}; 39};
40 40
41/** 41/*
42 * IOC firmware configuraton 42 * IOC firmware configuraton
43 */ 43 */
44struct bfa_iocfc_fwcfg_s { 44struct bfa_iocfc_fwcfg_s {
@@ -71,7 +71,7 @@ struct bfa_iocfc_drvcfg_s {
71 u32 rsvd; 71 u32 rsvd;
72}; 72};
73 73
74/** 74/*
75 * IOC configuration 75 * IOC configuration
76 */ 76 */
77struct bfa_iocfc_cfg_s { 77struct bfa_iocfc_cfg_s {
@@ -79,7 +79,7 @@ struct bfa_iocfc_cfg_s {
79 struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */ 79 struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
80}; 80};
81 81
82/** 82/*
83 * IOC firmware IO stats 83 * IOC firmware IO stats
84 */ 84 */
85struct bfa_fw_io_stats_s { 85struct bfa_fw_io_stats_s {
@@ -152,7 +152,7 @@ struct bfa_fw_io_stats_s {
152 */ 152 */
153}; 153};
154 154
155/** 155/*
156 * IOC port firmware stats 156 * IOC port firmware stats
157 */ 157 */
158 158
@@ -262,7 +262,7 @@ struct bfa_fw_fcoe_stats_s {
262 u32 mac_invalids; /* Invalid mac assigned */ 262 u32 mac_invalids; /* Invalid mac assigned */
263}; 263};
264 264
265/** 265/*
266 * IOC firmware FCoE port stats 266 * IOC firmware FCoE port stats
267 */ 267 */
268struct bfa_fw_fcoe_port_stats_s { 268struct bfa_fw_fcoe_port_stats_s {
@@ -270,7 +270,7 @@ struct bfa_fw_fcoe_port_stats_s {
270 struct bfa_fw_fip_stats_s fip_stats; 270 struct bfa_fw_fip_stats_s fip_stats;
271}; 271};
272 272
273/** 273/*
274 * IOC firmware FC uport stats 274 * IOC firmware FC uport stats
275 */ 275 */
276struct bfa_fw_fc_uport_stats_s { 276struct bfa_fw_fc_uport_stats_s {
@@ -278,7 +278,7 @@ struct bfa_fw_fc_uport_stats_s {
278 struct bfa_fw_port_lksm_stats_s lksm_stats; 278 struct bfa_fw_port_lksm_stats_s lksm_stats;
279}; 279};
280 280
281/** 281/*
282 * IOC firmware FC port stats 282 * IOC firmware FC port stats
283 */ 283 */
284union bfa_fw_fc_port_stats_s { 284union bfa_fw_fc_port_stats_s {
@@ -286,7 +286,7 @@ union bfa_fw_fc_port_stats_s {
286 struct bfa_fw_fcoe_port_stats_s fcoe_stats; 286 struct bfa_fw_fcoe_port_stats_s fcoe_stats;
287}; 287};
288 288
289/** 289/*
290 * IOC firmware port stats 290 * IOC firmware port stats
291 */ 291 */
292struct bfa_fw_port_stats_s { 292struct bfa_fw_port_stats_s {
@@ -295,7 +295,7 @@ struct bfa_fw_port_stats_s {
295 union bfa_fw_fc_port_stats_s fc_port; 295 union bfa_fw_fc_port_stats_s fc_port;
296}; 296};
297 297
298/** 298/*
299 * fcxchg module statistics 299 * fcxchg module statistics
300 */ 300 */
301struct bfa_fw_fcxchg_stats_s { 301struct bfa_fw_fcxchg_stats_s {
@@ -308,7 +308,7 @@ struct bfa_fw_lpsm_stats_s {
308 u32 cls_tx; 308 u32 cls_tx;
309}; 309};
310 310
311/** 311/*
312 * Trunk statistics 312 * Trunk statistics
313 */ 313 */
314struct bfa_fw_trunk_stats_s { 314struct bfa_fw_trunk_stats_s {
@@ -334,7 +334,7 @@ struct bfa_fw_advsm_stats_s {
334 u32 elp_dropped; /* ELP dropped */ 334 u32 elp_dropped; /* ELP dropped */
335}; 335};
336 336
337/** 337/*
338 * IOCFC firmware stats 338 * IOCFC firmware stats
339 */ 339 */
340struct bfa_fw_iocfc_stats_s { 340struct bfa_fw_iocfc_stats_s {
@@ -345,7 +345,7 @@ struct bfa_fw_iocfc_stats_s {
345 u32 set_intr_reqs; /* set interrupt reqs */ 345 u32 set_intr_reqs; /* set interrupt reqs */
346}; 346};
347 347
348/** 348/*
349 * IOC attributes returned in queries 349 * IOC attributes returned in queries
350 */ 350 */
351struct bfa_iocfc_attr_s { 351struct bfa_iocfc_attr_s {
@@ -353,7 +353,7 @@ struct bfa_iocfc_attr_s {
353 struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */ 353 struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */
354}; 354};
355 355
356/** 356/*
357 * Eth_sndrcv mod stats 357 * Eth_sndrcv mod stats
358 */ 358 */
359struct bfa_fw_eth_sndrcv_stats_s { 359struct bfa_fw_eth_sndrcv_stats_s {
@@ -361,7 +361,7 @@ struct bfa_fw_eth_sndrcv_stats_s {
361 u32 rsvd; /* 64bit align */ 361 u32 rsvd; /* 64bit align */
362}; 362};
363 363
364/** 364/*
365 * CT MAC mod stats 365 * CT MAC mod stats
366 */ 366 */
367struct bfa_fw_mac_mod_stats_s { 367struct bfa_fw_mac_mod_stats_s {
@@ -379,7 +379,7 @@ struct bfa_fw_mac_mod_stats_s {
379 u32 rsvd; /* 64bit align */ 379 u32 rsvd; /* 64bit align */
380}; 380};
381 381
382/** 382/*
383 * CT MOD stats 383 * CT MOD stats
384 */ 384 */
385struct bfa_fw_ct_mod_stats_s { 385struct bfa_fw_ct_mod_stats_s {
@@ -391,7 +391,7 @@ struct bfa_fw_ct_mod_stats_s {
391 u32 rsvd; /* 64bit align */ 391 u32 rsvd; /* 64bit align */
392}; 392};
393 393
394/** 394/*
395 * IOC firmware stats 395 * IOC firmware stats
396 */ 396 */
397struct bfa_fw_stats_s { 397struct bfa_fw_stats_s {
@@ -412,7 +412,7 @@ struct bfa_fw_stats_s {
412#define BFA_IOCFC_PATHTOV_MAX 60 412#define BFA_IOCFC_PATHTOV_MAX 60
413#define BFA_IOCFC_QDEPTH_MAX 2000 413#define BFA_IOCFC_QDEPTH_MAX 2000
414 414
415/** 415/*
416 * QoS states 416 * QoS states
417 */ 417 */
418enum bfa_qos_state { 418enum bfa_qos_state {
@@ -420,7 +420,7 @@ enum bfa_qos_state {
420 BFA_QOS_OFFLINE = 2, /* QoS is offline */ 420 BFA_QOS_OFFLINE = 2, /* QoS is offline */
421}; 421};
422 422
423/** 423/*
424 * QoS Priority levels. 424 * QoS Priority levels.
425 */ 425 */
426enum bfa_qos_priority { 426enum bfa_qos_priority {
@@ -430,7 +430,7 @@ enum bfa_qos_priority {
430 BFA_QOS_LOW = 3, /* QoS Priority Level Low */ 430 BFA_QOS_LOW = 3, /* QoS Priority Level Low */
431}; 431};
432 432
433/** 433/*
434 * QoS bandwidth allocation for each priority level 434 * QoS bandwidth allocation for each priority level
435 */ 435 */
436enum bfa_qos_bw_alloc { 436enum bfa_qos_bw_alloc {
@@ -439,7 +439,7 @@ enum bfa_qos_bw_alloc {
439 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ 439 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
440}; 440};
441#pragma pack(1) 441#pragma pack(1)
442/** 442/*
443 * QoS attribute returned in QoS Query 443 * QoS attribute returned in QoS Query
444 */ 444 */
445struct bfa_qos_attr_s { 445struct bfa_qos_attr_s {
@@ -448,7 +448,7 @@ struct bfa_qos_attr_s {
448 u32 total_bb_cr; /* Total BB Credits */ 448 u32 total_bb_cr; /* Total BB Credits */
449}; 449};
450 450
451/** 451/*
452 * These fields should be displayed only from the CLI. 452 * These fields should be displayed only from the CLI.
453 * There will be a separate BFAL API (get_qos_vc_attr ?) 453 * There will be a separate BFAL API (get_qos_vc_attr ?)
454 * to retrieve this. 454 * to retrieve this.
@@ -471,7 +471,7 @@ struct bfa_qos_vc_attr_s {
471 * total_vc_count */ 471 * total_vc_count */
472}; 472};
473 473
474/** 474/*
475 * QoS statistics 475 * QoS statistics
476 */ 476 */
477struct bfa_qos_stats_s { 477struct bfa_qos_stats_s {
@@ -489,7 +489,7 @@ struct bfa_qos_stats_s {
489 u32 rsvd; /* padding for 64 bit alignment */ 489 u32 rsvd; /* padding for 64 bit alignment */
490}; 490};
491 491
492/** 492/*
493 * FCoE statistics 493 * FCoE statistics
494 */ 494 */
495struct bfa_fcoe_stats_s { 495struct bfa_fcoe_stats_s {
@@ -540,7 +540,7 @@ struct bfa_fcoe_stats_s {
540 u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */ 540 u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
541}; 541};
542 542
543/** 543/*
544 * QoS or FCoE stats (fcport stats excluding physical FC port stats) 544 * QoS or FCoE stats (fcport stats excluding physical FC port stats)
545 */ 545 */
546union bfa_fcport_stats_u { 546union bfa_fcport_stats_u {
@@ -639,7 +639,7 @@ enum bfa_port_states {
639 BFA_PORT_ST_MAX_STATE, 639 BFA_PORT_ST_MAX_STATE,
640}; 640};
641 641
642/** 642/*
643 * Port operational type (in sync with SNIA port type). 643 * Port operational type (in sync with SNIA port type).
644 */ 644 */
645enum bfa_port_type { 645enum bfa_port_type {
@@ -651,7 +651,7 @@ enum bfa_port_type {
651 BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */ 651 BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */
652}; 652};
653 653
654/** 654/*
655 * Port topology setting. A port's topology and fabric login status 655 * Port topology setting. A port's topology and fabric login status
656 * determine its operational type. 656 * determine its operational type.
657 */ 657 */
@@ -662,7 +662,7 @@ enum bfa_port_topology {
662 BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */ 662 BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */
663}; 663};
664 664
665/** 665/*
666 * Physical port loopback types. 666 * Physical port loopback types.
667 */ 667 */
668enum bfa_port_opmode { 668enum bfa_port_opmode {
@@ -679,7 +679,7 @@ enum bfa_port_opmode {
679 (_mode == BFA_PORT_OPMODE_LB_SLW) || \ 679 (_mode == BFA_PORT_OPMODE_LB_SLW) || \
680 (_mode == BFA_PORT_OPMODE_LB_EXT)) 680 (_mode == BFA_PORT_OPMODE_LB_EXT))
681 681
682/** 682/*
683 * Port link state 683 * Port link state
684 */ 684 */
685enum bfa_port_linkstate { 685enum bfa_port_linkstate {
@@ -687,7 +687,7 @@ enum bfa_port_linkstate {
687 BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */ 687 BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */
688}; 688};
689 689
690/** 690/*
691 * Port link state reason code 691 * Port link state reason code
692 */ 692 */
693enum bfa_port_linkstate_rsn { 693enum bfa_port_linkstate_rsn {
@@ -733,7 +733,7 @@ enum bfa_port_linkstate_rsn {
733 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43 733 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
734}; 734};
735#pragma pack(1) 735#pragma pack(1)
736/** 736/*
737 * Physical port configuration 737 * Physical port configuration
738 */ 738 */
739struct bfa_port_cfg_s { 739struct bfa_port_cfg_s {
@@ -753,7 +753,7 @@ struct bfa_port_cfg_s {
753}; 753};
754#pragma pack() 754#pragma pack()
755 755
756/** 756/*
757 * Port attribute values. 757 * Port attribute values.
758 */ 758 */
759struct bfa_port_attr_s { 759struct bfa_port_attr_s {
@@ -800,7 +800,7 @@ struct bfa_port_attr_s {
800 u8 rsvd1[6]; 800 u8 rsvd1[6];
801}; 801};
802 802
803/** 803/*
804 * Port FCP mappings. 804 * Port FCP mappings.
805 */ 805 */
806struct bfa_port_fcpmap_s { 806struct bfa_port_fcpmap_s {
@@ -815,7 +815,7 @@ struct bfa_port_fcpmap_s {
815 char luid[256]; 815 char luid[256];
816}; 816};
817 817
818/** 818/*
819 * Port RNID info. 819 * Port RNID info.
820 */ 820 */
821struct bfa_port_rnid_s { 821struct bfa_port_rnid_s {
@@ -848,7 +848,7 @@ struct bfa_fcport_fcf_s {
848 mac_t mac; /* FCF mac */ 848 mac_t mac; /* FCF mac */
849}; 849};
850 850
851/** 851/*
852 * Trunk states for BCU/BFAL 852 * Trunk states for BCU/BFAL
853 */ 853 */
854enum bfa_trunk_state { 854enum bfa_trunk_state {
@@ -857,7 +857,7 @@ enum bfa_trunk_state {
857 BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */ 857 BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */
858}; 858};
859 859
860/** 860/*
861 * VC attributes for trunked link 861 * VC attributes for trunked link
862 */ 862 */
863struct bfa_trunk_vc_attr_s { 863struct bfa_trunk_vc_attr_s {
@@ -867,7 +867,7 @@ struct bfa_trunk_vc_attr_s {
867 u16 vc_credits[8]; 867 u16 vc_credits[8];
868}; 868};
869 869
870/** 870/*
871 * Link state information 871 * Link state information
872 */ 872 */
873struct bfa_port_link_s { 873struct bfa_port_link_s {
@@ -959,7 +959,7 @@ struct bfa_rport_hal_stats_s {
959 u32 rsvd; 959 u32 rsvd;
960}; 960};
961#pragma pack(1) 961#pragma pack(1)
962/** 962/*
963 * Rport's QoS attributes 963 * Rport's QoS attributes
964 */ 964 */
965struct bfa_rport_qos_attr_s { 965struct bfa_rport_qos_attr_s {
@@ -987,7 +987,7 @@ struct bfa_itnim_ioprofile_s {
987 struct bfa_itnim_latency_s io_latency; 987 struct bfa_itnim_latency_s io_latency;
988}; 988};
989 989
990/** 990/*
991 * FC physical port statistics. 991 * FC physical port statistics.
992 */ 992 */
993struct bfa_port_fc_stats_s { 993struct bfa_port_fc_stats_s {
@@ -1022,7 +1022,7 @@ struct bfa_port_fc_stats_s {
1022 u64 err_enc; /* Encoding err frame_8b10b */ 1022 u64 err_enc; /* Encoding err frame_8b10b */
1023}; 1023};
1024 1024
1025/** 1025/*
1026 * Eth Physical Port statistics. 1026 * Eth Physical Port statistics.
1027 */ 1027 */
1028struct bfa_port_eth_stats_s { 1028struct bfa_port_eth_stats_s {
@@ -1070,7 +1070,7 @@ struct bfa_port_eth_stats_s {
1070 u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */ 1070 u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */
1071}; 1071};
1072 1072
1073/** 1073/*
1074 * Port statistics. 1074 * Port statistics.
1075 */ 1075 */
1076union bfa_port_stats_u { 1076union bfa_port_stats_u {
diff --git a/drivers/scsi/bfa/bfa_drv.c b/drivers/scsi/bfa/bfa_drv.c
index 14127646dc5..0222d7c88a9 100644
--- a/drivers/scsi/bfa/bfa_drv.c
+++ b/drivers/scsi/bfa/bfa_drv.c
@@ -17,7 +17,7 @@
17 17
18#include "bfa_modules.h" 18#include "bfa_modules.h"
19 19
20/** 20/*
21 * BFA module list terminated by NULL 21 * BFA module list terminated by NULL
22 */ 22 */
23struct bfa_module_s *hal_mods[] = { 23struct bfa_module_s *hal_mods[] = {
@@ -31,7 +31,7 @@ struct bfa_module_s *hal_mods[] = {
31 NULL 31 NULL
32}; 32};
33 33
34/** 34/*
35 * Message handlers for various modules. 35 * Message handlers for various modules.
36 */ 36 */
37bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { 37bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
@@ -70,7 +70,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
70}; 70};
71 71
72 72
73/** 73/*
74 * Message handlers for mailbox command classes 74 * Message handlers for mailbox command classes
75 */ 75 */
76bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { 76bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 6eff705564e..e929d25b09e 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1029,7 +1029,7 @@ struct link_e2e_beacon_req_s {
1029 struct link_e2e_beacon_param_s beacon_parm; 1029 struct link_e2e_beacon_param_s beacon_parm;
1030}; 1030};
1031 1031
1032/** 1032/*
1033 * If RPSC request is sent to the Domain Controller, the request is for 1033 * If RPSC request is sent to the Domain Controller, the request is for
1034 * all the ports within that domain (TODO - I don't think FOS implements 1034 * all the ports within that domain (TODO - I don't think FOS implements
1035 * this...). 1035 * this...).
@@ -1049,7 +1049,7 @@ struct fc_rpsc_acc_s {
1049 struct fc_rpsc_speed_info_s speed_info[1]; 1049 struct fc_rpsc_speed_info_s speed_info[1];
1050}; 1050};
1051 1051
1052/** 1052/*
1053 * If RPSC2 request is sent to the Domain Controller, 1053 * If RPSC2 request is sent to the Domain Controller,
1054 */ 1054 */
1055#define FC_BRCD_TOKEN 0x42524344 1055#define FC_BRCD_TOKEN 0x42524344
@@ -1094,7 +1094,7 @@ struct fc_rpsc2_acc_s {
1094 struct fc_rpsc2_port_info_s port_info[1]; /* port information */ 1094 struct fc_rpsc2_port_info_s port_info[1]; /* port information */
1095}; 1095};
1096 1096
1097/** 1097/*
1098 * bit fields so that multiple classes can be specified 1098 * bit fields so that multiple classes can be specified
1099 */ 1099 */
1100enum fc_cos { 1100enum fc_cos {
@@ -1131,7 +1131,7 @@ struct fc_alpabm_s {
1131#define FC_VF_ID_MAX 0xEFF 1131#define FC_VF_ID_MAX 0xEFF
1132#define FC_VF_ID_CTL 0xFEF /* control VF_ID */ 1132#define FC_VF_ID_CTL 0xFEF /* control VF_ID */
1133 1133
1134/** 1134/*
1135 * Virtual Fabric Tagging header format 1135 * Virtual Fabric Tagging header format
1136 * @caution This is defined only in BIG ENDIAN format. 1136 * @caution This is defined only in BIG ENDIAN format.
1137 */ 1137 */
@@ -1463,7 +1463,7 @@ struct fcgs_gidpn_resp_s {
1463 u32 dap:24; /* port identifier */ 1463 u32 dap:24; /* port identifier */
1464}; 1464};
1465 1465
1466/** 1466/*
1467 * RFT_ID 1467 * RFT_ID
1468 */ 1468 */
1469struct fcgs_rftid_req_s { 1469struct fcgs_rftid_req_s {
@@ -1472,7 +1472,7 @@ struct fcgs_rftid_req_s {
1472 u32 fc4_type[8]; /* fc4 types */ 1472 u32 fc4_type[8]; /* fc4 types */
1473}; 1473};
1474 1474
1475/** 1475/*
1476 * RFF_ID : Register FC4 features. 1476 * RFF_ID : Register FC4 features.
1477 */ 1477 */
1478 1478
@@ -1487,7 +1487,7 @@ struct fcgs_rffid_req_s {
1487 u32 fc4_type:8; /* corresponding FC4 Type */ 1487 u32 fc4_type:8; /* corresponding FC4 Type */
1488}; 1488};
1489 1489
1490/** 1490/*
1491 * GID_FT Request 1491 * GID_FT Request
1492 */ 1492 */
1493struct fcgs_gidft_req_s { 1493struct fcgs_gidft_req_s {
@@ -1497,7 +1497,7 @@ struct fcgs_gidft_req_s {
1497 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */ 1497 u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */
1498}; /* GID_FT Request */ 1498}; /* GID_FT Request */
1499 1499
1500/** 1500/*
1501 * GID_FT Response 1501 * GID_FT Response
1502 */ 1502 */
1503struct fcgs_gidft_resp_s { 1503struct fcgs_gidft_resp_s {
@@ -1506,7 +1506,7 @@ struct fcgs_gidft_resp_s {
1506 u32 pid:24; /* port identifier */ 1506 u32 pid:24; /* port identifier */
1507}; /* GID_FT Response */ 1507}; /* GID_FT Response */
1508 1508
1509/** 1509/*
1510 * RSPN_ID 1510 * RSPN_ID
1511 */ 1511 */
1512struct fcgs_rspnid_req_s { 1512struct fcgs_rspnid_req_s {
@@ -1516,7 +1516,7 @@ struct fcgs_rspnid_req_s {
1516 u8 spn[256]; /* symbolic port name */ 1516 u8 spn[256]; /* symbolic port name */
1517}; 1517};
1518 1518
1519/** 1519/*
1520 * RPN_ID 1520 * RPN_ID
1521 */ 1521 */
1522struct fcgs_rpnid_req_s { 1522struct fcgs_rpnid_req_s {
@@ -1525,7 +1525,7 @@ struct fcgs_rpnid_req_s {
1525 wwn_t port_name; 1525 wwn_t port_name;
1526}; 1526};
1527 1527
1528/** 1528/*
1529 * RNN_ID 1529 * RNN_ID
1530 */ 1530 */
1531struct fcgs_rnnid_req_s { 1531struct fcgs_rnnid_req_s {
@@ -1534,7 +1534,7 @@ struct fcgs_rnnid_req_s {
1534 wwn_t node_name; 1534 wwn_t node_name;
1535}; 1535};
1536 1536
1537/** 1537/*
1538 * RCS_ID 1538 * RCS_ID
1539 */ 1539 */
1540struct fcgs_rcsid_req_s { 1540struct fcgs_rcsid_req_s {
@@ -1543,7 +1543,7 @@ struct fcgs_rcsid_req_s {
1543 u32 cos; 1543 u32 cos;
1544}; 1544};
1545 1545
1546/** 1546/*
1547 * RPT_ID 1547 * RPT_ID
1548 */ 1548 */
1549struct fcgs_rptid_req_s { 1549struct fcgs_rptid_req_s {
@@ -1553,7 +1553,7 @@ struct fcgs_rptid_req_s {
1553 u32 rsvd1:24; 1553 u32 rsvd1:24;
1554}; 1554};
1555 1555
1556/** 1556/*
1557 * GA_NXT Request 1557 * GA_NXT Request
1558 */ 1558 */
1559struct fcgs_ganxt_req_s { 1559struct fcgs_ganxt_req_s {
@@ -1561,7 +1561,7 @@ struct fcgs_ganxt_req_s {
1561 u32 port_id:24; 1561 u32 port_id:24;
1562}; 1562};
1563 1563
1564/** 1564/*
1565 * GA_NXT Response 1565 * GA_NXT Response
1566 */ 1566 */
1567struct fcgs_ganxt_rsp_s { 1567struct fcgs_ganxt_rsp_s {
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b7d2657ca82..9c725314b51 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -94,13 +94,13 @@ fcbuild_init(void)
94 */ 94 */
95 plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; 95 plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
96 plogi_tmpl.csp.verlo = FC_PH_VER_4_3; 96 plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
97 plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004); 97 plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004);
98 plogi_tmpl.csp.ciro = 0x1; 98 plogi_tmpl.csp.ciro = 0x1;
99 plogi_tmpl.csp.cisc = 0x0; 99 plogi_tmpl.csp.cisc = 0x0;
100 plogi_tmpl.csp.altbbcred = 0x0; 100 plogi_tmpl.csp.altbbcred = 0x0;
101 plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF); 101 plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF);
102 plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002); 102 plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002);
103 plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000); 103 plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000);
104 104
105 plogi_tmpl.class3.class_valid = 1; 105 plogi_tmpl.class3.class_valid = 1;
106 plogi_tmpl.class3.sequential = 1; 106 plogi_tmpl.class3.sequential = 1;
@@ -112,7 +112,7 @@ fcbuild_init(void)
112 */ 112 */
113 prli_tmpl.command = FC_ELS_PRLI; 113 prli_tmpl.command = FC_ELS_PRLI;
114 prli_tmpl.pglen = 0x10; 114 prli_tmpl.pglen = 0x10;
115 prli_tmpl.pagebytes = bfa_os_htons(0x0014); 115 prli_tmpl.pagebytes = cpu_to_be16(0x0014);
116 prli_tmpl.parampage.type = FC_TYPE_FCP; 116 prli_tmpl.parampage.type = FC_TYPE_FCP;
117 prli_tmpl.parampage.imagepair = 1; 117 prli_tmpl.parampage.imagepair = 1;
118 prli_tmpl.parampage.servparams.rxrdisab = 1; 118 prli_tmpl.parampage.servparams.rxrdisab = 1;
@@ -137,7 +137,7 @@ fcbuild_init(void)
137static void 137static void
138fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) 138fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
139{ 139{
140 bfa_os_memset(fchs, 0, sizeof(struct fchs_s)); 140 memset(fchs, 0, sizeof(struct fchs_s));
141 141
142 fchs->routing = FC_RTG_FC4_DEV_DATA; 142 fchs->routing = FC_RTG_FC4_DEV_DATA;
143 fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; 143 fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
@@ -148,9 +148,9 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
148 fchs->rx_id = FC_RXID_ANY; 148 fchs->rx_id = FC_RXID_ANY;
149 fchs->d_id = (d_id); 149 fchs->d_id = (d_id);
150 fchs->s_id = (s_id); 150 fchs->s_id = (s_id);
151 fchs->ox_id = bfa_os_htons(ox_id); 151 fchs->ox_id = cpu_to_be16(ox_id);
152 152
153 /** 153 /*
154 * @todo no need to set ox_id for request 154 * @todo no need to set ox_id for request
155 * no need to set rx_id for response 155 * no need to set rx_id for response
156 */ 156 */
@@ -159,16 +159,16 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
159void 159void
160fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 160fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
161{ 161{
162 bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); 162 memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
163 fchs->d_id = (d_id); 163 fchs->d_id = (d_id);
164 fchs->s_id = (s_id); 164 fchs->s_id = (s_id);
165 fchs->ox_id = bfa_os_htons(ox_id); 165 fchs->ox_id = cpu_to_be16(ox_id);
166} 166}
167 167
168static void 168static void
169fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 169fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
170{ 170{
171 bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); 171 memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
172 fchs->d_id = d_id; 172 fchs->d_id = d_id;
173 fchs->s_id = s_id; 173 fchs->s_id = s_id;
174 fchs->ox_id = ox_id; 174 fchs->ox_id = ox_id;
@@ -198,7 +198,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
198static void 198static void
199fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 199fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
200{ 200{
201 bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); 201 memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
202 fchs->d_id = d_id; 202 fchs->d_id = d_id;
203 fchs->s_id = s_id; 203 fchs->s_id = s_id;
204 fchs->ox_id = ox_id; 204 fchs->ox_id = ox_id;
@@ -211,7 +211,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
211{ 211{
212 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); 212 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
213 213
214 bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 214 memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
215 215
216 plogi->els_cmd.els_code = els_code; 216 plogi->els_cmd.els_code = els_code;
217 if (els_code == FC_ELS_PLOGI) 217 if (els_code == FC_ELS_PLOGI)
@@ -219,10 +219,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
219 else 219 else
220 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 220 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
221 221
222 plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size); 222 plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
223 223
224 bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); 224 memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
225 bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); 225 memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
226 226
227 return sizeof(struct fc_logi_s); 227 return sizeof(struct fc_logi_s);
228} 228}
@@ -235,12 +235,12 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
235 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); 235 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
236 u32 *vvl_info; 236 u32 *vvl_info;
237 237
238 bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 238 memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
239 239
240 flogi->els_cmd.els_code = FC_ELS_FLOGI; 240 flogi->els_cmd.els_code = FC_ELS_FLOGI;
241 fc_els_req_build(fchs, d_id, s_id, ox_id); 241 fc_els_req_build(fchs, d_id, s_id, ox_id);
242 242
243 flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); 243 flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
244 flogi->port_name = port_name; 244 flogi->port_name = port_name;
245 flogi->node_name = node_name; 245 flogi->node_name = node_name;
246 246
@@ -253,14 +253,14 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
253 /* set AUTH capability */ 253 /* set AUTH capability */
254 flogi->csp.security = set_auth; 254 flogi->csp.security = set_auth;
255 255
256 flogi->csp.bbcred = bfa_os_htons(local_bb_credits); 256 flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
257 257
258 /* Set brcd token in VVL */ 258 /* Set brcd token in VVL */
259 vvl_info = (u32 *)&flogi->vvl[0]; 259 vvl_info = (u32 *)&flogi->vvl[0];
260 260
261 /* set the flag to indicate the presence of VVL */ 261 /* set the flag to indicate the presence of VVL */
262 flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */ 262 flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */
263 vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD); 263 vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD);
264 264
265 return sizeof(struct fc_logi_s); 265 return sizeof(struct fc_logi_s);
266} 266}
@@ -272,15 +272,15 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
272{ 272{
273 u32 d_id = 0; 273 u32 d_id = 0;
274 274
275 bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 275 memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
276 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 276 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
277 277
278 flogi->els_cmd.els_code = FC_ELS_ACC; 278 flogi->els_cmd.els_code = FC_ELS_ACC;
279 flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); 279 flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
280 flogi->port_name = port_name; 280 flogi->port_name = port_name;
281 flogi->node_name = node_name; 281 flogi->node_name = node_name;
282 282
283 flogi->csp.bbcred = bfa_os_htons(local_bb_credits); 283 flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
284 284
285 return sizeof(struct fc_logi_s); 285 return sizeof(struct fc_logi_s);
286} 286}
@@ -291,12 +291,12 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
291{ 291{
292 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); 292 u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
293 293
294 bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 294 memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
295 295
296 flogi->els_cmd.els_code = FC_ELS_FDISC; 296 flogi->els_cmd.els_code = FC_ELS_FDISC;
297 fc_els_req_build(fchs, d_id, s_id, ox_id); 297 fc_els_req_build(fchs, d_id, s_id, ox_id);
298 298
299 flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); 299 flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
300 flogi->port_name = port_name; 300 flogi->port_name = port_name;
301 flogi->node_name = node_name; 301 flogi->node_name = node_name;
302 302
@@ -346,7 +346,7 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
346 if (!plogi->class3.class_valid) 346 if (!plogi->class3.class_valid)
347 return FC_PARSE_FAILURE; 347 return FC_PARSE_FAILURE;
348 348
349 if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) 349 if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
350 return FC_PARSE_FAILURE; 350 return FC_PARSE_FAILURE;
351 351
352 return FC_PARSE_OK; 352 return FC_PARSE_OK;
@@ -363,8 +363,8 @@ fc_plogi_parse(struct fchs_s *fchs)
363 if (plogi->class3.class_valid != 1) 363 if (plogi->class3.class_valid != 1)
364 return FC_PARSE_FAILURE; 364 return FC_PARSE_FAILURE;
365 365
366 if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ) 366 if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ)
367 || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ) 367 || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ)
368 || (plogi->class3.rxsz == 0)) 368 || (plogi->class3.rxsz == 0))
369 return FC_PARSE_FAILURE; 369 return FC_PARSE_FAILURE;
370 370
@@ -378,7 +378,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
378 struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 378 struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
379 379
380 fc_els_req_build(fchs, d_id, s_id, ox_id); 380 fc_els_req_build(fchs, d_id, s_id, ox_id);
381 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); 381 memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
382 382
383 prli->command = FC_ELS_PRLI; 383 prli->command = FC_ELS_PRLI;
384 prli->parampage.servparams.initiator = 1; 384 prli->parampage.servparams.initiator = 1;
@@ -397,7 +397,7 @@ fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
397 struct fc_prli_s *prli = (struct fc_prli_s *) (pld); 397 struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
398 398
399 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 399 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
400 bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); 400 memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
401 401
402 prli->command = FC_ELS_ACC; 402 prli->command = FC_ELS_ACC;
403 403
@@ -448,7 +448,7 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
448{ 448{
449 fc_els_req_build(fchs, d_id, s_id, ox_id); 449 fc_els_req_build(fchs, d_id, s_id, ox_id);
450 450
451 bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s)); 451 memset(logo, '\0', sizeof(struct fc_logo_s));
452 logo->els_cmd.els_code = FC_ELS_LOGO; 452 logo->els_cmd.els_code = FC_ELS_LOGO;
453 logo->nport_id = (s_id); 453 logo->nport_id = (s_id);
454 logo->orig_port_name = port_name; 454 logo->orig_port_name = port_name;
@@ -461,7 +461,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
461 u32 s_id, u16 ox_id, wwn_t port_name, 461 u32 s_id, u16 ox_id, wwn_t port_name,
462 wwn_t node_name, u8 els_code) 462 wwn_t node_name, u8 els_code)
463{ 463{
464 bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s)); 464 memset(adisc, '\0', sizeof(struct fc_adisc_s));
465 465
466 adisc->els_cmd.els_code = els_code; 466 adisc->els_cmd.els_code = els_code;
467 467
@@ -537,7 +537,7 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
537 if (pdisc->class3.class_valid != 1) 537 if (pdisc->class3.class_valid != 1)
538 return FC_PARSE_FAILURE; 538 return FC_PARSE_FAILURE;
539 539
540 if ((bfa_os_ntohs(pdisc->class3.rxsz) < 540 if ((be16_to_cpu(pdisc->class3.rxsz) <
541 (FC_MIN_PDUSZ - sizeof(struct fchs_s))) 541 (FC_MIN_PDUSZ - sizeof(struct fchs_s)))
542 || (pdisc->class3.rxsz == 0)) 542 || (pdisc->class3.rxsz == 0))
543 return FC_PARSE_FAILURE; 543 return FC_PARSE_FAILURE;
@@ -554,11 +554,11 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
554u16 554u16
555fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) 555fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
556{ 556{
557 bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); 557 memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
558 fchs->cat_info = FC_CAT_ABTS; 558 fchs->cat_info = FC_CAT_ABTS;
559 fchs->d_id = (d_id); 559 fchs->d_id = (d_id);
560 fchs->s_id = (s_id); 560 fchs->s_id = (s_id);
561 fchs->ox_id = bfa_os_htons(ox_id); 561 fchs->ox_id = cpu_to_be16(ox_id);
562 562
563 return sizeof(struct fchs_s); 563 return sizeof(struct fchs_s);
564} 564}
@@ -582,9 +582,9 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
582 /* 582 /*
583 * build rrq payload 583 * build rrq payload
584 */ 584 */
585 bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); 585 memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
586 rrq->s_id = (s_id); 586 rrq->s_id = (s_id);
587 rrq->ox_id = bfa_os_htons(rrq_oxid); 587 rrq->ox_id = cpu_to_be16(rrq_oxid);
588 rrq->rx_id = FC_RXID_ANY; 588 rrq->rx_id = FC_RXID_ANY;
589 589
590 return sizeof(struct fc_rrq_s); 590 return sizeof(struct fc_rrq_s);
@@ -598,7 +598,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
598 598
599 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 599 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
600 600
601 bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s)); 601 memset(acc, 0, sizeof(struct fc_els_cmd_s));
602 acc->els_code = FC_ELS_ACC; 602 acc->els_code = FC_ELS_ACC;
603 603
604 return sizeof(struct fc_els_cmd_s); 604 return sizeof(struct fc_els_cmd_s);
@@ -610,7 +610,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
610 u8 reason_code_expl) 610 u8 reason_code_expl)
611{ 611{
612 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 612 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
613 bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); 613 memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
614 614
615 ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; 615 ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
616 ls_rjt->reason_code = reason_code; 616 ls_rjt->reason_code = reason_code;
@@ -626,7 +626,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
626{ 626{
627 fc_bls_rsp_build(fchs, d_id, s_id, ox_id); 627 fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
628 628
629 bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); 629 memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
630 630
631 fchs->rx_id = rx_id; 631 fchs->rx_id = rx_id;
632 632
@@ -641,7 +641,7 @@ fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
641 u32 s_id, u16 ox_id) 641 u32 s_id, u16 ox_id)
642{ 642{
643 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 643 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
644 bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); 644 memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
645 els_cmd->els_code = FC_ELS_ACC; 645 els_cmd->els_code = FC_ELS_ACC;
646 646
647 return sizeof(struct fc_els_cmd_s); 647 return sizeof(struct fc_els_cmd_s);
@@ -656,10 +656,10 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
656 656
657 if (els_code == FC_ELS_PRLO) { 657 if (els_code == FC_ELS_PRLO) {
658 prlo = (struct fc_prlo_s *) (fc_frame + 1); 658 prlo = (struct fc_prlo_s *) (fc_frame + 1);
659 num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16; 659 num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;
660 } else { 660 } else {
661 tprlo = (struct fc_tprlo_s *) (fc_frame + 1); 661 tprlo = (struct fc_tprlo_s *) (fc_frame + 1);
662 num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; 662 num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
663 } 663 }
664 return num_pages; 664 return num_pages;
665} 665}
@@ -672,11 +672,11 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
672 672
673 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 673 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
674 674
675 bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4); 675 memset(tprlo_acc, 0, (num_pages * 16) + 4);
676 tprlo_acc->command = FC_ELS_ACC; 676 tprlo_acc->command = FC_ELS_ACC;
677 677
678 tprlo_acc->page_len = 0x10; 678 tprlo_acc->page_len = 0x10;
679 tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); 679 tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
680 680
681 for (page = 0; page < num_pages; page++) { 681 for (page = 0; page < num_pages; page++) {
682 tprlo_acc->tprlo_acc_params[page].opa_valid = 0; 682 tprlo_acc->tprlo_acc_params[page].opa_valid = 0;
@@ -685,7 +685,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
685 tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; 685 tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
686 tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; 686 tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
687 } 687 }
688 return bfa_os_ntohs(tprlo_acc->payload_len); 688 return be16_to_cpu(tprlo_acc->payload_len);
689} 689}
690 690
691u16 691u16
@@ -696,10 +696,10 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
696 696
697 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 697 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
698 698
699 bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4); 699 memset(prlo_acc, 0, (num_pages * 16) + 4);
700 prlo_acc->command = FC_ELS_ACC; 700 prlo_acc->command = FC_ELS_ACC;
701 prlo_acc->page_len = 0x10; 701 prlo_acc->page_len = 0x10;
702 prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); 702 prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
703 703
704 for (page = 0; page < num_pages; page++) { 704 for (page = 0; page < num_pages; page++) {
705 prlo_acc->prlo_acc_params[page].opa_valid = 0; 705 prlo_acc->prlo_acc_params[page].opa_valid = 0;
@@ -709,7 +709,7 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
709 prlo_acc->prlo_acc_params[page].resp_process_assc = 0; 709 prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
710 } 710 }
711 711
712 return bfa_os_ntohs(prlo_acc->payload_len); 712 return be16_to_cpu(prlo_acc->payload_len);
713} 713}
714 714
715u16 715u16
@@ -718,7 +718,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
718{ 718{
719 fc_els_req_build(fchs, d_id, s_id, ox_id); 719 fc_els_req_build(fchs, d_id, s_id, ox_id);
720 720
721 bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); 721 memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
722 722
723 rnid->els_cmd.els_code = FC_ELS_RNID; 723 rnid->els_cmd.els_code = FC_ELS_RNID;
724 rnid->node_id_data_format = data_format; 724 rnid->node_id_data_format = data_format;
@@ -732,7 +732,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
732 struct fc_rnid_common_id_data_s *common_id_data, 732 struct fc_rnid_common_id_data_s *common_id_data,
733 struct fc_rnid_general_topology_data_s *gen_topo_data) 733 struct fc_rnid_general_topology_data_s *gen_topo_data)
734{ 734{
735 bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); 735 memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
736 736
737 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 737 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
738 738
@@ -745,7 +745,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
745 if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { 745 if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
746 rnid_acc->specific_id_data_length = 746 rnid_acc->specific_id_data_length =
747 sizeof(struct fc_rnid_general_topology_data_s); 747 sizeof(struct fc_rnid_general_topology_data_s);
748 bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data); 748 rnid_acc->gen_topology_data = *gen_topo_data;
749 return sizeof(struct fc_rnid_acc_s); 749 return sizeof(struct fc_rnid_acc_s);
750 } else { 750 } else {
751 return sizeof(struct fc_rnid_acc_s) - 751 return sizeof(struct fc_rnid_acc_s) -
@@ -760,7 +760,7 @@ fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
760{ 760{
761 fc_els_req_build(fchs, d_id, s_id, ox_id); 761 fc_els_req_build(fchs, d_id, s_id, ox_id);
762 762
763 bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); 763 memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
764 764
765 rpsc->els_cmd.els_code = FC_ELS_RPSC; 765 rpsc->els_cmd.els_code = FC_ELS_RPSC;
766 return sizeof(struct fc_rpsc_cmd_s); 766 return sizeof(struct fc_rpsc_cmd_s);
@@ -775,11 +775,11 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
775 775
776 fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0); 776 fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
777 777
778 bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); 778 memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
779 779
780 rpsc2->els_cmd.els_code = FC_ELS_RPSC; 780 rpsc2->els_cmd.els_code = FC_ELS_RPSC;
781 rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN); 781 rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN);
782 rpsc2->num_pids = bfa_os_htons(npids); 782 rpsc2->num_pids = cpu_to_be16(npids);
783 for (i = 0; i < npids; i++) 783 for (i = 0; i < npids; i++)
784 rpsc2->pid_list[i].pid = pid_list[i]; 784 rpsc2->pid_list[i].pid = pid_list[i];
785 785
@@ -791,18 +791,18 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
791 u32 d_id, u32 s_id, u16 ox_id, 791 u32 d_id, u32 s_id, u16 ox_id,
792 struct fc_rpsc_speed_info_s *oper_speed) 792 struct fc_rpsc_speed_info_s *oper_speed)
793{ 793{
794 bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); 794 memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
795 795
796 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 796 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
797 797
798 rpsc_acc->command = FC_ELS_ACC; 798 rpsc_acc->command = FC_ELS_ACC;
799 rpsc_acc->num_entries = bfa_os_htons(1); 799 rpsc_acc->num_entries = cpu_to_be16(1);
800 800
801 rpsc_acc->speed_info[0].port_speed_cap = 801 rpsc_acc->speed_info[0].port_speed_cap =
802 bfa_os_htons(oper_speed->port_speed_cap); 802 cpu_to_be16(oper_speed->port_speed_cap);
803 803
804 rpsc_acc->speed_info[0].port_op_speed = 804 rpsc_acc->speed_info[0].port_op_speed =
805 bfa_os_htons(oper_speed->port_op_speed); 805 cpu_to_be16(oper_speed->port_op_speed);
806 806
807 return sizeof(struct fc_rpsc_acc_s); 807 return sizeof(struct fc_rpsc_acc_s);
808} 808}
@@ -830,12 +830,12 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
830{ 830{
831 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); 831 struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
832 832
833 bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); 833 memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
834 834
835 pdisc->els_cmd.els_code = FC_ELS_PDISC; 835 pdisc->els_cmd.els_code = FC_ELS_PDISC;
836 fc_els_req_build(fchs, d_id, s_id, ox_id); 836 fc_els_req_build(fchs, d_id, s_id, ox_id);
837 837
838 pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size); 838 pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size);
839 pdisc->port_name = port_name; 839 pdisc->port_name = port_name;
840 pdisc->node_name = node_name; 840 pdisc->node_name = node_name;
841 841
@@ -859,7 +859,7 @@ fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
859 if (!pdisc->class3.class_valid) 859 if (!pdisc->class3.class_valid)
860 return FC_PARSE_NWWN_NOT_EQUAL; 860 return FC_PARSE_NWWN_NOT_EQUAL;
861 861
862 if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) 862 if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
863 return FC_PARSE_RXSZ_INVAL; 863 return FC_PARSE_RXSZ_INVAL;
864 864
865 return FC_PARSE_OK; 865 return FC_PARSE_OK;
@@ -873,10 +873,10 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
873 int page; 873 int page;
874 874
875 fc_els_req_build(fchs, d_id, s_id, ox_id); 875 fc_els_req_build(fchs, d_id, s_id, ox_id);
876 bfa_os_memset(prlo, 0, (num_pages * 16) + 4); 876 memset(prlo, 0, (num_pages * 16) + 4);
877 prlo->command = FC_ELS_PRLO; 877 prlo->command = FC_ELS_PRLO;
878 prlo->page_len = 0x10; 878 prlo->page_len = 0x10;
879 prlo->payload_len = bfa_os_htons((num_pages * 16) + 4); 879 prlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
880 880
881 for (page = 0; page < num_pages; page++) { 881 for (page = 0; page < num_pages; page++) {
882 prlo->prlo_params[page].type = FC_TYPE_FCP; 882 prlo->prlo_params[page].type = FC_TYPE_FCP;
@@ -886,7 +886,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
886 prlo->prlo_params[page].resp_process_assc = 0; 886 prlo->prlo_params[page].resp_process_assc = 0;
887 } 887 }
888 888
889 return bfa_os_ntohs(prlo->payload_len); 889 return be16_to_cpu(prlo->payload_len);
890} 890}
891 891
892u16 892u16
@@ -901,7 +901,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
901 if (prlo->command != FC_ELS_ACC) 901 if (prlo->command != FC_ELS_ACC)
902 return FC_PARSE_FAILURE; 902 return FC_PARSE_FAILURE;
903 903
904 num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16; 904 num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
905 905
906 for (page = 0; page < num_pages; page++) { 906 for (page = 0; page < num_pages; page++) {
907 if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP) 907 if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
@@ -931,10 +931,10 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
931 int page; 931 int page;
932 932
933 fc_els_req_build(fchs, d_id, s_id, ox_id); 933 fc_els_req_build(fchs, d_id, s_id, ox_id);
934 bfa_os_memset(tprlo, 0, (num_pages * 16) + 4); 934 memset(tprlo, 0, (num_pages * 16) + 4);
935 tprlo->command = FC_ELS_TPRLO; 935 tprlo->command = FC_ELS_TPRLO;
936 tprlo->page_len = 0x10; 936 tprlo->page_len = 0x10;
937 tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4); 937 tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
938 938
939 for (page = 0; page < num_pages; page++) { 939 for (page = 0; page < num_pages; page++) {
940 tprlo->tprlo_params[page].type = FC_TYPE_FCP; 940 tprlo->tprlo_params[page].type = FC_TYPE_FCP;
@@ -950,7 +950,7 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
950 } 950 }
951 } 951 }
952 952
953 return bfa_os_ntohs(tprlo->payload_len); 953 return be16_to_cpu(tprlo->payload_len);
954} 954}
955 955
956u16 956u16
@@ -965,7 +965,7 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
965 if (tprlo->command != FC_ELS_ACC) 965 if (tprlo->command != FC_ELS_ACC)
966 return FC_PARSE_ACC_INVAL; 966 return FC_PARSE_ACC_INVAL;
967 967
968 num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; 968 num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
969 969
970 for (page = 0; page < num_pages; page++) { 970 for (page = 0; page < num_pages; page++) {
971 if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP) 971 if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
@@ -1011,32 +1011,32 @@ fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
1011static void 1011static void
1012fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) 1012fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
1013{ 1013{
1014 bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); 1014 memset(cthdr, 0, sizeof(struct ct_hdr_s));
1015 cthdr->rev_id = CT_GS3_REVISION; 1015 cthdr->rev_id = CT_GS3_REVISION;
1016 cthdr->gs_type = CT_GSTYPE_DIRSERVICE; 1016 cthdr->gs_type = CT_GSTYPE_DIRSERVICE;
1017 cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER; 1017 cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER;
1018 cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); 1018 cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
1019} 1019}
1020 1020
1021static void 1021static void
1022fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) 1022fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
1023{ 1023{
1024 bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); 1024 memset(cthdr, 0, sizeof(struct ct_hdr_s));
1025 cthdr->rev_id = CT_GS3_REVISION; 1025 cthdr->rev_id = CT_GS3_REVISION;
1026 cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; 1026 cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
1027 cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER; 1027 cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER;
1028 cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); 1028 cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
1029} 1029}
1030 1030
1031static void 1031static void
1032fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code, 1032fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code,
1033 u8 sub_type) 1033 u8 sub_type)
1034{ 1034{
1035 bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); 1035 memset(cthdr, 0, sizeof(struct ct_hdr_s));
1036 cthdr->rev_id = CT_GS3_REVISION; 1036 cthdr->rev_id = CT_GS3_REVISION;
1037 cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; 1037 cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
1038 cthdr->gs_sub_type = sub_type; 1038 cthdr->gs_sub_type = sub_type;
1039 cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); 1039 cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
1040} 1040}
1041 1041
1042u16 1042u16
@@ -1050,7 +1050,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1050 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1050 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1051 fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); 1051 fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
1052 1052
1053 bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); 1053 memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
1054 gidpn->port_name = port_name; 1054 gidpn->port_name = port_name;
1055 return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s); 1055 return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s);
1056} 1056}
@@ -1066,7 +1066,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1066 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1066 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1067 fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); 1067 fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
1068 1068
1069 bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); 1069 memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
1070 gpnid->dap = port_id; 1070 gpnid->dap = port_id;
1071 return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); 1071 return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s);
1072} 1072}
@@ -1082,7 +1082,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1082 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1082 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1083 fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); 1083 fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
1084 1084
1085 bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); 1085 memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
1086 gnnid->dap = port_id; 1086 gnnid->dap = port_id;
1087 return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); 1087 return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s);
1088} 1088}
@@ -1090,7 +1090,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1090u16 1090u16
1091fc_ct_rsp_parse(struct ct_hdr_s *cthdr) 1091fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
1092{ 1092{
1093 if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { 1093 if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
1094 if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY) 1094 if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY)
1095 return FC_PARSE_BUSY; 1095 return FC_PARSE_BUSY;
1096 else 1096 else
@@ -1108,7 +1108,7 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
1108 1108
1109 fc_els_req_build(fchs, d_id, s_id, ox_id); 1109 fc_els_req_build(fchs, d_id, s_id, ox_id);
1110 1110
1111 bfa_os_memset(scr, 0, sizeof(struct fc_scr_s)); 1111 memset(scr, 0, sizeof(struct fc_scr_s));
1112 scr->command = FC_ELS_SCR; 1112 scr->command = FC_ELS_SCR;
1113 scr->reg_func = FC_SCR_REG_FUNC_FULL; 1113 scr->reg_func = FC_SCR_REG_FUNC_FULL;
1114 if (set_br_reg) 1114 if (set_br_reg)
@@ -1129,7 +1129,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
1129 rscn->pagelen = sizeof(rscn->event[0]); 1129 rscn->pagelen = sizeof(rscn->event[0]);
1130 1130
1131 payldlen = sizeof(u32) + rscn->pagelen; 1131 payldlen = sizeof(u32) + rscn->pagelen;
1132 rscn->payldlen = bfa_os_htons(payldlen); 1132 rscn->payldlen = cpu_to_be16(payldlen);
1133 1133
1134 rscn->event[0].format = FC_RSCN_FORMAT_PORTID; 1134 rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
1135 rscn->event[0].portid = s_id; 1135 rscn->event[0].portid = s_id;
@@ -1149,14 +1149,14 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1149 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1149 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1150 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); 1150 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
1151 1151
1152 bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); 1152 memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
1153 1153
1154 rftid->dap = s_id; 1154 rftid->dap = s_id;
1155 1155
1156 /* By default, FCP FC4 Type is registered */ 1156 /* By default, FCP FC4 Type is registered */
1157 index = FC_TYPE_FCP >> 5; 1157 index = FC_TYPE_FCP >> 5;
1158 type_value = 1 << (FC_TYPE_FCP % 32); 1158 type_value = 1 << (FC_TYPE_FCP % 32);
1159 rftid->fc4_type[index] = bfa_os_htonl(type_value); 1159 rftid->fc4_type[index] = cpu_to_be32(type_value);
1160 1160
1161 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); 1161 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
1162} 1162}
@@ -1172,10 +1172,10 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1172 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1172 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1173 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); 1173 fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
1174 1174
1175 bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); 1175 memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
1176 1176
1177 rftid->dap = s_id; 1177 rftid->dap = s_id;
1178 bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, 1178 memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
1179 (bitmap_size < 32 ? bitmap_size : 32)); 1179 (bitmap_size < 32 ? bitmap_size : 32));
1180 1180
1181 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); 1181 return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
@@ -1192,7 +1192,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1192 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1192 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1193 fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); 1193 fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
1194 1194
1195 bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); 1195 memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
1196 1196
1197 rffid->dap = s_id; 1197 rffid->dap = s_id;
1198 rffid->fc4ftr_bits = fc4_ftrs; 1198 rffid->fc4ftr_bits = fc4_ftrs;
@@ -1214,7 +1214,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1214 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); 1214 fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
1215 fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); 1215 fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
1216 1216
1217 bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); 1217 memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
1218 1218
1219 rspnid->dap = s_id; 1219 rspnid->dap = s_id;
1220 rspnid->spn_len = (u8) strlen((char *)name); 1220 rspnid->spn_len = (u8) strlen((char *)name);
@@ -1235,7 +1235,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
1235 1235
1236 fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT); 1236 fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT);
1237 1237
1238 bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); 1238 memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
1239 gidft->fc4_type = fc4_type; 1239 gidft->fc4_type = fc4_type;
1240 gidft->domain_id = 0; 1240 gidft->domain_id = 0;
1241 gidft->area_id = 0; 1241 gidft->area_id = 0;
@@ -1254,7 +1254,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1254 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1254 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1255 fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); 1255 fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
1256 1256
1257 bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); 1257 memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
1258 rpnid->port_id = port_id; 1258 rpnid->port_id = port_id;
1259 rpnid->port_name = port_name; 1259 rpnid->port_name = port_name;
1260 1260
@@ -1272,7 +1272,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1272 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1272 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1273 fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); 1273 fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
1274 1274
1275 bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); 1275 memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
1276 rnnid->port_id = port_id; 1276 rnnid->port_id = port_id;
1277 rnnid->node_name = node_name; 1277 rnnid->node_name = node_name;
1278 1278
@@ -1291,7 +1291,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1291 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1291 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1292 fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); 1292 fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
1293 1293
1294 bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); 1294 memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
1295 rcsid->port_id = port_id; 1295 rcsid->port_id = port_id;
1296 rcsid->cos = cos; 1296 rcsid->cos = cos;
1297 1297
@@ -1309,7 +1309,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
1309 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1309 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1310 fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); 1310 fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
1311 1311
1312 bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); 1312 memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
1313 rptid->port_id = port_id; 1313 rptid->port_id = port_id;
1314 rptid->port_type = port_type; 1314 rptid->port_type = port_type;
1315 1315
@@ -1326,7 +1326,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
1326 fc_gs_fchdr_build(fchs, d_id, s_id, 0); 1326 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1327 fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); 1327 fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
1328 1328
1329 bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); 1329 memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
1330 ganxt->port_id = port_id; 1330 ganxt->port_id = port_id;
1331 1331
1332 return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); 1332 return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s);
@@ -1365,7 +1365,7 @@ fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
1365 1365
1366 index = fc4_type >> 5; 1366 index = fc4_type >> 5;
1367 type_value = 1 << (fc4_type % 32); 1367 type_value = 1 << (fc4_type % 32);
1368 ptr[index] = bfa_os_htonl(type_value); 1368 ptr[index] = cpu_to_be32(type_value);
1369 1369
1370} 1370}
1371 1371
@@ -1383,7 +1383,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1383 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, 1383 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
1384 CT_GSSUBTYPE_CFGSERVER); 1384 CT_GSSUBTYPE_CFGSERVER);
1385 1385
1386 bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t)); 1386 memset(gmal, 0, sizeof(fcgs_gmal_req_t));
1387 gmal->wwn = wwn; 1387 gmal->wwn = wwn;
1388 1388
1389 return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); 1389 return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t);
@@ -1403,7 +1403,7 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
1403 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, 1403 fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
1404 CT_GSSUBTYPE_CFGSERVER); 1404 CT_GSSUBTYPE_CFGSERVER);
1405 1405
1406 bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t)); 1406 memset(gfn, 0, sizeof(fcgs_gfn_req_t));
1407 gfn->wwn = wwn; 1407 gfn->wwn = wwn;
1408 1408
1409 return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t); 1409 return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 33c8dd51f47..135c4427801 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -26,7 +26,7 @@ BFA_MODULE(fcpim);
26 (__l->__stats += __r->__stats) 26 (__l->__stats += __r->__stats)
27 27
28 28
29/** 29/*
30 * BFA ITNIM Related definitions 30 * BFA ITNIM Related definitions
31 */ 31 */
32static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); 32static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
@@ -72,7 +72,7 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
72 } \ 72 } \
73} while (0) 73} while (0)
74 74
75/** 75/*
76 * bfa_itnim_sm BFA itnim state machine 76 * bfa_itnim_sm BFA itnim state machine
77 */ 77 */
78 78
@@ -89,7 +89,7 @@ enum bfa_itnim_event {
89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ 89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
90}; 90};
91 91
92/** 92/*
93 * BFA IOIM related definitions 93 * BFA IOIM related definitions
94 */ 94 */
95#define bfa_ioim_move_to_comp_q(__ioim) do { \ 95#define bfa_ioim_move_to_comp_q(__ioim) do { \
@@ -107,11 +107,11 @@ enum bfa_itnim_event {
107 if ((__fcpim)->profile_start) \ 107 if ((__fcpim)->profile_start) \
108 (__fcpim)->profile_start(__ioim); \ 108 (__fcpim)->profile_start(__ioim); \
109} while (0) 109} while (0)
110/** 110/*
111 * hal_ioim_sm 111 * hal_ioim_sm
112 */ 112 */
113 113
114/** 114/*
115 * IO state machine events 115 * IO state machine events
116 */ 116 */
117enum bfa_ioim_event { 117enum bfa_ioim_event {
@@ -136,11 +136,11 @@ enum bfa_ioim_event {
136}; 136};
137 137
138 138
139/** 139/*
140 * BFA TSKIM related definitions 140 * BFA TSKIM related definitions
141 */ 141 */
142 142
143/** 143/*
144 * task management completion handling 144 * task management completion handling
145 */ 145 */
146#define bfa_tskim_qcomp(__tskim, __cbfn) do { \ 146#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
@@ -165,7 +165,7 @@ enum bfa_tskim_event {
165 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ 165 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
166}; 166};
167 167
168/** 168/*
169 * forward declaration for BFA ITNIM functions 169 * forward declaration for BFA ITNIM functions
170 */ 170 */
171static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); 171static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
@@ -183,7 +183,7 @@ static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
183static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); 183static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
184static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); 184static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
185 185
186/** 186/*
187 * forward declaration of ITNIM state machine 187 * forward declaration of ITNIM state machine
188 */ 188 */
189static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, 189static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
@@ -217,7 +217,7 @@ static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
217static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, 217static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
218 enum bfa_itnim_event event); 218 enum bfa_itnim_event event);
219 219
220/** 220/*
221 * forward declaration for BFA IOIM functions 221 * forward declaration for BFA IOIM functions
222 */ 222 */
223static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); 223static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
@@ -233,7 +233,7 @@ static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
233static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 233static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
234 234
235 235
236/** 236/*
237 * forward declaration of BFA IO state machine 237 * forward declaration of BFA IO state machine
238 */ 238 */
239static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, 239static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
@@ -261,7 +261,7 @@ static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
261static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, 261static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
262 enum bfa_ioim_event event); 262 enum bfa_ioim_event event);
263 263
264/** 264/*
265 * forward declaration for BFA TSKIM functions 265 * forward declaration for BFA TSKIM functions
266 */ 266 */
267static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); 267static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
@@ -276,7 +276,7 @@ static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
276static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); 276static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
277 277
278 278
279/** 279/*
280 * forward declaration of BFA TSKIM state machine 280 * forward declaration of BFA TSKIM state machine
281 */ 281 */
282static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, 282static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
@@ -294,11 +294,11 @@ static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
294static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, 294static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
295 enum bfa_tskim_event event); 295 enum bfa_tskim_event event);
296 296
297/** 297/*
298 * hal_fcpim_mod BFA FCP Initiator Mode module 298 * hal_fcpim_mod BFA FCP Initiator Mode module
299 */ 299 */
300 300
301/** 301/*
302 * Compute and return memory needed by FCP(im) module. 302 * Compute and return memory needed by FCP(im) module.
303 */ 303 */
304static void 304static void
@@ -307,7 +307,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
307{ 307{
308 bfa_itnim_meminfo(cfg, km_len, dm_len); 308 bfa_itnim_meminfo(cfg, km_len, dm_len);
309 309
310 /** 310 /*
311 * IO memory 311 * IO memory
312 */ 312 */
313 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) 313 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
@@ -320,7 +320,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
320 320
321 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN; 321 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
322 322
323 /** 323 /*
324 * task management command memory 324 * task management command memory
325 */ 325 */
326 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) 326 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
@@ -463,7 +463,7 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
463 struct bfa_itnim_s *itnim; 463 struct bfa_itnim_s *itnim;
464 464
465 /* accumulate IO stats from itnim */ 465 /* accumulate IO stats from itnim */
466 bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); 466 memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
467 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 467 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
468 itnim = (struct bfa_itnim_s *) qe; 468 itnim = (struct bfa_itnim_s *) qe;
469 if (itnim->rport->rport_info.lp_tag != lp_tag) 469 if (itnim->rport->rport_info.lp_tag != lp_tag)
@@ -480,7 +480,7 @@ bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
480 struct bfa_itnim_s *itnim; 480 struct bfa_itnim_s *itnim;
481 481
482 /* accumulate IO stats from itnim */ 482 /* accumulate IO stats from itnim */
483 bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s)); 483 memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
484 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 484 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
485 itnim = (struct bfa_itnim_s *) qe; 485 itnim = (struct bfa_itnim_s *) qe;
486 bfa_fcpim_add_stats(modstats, &(itnim->stats)); 486 bfa_fcpim_add_stats(modstats, &(itnim->stats));
@@ -560,7 +560,7 @@ bfa_fcpim_clr_modstats(struct bfa_s *bfa)
560 itnim = (struct bfa_itnim_s *) qe; 560 itnim = (struct bfa_itnim_s *) qe;
561 bfa_itnim_clear_stats(itnim); 561 bfa_itnim_clear_stats(itnim);
562 } 562 }
563 bfa_os_memset(&fcpim->del_itn_stats, 0, 563 memset(&fcpim->del_itn_stats, 0,
564 sizeof(struct bfa_fcpim_del_itn_stats_s)); 564 sizeof(struct bfa_fcpim_del_itn_stats_s));
565 565
566 return BFA_STATUS_OK; 566 return BFA_STATUS_OK;
@@ -604,11 +604,11 @@ bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
604 604
605 605
606 606
607/** 607/*
608 * BFA ITNIM module state machine functions 608 * BFA ITNIM module state machine functions
609 */ 609 */
610 610
611/** 611/*
612 * Beginning/unallocated state - no events expected. 612 * Beginning/unallocated state - no events expected.
613 */ 613 */
614static void 614static void
@@ -629,7 +629,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
629 } 629 }
630} 630}
631 631
632/** 632/*
633 * Beginning state, only online event expected. 633 * Beginning state, only online event expected.
634 */ 634 */
635static void 635static void
@@ -660,7 +660,7 @@ bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
660 } 660 }
661} 661}
662 662
663/** 663/*
664 * Waiting for itnim create response from firmware. 664 * Waiting for itnim create response from firmware.
665 */ 665 */
666static void 666static void
@@ -732,7 +732,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
732 } 732 }
733} 733}
734 734
735/** 735/*
736 * Waiting for itnim create response from firmware, a delete is pending. 736 * Waiting for itnim create response from firmware, a delete is pending.
737 */ 737 */
738static void 738static void
@@ -760,7 +760,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
760 } 760 }
761} 761}
762 762
763/** 763/*
764 * Online state - normal parking state. 764 * Online state - normal parking state.
765 */ 765 */
766static void 766static void
@@ -802,7 +802,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
802 } 802 }
803} 803}
804 804
805/** 805/*
806 * Second level error recovery need. 806 * Second level error recovery need.
807 */ 807 */
808static void 808static void
@@ -833,7 +833,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
833 } 833 }
834} 834}
835 835
836/** 836/*
837 * Going offline. Waiting for active IO cleanup. 837 * Going offline. Waiting for active IO cleanup.
838 */ 838 */
839static void 839static void
@@ -870,7 +870,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
870 } 870 }
871} 871}
872 872
873/** 873/*
874 * Deleting itnim. Waiting for active IO cleanup. 874 * Deleting itnim. Waiting for active IO cleanup.
875 */ 875 */
876static void 876static void
@@ -898,7 +898,7 @@ bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
898 } 898 }
899} 899}
900 900
901/** 901/*
902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response. 902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
903 */ 903 */
904static void 904static void
@@ -955,7 +955,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
955 } 955 }
956} 956}
957 957
958/** 958/*
959 * Offline state. 959 * Offline state.
960 */ 960 */
961static void 961static void
@@ -987,7 +987,7 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
987 } 987 }
988} 988}
989 989
990/** 990/*
991 * IOC h/w failed state. 991 * IOC h/w failed state.
992 */ 992 */
993static void 993static void
@@ -1023,7 +1023,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
1023 } 1023 }
1024} 1024}
1025 1025
1026/** 1026/*
1027 * Itnim is deleted, waiting for firmware response to delete. 1027 * Itnim is deleted, waiting for firmware response to delete.
1028 */ 1028 */
1029static void 1029static void
@@ -1068,7 +1068,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
1068 } 1068 }
1069} 1069}
1070 1070
1071/** 1071/*
1072 * Initiate cleanup of all IOs on an IOC failure. 1072 * Initiate cleanup of all IOs on an IOC failure.
1073 */ 1073 */
1074static void 1074static void
@@ -1088,7 +1088,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
1088 bfa_ioim_iocdisable(ioim); 1088 bfa_ioim_iocdisable(ioim);
1089 } 1089 }
1090 1090
1091 /** 1091 /*
1092 * For IO request in pending queue, we pretend an early timeout. 1092 * For IO request in pending queue, we pretend an early timeout.
1093 */ 1093 */
1094 list_for_each_safe(qe, qen, &itnim->pending_q) { 1094 list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -1102,7 +1102,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
1102 } 1102 }
1103} 1103}
1104 1104
1105/** 1105/*
1106 * IO cleanup completion 1106 * IO cleanup completion
1107 */ 1107 */
1108static void 1108static void
@@ -1114,7 +1114,7 @@ bfa_itnim_cleanp_comp(void *itnim_cbarg)
1114 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); 1114 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
1115} 1115}
1116 1116
1117/** 1117/*
1118 * Initiate cleanup of all IOs. 1118 * Initiate cleanup of all IOs.
1119 */ 1119 */
1120static void 1120static void
@@ -1129,7 +1129,7 @@ bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
1129 list_for_each_safe(qe, qen, &itnim->io_q) { 1129 list_for_each_safe(qe, qen, &itnim->io_q) {
1130 ioim = (struct bfa_ioim_s *) qe; 1130 ioim = (struct bfa_ioim_s *) qe;
1131 1131
1132 /** 1132 /*
1133 * Move IO to a cleanup queue from active queue so that a later 1133 * Move IO to a cleanup queue from active queue so that a later
1134 * TM will not pickup this IO. 1134 * TM will not pickup this IO.
1135 */ 1135 */
@@ -1176,7 +1176,7 @@ __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
1176 bfa_cb_itnim_sler(itnim->ditn); 1176 bfa_cb_itnim_sler(itnim->ditn);
1177} 1177}
1178 1178
1179/** 1179/*
1180 * Call to resume any I/O requests waiting for room in request queue. 1180 * Call to resume any I/O requests waiting for room in request queue.
1181 */ 1181 */
1182static void 1182static void
@@ -1190,7 +1190,7 @@ bfa_itnim_qresume(void *cbarg)
1190 1190
1191 1191
1192 1192
1193/** 1193/*
1194 * bfa_itnim_public 1194 * bfa_itnim_public
1195 */ 1195 */
1196 1196
@@ -1210,7 +1210,7 @@ void
1210bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 1210bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
1211 u32 *dm_len) 1211 u32 *dm_len)
1212{ 1212{
1213 /** 1213 /*
1214 * ITN memory 1214 * ITN memory
1215 */ 1215 */
1216 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); 1216 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
@@ -1229,7 +1229,7 @@ bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
1229 fcpim->itnim_arr = itnim; 1229 fcpim->itnim_arr = itnim;
1230 1230
1231 for (i = 0; i < fcpim->num_itnims; i++, itnim++) { 1231 for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
1232 bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s)); 1232 memset(itnim, 0, sizeof(struct bfa_itnim_s));
1233 itnim->bfa = bfa; 1233 itnim->bfa = bfa;
1234 itnim->fcpim = fcpim; 1234 itnim->fcpim = fcpim;
1235 itnim->reqq = BFA_REQQ_QOS_LO; 1235 itnim->reqq = BFA_REQQ_QOS_LO;
@@ -1264,7 +1264,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1264 1264
1265 itnim->msg_no++; 1265 itnim->msg_no++;
1266 1266
1267 /** 1267 /*
1268 * check for room in queue to send request now 1268 * check for room in queue to send request now
1269 */ 1269 */
1270 m = bfa_reqq_next(itnim->bfa, itnim->reqq); 1270 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1281,7 +1281,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1281 m->msg_no = itnim->msg_no; 1281 m->msg_no = itnim->msg_no;
1282 bfa_stats(itnim, fw_create); 1282 bfa_stats(itnim, fw_create);
1283 1283
1284 /** 1284 /*
1285 * queue I/O message to firmware 1285 * queue I/O message to firmware
1286 */ 1286 */
1287 bfa_reqq_produce(itnim->bfa, itnim->reqq); 1287 bfa_reqq_produce(itnim->bfa, itnim->reqq);
@@ -1293,7 +1293,7 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1293{ 1293{
1294 struct bfi_itnim_delete_req_s *m; 1294 struct bfi_itnim_delete_req_s *m;
1295 1295
1296 /** 1296 /*
1297 * check for room in queue to send request now 1297 * check for room in queue to send request now
1298 */ 1298 */
1299 m = bfa_reqq_next(itnim->bfa, itnim->reqq); 1299 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1307,14 +1307,14 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1307 m->fw_handle = itnim->rport->fw_handle; 1307 m->fw_handle = itnim->rport->fw_handle;
1308 bfa_stats(itnim, fw_delete); 1308 bfa_stats(itnim, fw_delete);
1309 1309
1310 /** 1310 /*
1311 * queue I/O message to firmware 1311 * queue I/O message to firmware
1312 */ 1312 */
1313 bfa_reqq_produce(itnim->bfa, itnim->reqq); 1313 bfa_reqq_produce(itnim->bfa, itnim->reqq);
1314 return BFA_TRUE; 1314 return BFA_TRUE;
1315} 1315}
1316 1316
1317/** 1317/*
1318 * Cleanup all pending failed inflight requests. 1318 * Cleanup all pending failed inflight requests.
1319 */ 1319 */
1320static void 1320static void
@@ -1329,7 +1329,7 @@ bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
1329 } 1329 }
1330} 1330}
1331 1331
1332/** 1332/*
1333 * Start all pending IO requests. 1333 * Start all pending IO requests.
1334 */ 1334 */
1335static void 1335static void
@@ -1339,12 +1339,12 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
1339 1339
1340 bfa_itnim_iotov_stop(itnim); 1340 bfa_itnim_iotov_stop(itnim);
1341 1341
1342 /** 1342 /*
1343 * Abort all inflight IO requests in the queue 1343 * Abort all inflight IO requests in the queue
1344 */ 1344 */
1345 bfa_itnim_delayed_comp(itnim, BFA_FALSE); 1345 bfa_itnim_delayed_comp(itnim, BFA_FALSE);
1346 1346
1347 /** 1347 /*
1348 * Start all pending IO requests. 1348 * Start all pending IO requests.
1349 */ 1349 */
1350 while (!list_empty(&itnim->pending_q)) { 1350 while (!list_empty(&itnim->pending_q)) {
@@ -1354,7 +1354,7 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
1354 } 1354 }
1355} 1355}
1356 1356
1357/** 1357/*
1358 * Fail all pending IO requests 1358 * Fail all pending IO requests
1359 */ 1359 */
1360static void 1360static void
@@ -1362,12 +1362,12 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
1362{ 1362{
1363 struct bfa_ioim_s *ioim; 1363 struct bfa_ioim_s *ioim;
1364 1364
1365 /** 1365 /*
1366 * Fail all inflight IO requests in the queue 1366 * Fail all inflight IO requests in the queue
1367 */ 1367 */
1368 bfa_itnim_delayed_comp(itnim, BFA_TRUE); 1368 bfa_itnim_delayed_comp(itnim, BFA_TRUE);
1369 1369
1370 /** 1370 /*
1371 * Fail any pending IO requests. 1371 * Fail any pending IO requests.
1372 */ 1372 */
1373 while (!list_empty(&itnim->pending_q)) { 1373 while (!list_empty(&itnim->pending_q)) {
@@ -1377,7 +1377,7 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
1377 } 1377 }
1378} 1378}
1379 1379
1380/** 1380/*
1381 * IO TOV timer callback. Fail any pending IO requests. 1381 * IO TOV timer callback. Fail any pending IO requests.
1382 */ 1382 */
1383static void 1383static void
@@ -1392,7 +1392,7 @@ bfa_itnim_iotov(void *itnim_arg)
1392 bfa_cb_itnim_tov(itnim->ditn); 1392 bfa_cb_itnim_tov(itnim->ditn);
1393} 1393}
1394 1394
1395/** 1395/*
1396 * Start IO TOV timer for failing back pending IO requests in offline state. 1396 * Start IO TOV timer for failing back pending IO requests in offline state.
1397 */ 1397 */
1398static void 1398static void
@@ -1407,7 +1407,7 @@ bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
1407 } 1407 }
1408} 1408}
1409 1409
1410/** 1410/*
1411 * Stop IO TOV timer. 1411 * Stop IO TOV timer.
1412 */ 1412 */
1413static void 1413static void
@@ -1419,7 +1419,7 @@ bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1419 } 1419 }
1420} 1420}
1421 1421
1422/** 1422/*
1423 * Stop IO TOV timer. 1423 * Stop IO TOV timer.
1424 */ 1424 */
1425static void 1425static void
@@ -1459,11 +1459,11 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1459 1459
1460 1460
1461 1461
1462/** 1462/*
1463 * bfa_itnim_public 1463 * bfa_itnim_public
1464 */ 1464 */
1465 1465
1466/** 1466/*
1467 * Itnim interrupt processing. 1467 * Itnim interrupt processing.
1468 */ 1468 */
1469void 1469void
@@ -1509,7 +1509,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1509 1509
1510 1510
1511 1511
1512/** 1512/*
1513 * bfa_itnim_api 1513 * bfa_itnim_api
1514 */ 1514 */
1515 1515
@@ -1552,7 +1552,7 @@ bfa_itnim_offline(struct bfa_itnim_s *itnim)
1552 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); 1552 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
1553} 1553}
1554 1554
1555/** 1555/*
1556 * Return true if itnim is considered offline for holding off IO request. 1556 * Return true if itnim is considered offline for holding off IO request.
1557 * IO is not held if itnim is being deleted. 1557 * IO is not held if itnim is being deleted.
1558 */ 1558 */
@@ -1597,17 +1597,17 @@ void
1597bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) 1597bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1598{ 1598{
1599 int j; 1599 int j;
1600 bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats)); 1600 memset(&itnim->stats, 0, sizeof(itnim->stats));
1601 bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); 1601 memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
1602 for (j = 0; j < BFA_IOBUCKET_MAX; j++) 1602 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1603 itnim->ioprofile.io_latency.min[j] = ~0; 1603 itnim->ioprofile.io_latency.min[j] = ~0;
1604} 1604}
1605 1605
1606/** 1606/*
1607 * BFA IO module state machine functions 1607 * BFA IO module state machine functions
1608 */ 1608 */
1609 1609
1610/** 1610/*
1611 * IO is not started (unallocated). 1611 * IO is not started (unallocated).
1612 */ 1612 */
1613static void 1613static void
@@ -1657,7 +1657,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1657 break; 1657 break;
1658 1658
1659 case BFA_IOIM_SM_ABORT: 1659 case BFA_IOIM_SM_ABORT:
1660 /** 1660 /*
1661 * IO in pending queue can get abort requests. Complete abort 1661 * IO in pending queue can get abort requests. Complete abort
1662 * requests immediately. 1662 * requests immediately.
1663 */ 1663 */
@@ -1672,7 +1672,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1672 } 1672 }
1673} 1673}
1674 1674
1675/** 1675/*
1676 * IO is waiting for SG pages. 1676 * IO is waiting for SG pages.
1677 */ 1677 */
1678static void 1678static void
@@ -1719,7 +1719,7 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1719 } 1719 }
1720} 1720}
1721 1721
1722/** 1722/*
1723 * IO is active. 1723 * IO is active.
1724 */ 1724 */
1725static void 1725static void
@@ -1803,7 +1803,7 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1803 } 1803 }
1804} 1804}
1805 1805
1806/** 1806/*
1807* IO is retried with new tag. 1807* IO is retried with new tag.
1808*/ 1808*/
1809static void 1809static void
@@ -1844,7 +1844,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1844 break; 1844 break;
1845 1845
1846 case BFA_IOIM_SM_ABORT: 1846 case BFA_IOIM_SM_ABORT:
1847 /** in this state IO abort is done. 1847 /* in this state IO abort is done.
1848 * Waiting for IO tag resource free. 1848 * Waiting for IO tag resource free.
1849 */ 1849 */
1850 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); 1850 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
@@ -1857,7 +1857,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1857 } 1857 }
1858} 1858}
1859 1859
1860/** 1860/*
1861 * IO is being aborted, waiting for completion from firmware. 1861 * IO is being aborted, waiting for completion from firmware.
1862 */ 1862 */
1863static void 1863static void
@@ -1919,7 +1919,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1919 } 1919 }
1920} 1920}
1921 1921
1922/** 1922/*
1923 * IO is being cleaned up (implicit abort), waiting for completion from 1923 * IO is being cleaned up (implicit abort), waiting for completion from
1924 * firmware. 1924 * firmware.
1925 */ 1925 */
@@ -1937,7 +1937,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1937 break; 1937 break;
1938 1938
1939 case BFA_IOIM_SM_ABORT: 1939 case BFA_IOIM_SM_ABORT:
1940 /** 1940 /*
1941 * IO is already being aborted implicitly 1941 * IO is already being aborted implicitly
1942 */ 1942 */
1943 ioim->io_cbfn = __bfa_cb_ioim_abort; 1943 ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -1969,7 +1969,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1969 break; 1969 break;
1970 1970
1971 case BFA_IOIM_SM_CLEANUP: 1971 case BFA_IOIM_SM_CLEANUP:
1972 /** 1972 /*
1973 * IO can be in cleanup state already due to TM command. 1973 * IO can be in cleanup state already due to TM command.
1974 * 2nd cleanup request comes from ITN offline event. 1974 * 2nd cleanup request comes from ITN offline event.
1975 */ 1975 */
@@ -1980,7 +1980,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1980 } 1980 }
1981} 1981}
1982 1982
1983/** 1983/*
1984 * IO is waiting for room in request CQ 1984 * IO is waiting for room in request CQ
1985 */ 1985 */
1986static void 1986static void
@@ -2024,7 +2024,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2024 } 2024 }
2025} 2025}
2026 2026
2027/** 2027/*
2028 * Active IO is being aborted, waiting for room in request CQ. 2028 * Active IO is being aborted, waiting for room in request CQ.
2029 */ 2029 */
2030static void 2030static void
@@ -2075,7 +2075,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2075 } 2075 }
2076} 2076}
2077 2077
2078/** 2078/*
2079 * Active IO is being cleaned up, waiting for room in request CQ. 2079 * Active IO is being cleaned up, waiting for room in request CQ.
2080 */ 2080 */
2081static void 2081static void
@@ -2091,7 +2091,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2091 break; 2091 break;
2092 2092
2093 case BFA_IOIM_SM_ABORT: 2093 case BFA_IOIM_SM_ABORT:
2094 /** 2094 /*
2095 * IO is alraedy being cleaned up implicitly 2095 * IO is alraedy being cleaned up implicitly
2096 */ 2096 */
2097 ioim->io_cbfn = __bfa_cb_ioim_abort; 2097 ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -2125,7 +2125,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2125 } 2125 }
2126} 2126}
2127 2127
2128/** 2128/*
2129 * IO bfa callback is pending. 2129 * IO bfa callback is pending.
2130 */ 2130 */
2131static void 2131static void
@@ -2152,7 +2152,7 @@ bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2152 } 2152 }
2153} 2153}
2154 2154
2155/** 2155/*
2156 * IO bfa callback is pending. IO resource cannot be freed. 2156 * IO bfa callback is pending. IO resource cannot be freed.
2157 */ 2157 */
2158static void 2158static void
@@ -2185,7 +2185,7 @@ bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2185 } 2185 }
2186} 2186}
2187 2187
2188/** 2188/*
2189 * IO is completed, waiting resource free from firmware. 2189 * IO is completed, waiting resource free from firmware.
2190 */ 2190 */
2191static void 2191static void
@@ -2214,7 +2214,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2214 2214
2215 2215
2216 2216
2217/** 2217/*
2218 * hal_ioim_private 2218 * hal_ioim_private
2219 */ 2219 */
2220 2220
@@ -2247,7 +2247,7 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2247 2247
2248 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; 2248 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2249 if (m->io_status == BFI_IOIM_STS_OK) { 2249 if (m->io_status == BFI_IOIM_STS_OK) {
2250 /** 2250 /*
2251 * setup sense information, if present 2251 * setup sense information, if present
2252 */ 2252 */
2253 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) && 2253 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
@@ -2256,15 +2256,15 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2256 snsinfo = ioim->iosp->snsinfo; 2256 snsinfo = ioim->iosp->snsinfo;
2257 } 2257 }
2258 2258
2259 /** 2259 /*
2260 * setup residue value correctly for normal completions 2260 * setup residue value correctly for normal completions
2261 */ 2261 */
2262 if (m->resid_flags == FCP_RESID_UNDER) { 2262 if (m->resid_flags == FCP_RESID_UNDER) {
2263 residue = bfa_os_ntohl(m->residue); 2263 residue = be32_to_cpu(m->residue);
2264 bfa_stats(ioim->itnim, iocomp_underrun); 2264 bfa_stats(ioim->itnim, iocomp_underrun);
2265 } 2265 }
2266 if (m->resid_flags == FCP_RESID_OVER) { 2266 if (m->resid_flags == FCP_RESID_OVER) {
2267 residue = bfa_os_ntohl(m->residue); 2267 residue = be32_to_cpu(m->residue);
2268 residue = -residue; 2268 residue = -residue;
2269 bfa_stats(ioim->itnim, iocomp_overrun); 2269 bfa_stats(ioim->itnim, iocomp_overrun);
2270 } 2270 }
@@ -2327,7 +2327,7 @@ bfa_ioim_sgpg_alloced(void *cbarg)
2327 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); 2327 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2328} 2328}
2329 2329
2330/** 2330/*
2331 * Send I/O request to firmware. 2331 * Send I/O request to firmware.
2332 */ 2332 */
2333static bfa_boolean_t 2333static bfa_boolean_t
@@ -2343,7 +2343,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2343 struct scatterlist *sg; 2343 struct scatterlist *sg;
2344 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; 2344 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2345 2345
2346 /** 2346 /*
2347 * check for room in queue to send request now 2347 * check for room in queue to send request now
2348 */ 2348 */
2349 m = bfa_reqq_next(ioim->bfa, ioim->reqq); 2349 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
@@ -2354,14 +2354,14 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2354 return BFA_FALSE; 2354 return BFA_FALSE;
2355 } 2355 }
2356 2356
2357 /** 2357 /*
2358 * build i/o request message next 2358 * build i/o request message next
2359 */ 2359 */
2360 m->io_tag = bfa_os_htons(ioim->iotag); 2360 m->io_tag = cpu_to_be16(ioim->iotag);
2361 m->rport_hdl = ioim->itnim->rport->fw_handle; 2361 m->rport_hdl = ioim->itnim->rport->fw_handle;
2362 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); 2362 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
2363 2363
2364 /** 2364 /*
2365 * build inline IO SG element here 2365 * build inline IO SG element here
2366 */ 2366 */
2367 sge = &m->sges[0]; 2367 sge = &m->sges[0];
@@ -2387,18 +2387,17 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2387 sge->flags = BFI_SGE_PGDLEN; 2387 sge->flags = BFI_SGE_PGDLEN;
2388 bfa_sge_to_be(sge); 2388 bfa_sge_to_be(sge);
2389 2389
2390 /** 2390 /*
2391 * set up I/O command parameters 2391 * set up I/O command parameters
2392 */ 2392 */
2393 bfa_os_assign(m->cmnd, cmnd_z0); 2393 m->cmnd = cmnd_z0;
2394 m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio); 2394 m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
2395 m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio); 2395 m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
2396 bfa_os_assign(m->cmnd.cdb, 2396 m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio);
2397 *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio));
2398 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); 2397 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
2399 m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl); 2398 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2400 2399
2401 /** 2400 /*
2402 * set up I/O message header 2401 * set up I/O message header
2403 */ 2402 */
2404 switch (m->cmnd.iodir) { 2403 switch (m->cmnd.iodir) {
@@ -2427,28 +2426,28 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2427 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio); 2426 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
2428 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio); 2427 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
2429 2428
2430 /** 2429 /*
2431 * Handle large CDB (>16 bytes). 2430 * Handle large CDB (>16 bytes).
2432 */ 2431 */
2433 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) - 2432 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
2434 FCP_CMND_CDB_LEN) / sizeof(u32); 2433 FCP_CMND_CDB_LEN) / sizeof(u32);
2435 if (m->cmnd.addl_cdb_len) { 2434 if (m->cmnd.addl_cdb_len) {
2436 bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *) 2435 memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
2437 bfa_cb_ioim_get_cdb(ioim->dio) + 1, 2436 bfa_cb_ioim_get_cdb(ioim->dio) + 1,
2438 m->cmnd.addl_cdb_len * sizeof(u32)); 2437 m->cmnd.addl_cdb_len * sizeof(u32));
2439 fcp_cmnd_fcpdl(&m->cmnd) = 2438 fcp_cmnd_fcpdl(&m->cmnd) =
2440 bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio)); 2439 cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio));
2441 } 2440 }
2442#endif 2441#endif
2443 2442
2444 /** 2443 /*
2445 * queue I/O message to firmware 2444 * queue I/O message to firmware
2446 */ 2445 */
2447 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2446 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2448 return BFA_TRUE; 2447 return BFA_TRUE;
2449} 2448}
2450 2449
2451/** 2450/*
2452 * Setup any additional SG pages needed.Inline SG element is setup 2451 * Setup any additional SG pages needed.Inline SG element is setup
2453 * at queuing time. 2452 * at queuing time.
2454 */ 2453 */
@@ -2459,7 +2458,7 @@ bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
2459 2458
2460 bfa_assert(ioim->nsges > BFI_SGE_INLINE); 2459 bfa_assert(ioim->nsges > BFI_SGE_INLINE);
2461 2460
2462 /** 2461 /*
2463 * allocate SG pages needed 2462 * allocate SG pages needed
2464 */ 2463 */
2465 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); 2464 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
@@ -2508,7 +2507,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
2508 sge->sg_len = sg_dma_len(sg); 2507 sge->sg_len = sg_dma_len(sg);
2509 pgcumsz += sge->sg_len; 2508 pgcumsz += sge->sg_len;
2510 2509
2511 /** 2510 /*
2512 * set flags 2511 * set flags
2513 */ 2512 */
2514 if (i < (nsges - 1)) 2513 if (i < (nsges - 1))
@@ -2523,7 +2522,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
2523 2522
2524 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); 2523 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2525 2524
2526 /** 2525 /*
2527 * set the link element of each page 2526 * set the link element of each page
2528 */ 2527 */
2529 if (sgeid == ioim->nsges) { 2528 if (sgeid == ioim->nsges) {
@@ -2540,7 +2539,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
2540 } while (sgeid < ioim->nsges); 2539 } while (sgeid < ioim->nsges);
2541} 2540}
2542 2541
2543/** 2542/*
2544 * Send I/O abort request to firmware. 2543 * Send I/O abort request to firmware.
2545 */ 2544 */
2546static bfa_boolean_t 2545static bfa_boolean_t
@@ -2549,14 +2548,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2549 struct bfi_ioim_abort_req_s *m; 2548 struct bfi_ioim_abort_req_s *m;
2550 enum bfi_ioim_h2i msgop; 2549 enum bfi_ioim_h2i msgop;
2551 2550
2552 /** 2551 /*
2553 * check for room in queue to send request now 2552 * check for room in queue to send request now
2554 */ 2553 */
2555 m = bfa_reqq_next(ioim->bfa, ioim->reqq); 2554 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2556 if (!m) 2555 if (!m)
2557 return BFA_FALSE; 2556 return BFA_FALSE;
2558 2557
2559 /** 2558 /*
2560 * build i/o request message next 2559 * build i/o request message next
2561 */ 2560 */
2562 if (ioim->iosp->abort_explicit) 2561 if (ioim->iosp->abort_explicit)
@@ -2565,17 +2564,17 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2565 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; 2564 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2566 2565
2567 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa)); 2566 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
2568 m->io_tag = bfa_os_htons(ioim->iotag); 2567 m->io_tag = cpu_to_be16(ioim->iotag);
2569 m->abort_tag = ++ioim->abort_tag; 2568 m->abort_tag = ++ioim->abort_tag;
2570 2569
2571 /** 2570 /*
2572 * queue I/O message to firmware 2571 * queue I/O message to firmware
2573 */ 2572 */
2574 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2573 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2575 return BFA_TRUE; 2574 return BFA_TRUE;
2576} 2575}
2577 2576
2578/** 2577/*
2579 * Call to resume any I/O requests waiting for room in request queue. 2578 * Call to resume any I/O requests waiting for room in request queue.
2580 */ 2579 */
2581static void 2580static void
@@ -2591,7 +2590,7 @@ bfa_ioim_qresume(void *cbarg)
2591static void 2590static void
2592bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) 2591bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2593{ 2592{
2594 /** 2593 /*
2595 * Move IO from itnim queue to fcpim global queue since itnim will be 2594 * Move IO from itnim queue to fcpim global queue since itnim will be
2596 * freed. 2595 * freed.
2597 */ 2596 */
@@ -2624,13 +2623,13 @@ bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2624 return BFA_TRUE; 2623 return BFA_TRUE;
2625} 2624}
2626 2625
2627/** 2626/*
2628 * or after the link comes back. 2627 * or after the link comes back.
2629 */ 2628 */
2630void 2629void
2631bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) 2630bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2632{ 2631{
2633 /** 2632 /*
2634 * If path tov timer expired, failback with PATHTOV status - these 2633 * If path tov timer expired, failback with PATHTOV status - these
2635 * IO requests are not normally retried by IO stack. 2634 * IO requests are not normally retried by IO stack.
2636 * 2635 *
@@ -2645,7 +2644,7 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2645 } 2644 }
2646 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); 2645 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2647 2646
2648 /** 2647 /*
2649 * Move IO to fcpim global queue since itnim will be 2648 * Move IO to fcpim global queue since itnim will be
2650 * freed. 2649 * freed.
2651 */ 2650 */
@@ -2655,11 +2654,11 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2655 2654
2656 2655
2657 2656
2658/** 2657/*
2659 * hal_ioim_friend 2658 * hal_ioim_friend
2660 */ 2659 */
2661 2660
2662/** 2661/*
2663 * Memory allocation and initialization. 2662 * Memory allocation and initialization.
2664 */ 2663 */
2665void 2664void
@@ -2671,7 +2670,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2671 u8 *snsinfo; 2670 u8 *snsinfo;
2672 u32 snsbufsz; 2671 u32 snsbufsz;
2673 2672
2674 /** 2673 /*
2675 * claim memory first 2674 * claim memory first
2676 */ 2675 */
2677 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); 2676 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
@@ -2682,7 +2681,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2682 fcpim->ioim_sp_arr = iosp; 2681 fcpim->ioim_sp_arr = iosp;
2683 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); 2682 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
2684 2683
2685 /** 2684 /*
2686 * Claim DMA memory for per IO sense data. 2685 * Claim DMA memory for per IO sense data.
2687 */ 2686 */
2688 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN; 2687 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
@@ -2694,7 +2693,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2694 snsinfo = fcpim->snsbase.kva; 2693 snsinfo = fcpim->snsbase.kva;
2695 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa); 2694 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
2696 2695
2697 /** 2696 /*
2698 * Initialize ioim free queues 2697 * Initialize ioim free queues
2699 */ 2698 */
2700 INIT_LIST_HEAD(&fcpim->ioim_free_q); 2699 INIT_LIST_HEAD(&fcpim->ioim_free_q);
@@ -2706,7 +2705,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2706 /* 2705 /*
2707 * initialize IOIM 2706 * initialize IOIM
2708 */ 2707 */
2709 bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s)); 2708 memset(ioim, 0, sizeof(struct bfa_ioim_s));
2710 ioim->iotag = i; 2709 ioim->iotag = i;
2711 ioim->bfa = fcpim->bfa; 2710 ioim->bfa = fcpim->bfa;
2712 ioim->fcpim = fcpim; 2711 ioim->fcpim = fcpim;
@@ -2723,7 +2722,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2723 } 2722 }
2724} 2723}
2725 2724
2726/** 2725/*
2727 * Driver detach time call. 2726 * Driver detach time call.
2728 */ 2727 */
2729void 2728void
@@ -2740,7 +2739,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2740 u16 iotag; 2739 u16 iotag;
2741 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP; 2740 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2742 2741
2743 iotag = bfa_os_ntohs(rsp->io_tag); 2742 iotag = be16_to_cpu(rsp->io_tag);
2744 2743
2745 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); 2744 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2746 bfa_assert(ioim->iotag == iotag); 2745 bfa_assert(ioim->iotag == iotag);
@@ -2750,7 +2749,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2750 bfa_trc(ioim->bfa, rsp->reuse_io_tag); 2749 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2751 2750
2752 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active)) 2751 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2753 bfa_os_assign(ioim->iosp->comp_rspmsg, *m); 2752 ioim->iosp->comp_rspmsg = *m;
2754 2753
2755 switch (rsp->io_status) { 2754 switch (rsp->io_status) {
2756 case BFI_IOIM_STS_OK: 2755 case BFI_IOIM_STS_OK:
@@ -2823,7 +2822,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2823 struct bfa_ioim_s *ioim; 2822 struct bfa_ioim_s *ioim;
2824 u16 iotag; 2823 u16 iotag;
2825 2824
2826 iotag = bfa_os_ntohs(rsp->io_tag); 2825 iotag = be16_to_cpu(rsp->io_tag);
2827 2826
2828 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); 2827 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2829 bfa_assert(ioim->iotag == iotag); 2828 bfa_assert(ioim->iotag == iotag);
@@ -2837,7 +2836,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2837void 2836void
2838bfa_ioim_profile_start(struct bfa_ioim_s *ioim) 2837bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
2839{ 2838{
2840 ioim->start_time = bfa_os_get_clock(); 2839 ioim->start_time = jiffies;
2841} 2840}
2842 2841
2843void 2842void
@@ -2845,7 +2844,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
2845{ 2844{
2846 u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); 2845 u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
2847 u32 index = bfa_ioim_get_index(fcp_dl); 2846 u32 index = bfa_ioim_get_index(fcp_dl);
2848 u64 end_time = bfa_os_get_clock(); 2847 u64 end_time = jiffies;
2849 struct bfa_itnim_latency_s *io_lat = 2848 struct bfa_itnim_latency_s *io_lat =
2850 &(ioim->itnim->ioprofile.io_latency); 2849 &(ioim->itnim->ioprofile.io_latency);
2851 u32 val = (u32)(end_time - ioim->start_time); 2850 u32 val = (u32)(end_time - ioim->start_time);
@@ -2859,7 +2858,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
2859 io_lat->max[index] : val; 2858 io_lat->max[index] : val;
2860 io_lat->avg[index] += val; 2859 io_lat->avg[index] += val;
2861} 2860}
2862/** 2861/*
2863 * Called by itnim to clean up IO while going offline. 2862 * Called by itnim to clean up IO while going offline.
2864 */ 2863 */
2865void 2864void
@@ -2882,7 +2881,7 @@ bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2882 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); 2881 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2883} 2882}
2884 2883
2885/** 2884/*
2886 * IOC failure handling. 2885 * IOC failure handling.
2887 */ 2886 */
2888void 2887void
@@ -2893,7 +2892,7 @@ bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2893 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); 2892 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2894} 2893}
2895 2894
2896/** 2895/*
2897 * IO offline TOV popped. Fail the pending IO. 2896 * IO offline TOV popped. Fail the pending IO.
2898 */ 2897 */
2899void 2898void
@@ -2905,11 +2904,11 @@ bfa_ioim_tov(struct bfa_ioim_s *ioim)
2905 2904
2906 2905
2907 2906
2908/** 2907/*
2909 * hal_ioim_api 2908 * hal_ioim_api
2910 */ 2909 */
2911 2910
2912/** 2911/*
2913 * Allocate IOIM resource for initiator mode I/O request. 2912 * Allocate IOIM resource for initiator mode I/O request.
2914 */ 2913 */
2915struct bfa_ioim_s * 2914struct bfa_ioim_s *
@@ -2919,7 +2918,7 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2919 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2918 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2920 struct bfa_ioim_s *ioim; 2919 struct bfa_ioim_s *ioim;
2921 2920
2922 /** 2921 /*
2923 * alocate IOIM resource 2922 * alocate IOIM resource
2924 */ 2923 */
2925 bfa_q_deq(&fcpim->ioim_free_q, &ioim); 2924 bfa_q_deq(&fcpim->ioim_free_q, &ioim);
@@ -2970,7 +2969,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim)
2970 2969
2971 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 2970 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2972 2971
2973 /** 2972 /*
2974 * Obtain the queue over which this request has to be issued 2973 * Obtain the queue over which this request has to be issued
2975 */ 2974 */
2976 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? 2975 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
@@ -2980,7 +2979,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim)
2980 bfa_sm_send_event(ioim, BFA_IOIM_SM_START); 2979 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2981} 2980}
2982 2981
2983/** 2982/*
2984 * Driver I/O abort request. 2983 * Driver I/O abort request.
2985 */ 2984 */
2986bfa_status_t 2985bfa_status_t
@@ -2999,11 +2998,11 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim)
2999} 2998}
3000 2999
3001 3000
3002/** 3001/*
3003 * BFA TSKIM state machine functions 3002 * BFA TSKIM state machine functions
3004 */ 3003 */
3005 3004
3006/** 3005/*
3007 * Task management command beginning state. 3006 * Task management command beginning state.
3008 */ 3007 */
3009static void 3008static void
@@ -3016,7 +3015,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3016 bfa_sm_set_state(tskim, bfa_tskim_sm_active); 3015 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3017 bfa_tskim_gather_ios(tskim); 3016 bfa_tskim_gather_ios(tskim);
3018 3017
3019 /** 3018 /*
3020 * If device is offline, do not send TM on wire. Just cleanup 3019 * If device is offline, do not send TM on wire. Just cleanup
3021 * any pending IO requests and complete TM request. 3020 * any pending IO requests and complete TM request.
3022 */ 3021 */
@@ -3040,7 +3039,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3040 } 3039 }
3041} 3040}
3042 3041
3043/** 3042/*
3044 * brief 3043 * brief
3045 * TM command is active, awaiting completion from firmware to 3044 * TM command is active, awaiting completion from firmware to
3046 * cleanup IO requests in TM scope. 3045 * cleanup IO requests in TM scope.
@@ -3077,7 +3076,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3077 } 3076 }
3078} 3077}
3079 3078
3080/** 3079/*
3081 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup 3080 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3082 * completion event from firmware. 3081 * completion event from firmware.
3083 */ 3082 */
@@ -3088,7 +3087,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3088 3087
3089 switch (event) { 3088 switch (event) {
3090 case BFA_TSKIM_SM_DONE: 3089 case BFA_TSKIM_SM_DONE:
3091 /** 3090 /*
3092 * Ignore and wait for ABORT completion from firmware. 3091 * Ignore and wait for ABORT completion from firmware.
3093 */ 3092 */
3094 break; 3093 break;
@@ -3121,7 +3120,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3121 break; 3120 break;
3122 3121
3123 case BFA_TSKIM_SM_CLEANUP: 3122 case BFA_TSKIM_SM_CLEANUP:
3124 /** 3123 /*
3125 * Ignore, TM command completed on wire. 3124 * Ignore, TM command completed on wire.
3126 * Notify TM conmpletion on IO cleanup completion. 3125 * Notify TM conmpletion on IO cleanup completion.
3127 */ 3126 */
@@ -3138,7 +3137,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3138 } 3137 }
3139} 3138}
3140 3139
3141/** 3140/*
3142 * Task management command is waiting for room in request CQ 3141 * Task management command is waiting for room in request CQ
3143 */ 3142 */
3144static void 3143static void
@@ -3153,7 +3152,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3153 break; 3152 break;
3154 3153
3155 case BFA_TSKIM_SM_CLEANUP: 3154 case BFA_TSKIM_SM_CLEANUP:
3156 /** 3155 /*
3157 * No need to send TM on wire since ITN is offline. 3156 * No need to send TM on wire since ITN is offline.
3158 */ 3157 */
3159 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); 3158 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
@@ -3173,7 +3172,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3173 } 3172 }
3174} 3173}
3175 3174
3176/** 3175/*
3177 * Task management command is active, awaiting for room in request CQ 3176 * Task management command is active, awaiting for room in request CQ
3178 * to send clean up request. 3177 * to send clean up request.
3179 */ 3178 */
@@ -3186,7 +3185,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3186 switch (event) { 3185 switch (event) {
3187 case BFA_TSKIM_SM_DONE: 3186 case BFA_TSKIM_SM_DONE:
3188 bfa_reqq_wcancel(&tskim->reqq_wait); 3187 bfa_reqq_wcancel(&tskim->reqq_wait);
3189 /** 3188 /*
3190 * 3189 *
3191 * Fall through !!! 3190 * Fall through !!!
3192 */ 3191 */
@@ -3208,7 +3207,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3208 } 3207 }
3209} 3208}
3210 3209
3211/** 3210/*
3212 * BFA callback is pending 3211 * BFA callback is pending
3213 */ 3212 */
3214static void 3213static void
@@ -3236,7 +3235,7 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3236 3235
3237 3236
3238 3237
3239/** 3238/*
3240 * hal_tskim_private 3239 * hal_tskim_private
3241 */ 3240 */
3242 3241
@@ -3289,7 +3288,7 @@ bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
3289 return BFA_FALSE; 3288 return BFA_FALSE;
3290} 3289}
3291 3290
3292/** 3291/*
3293 * Gather affected IO requests and task management commands. 3292 * Gather affected IO requests and task management commands.
3294 */ 3293 */
3295static void 3294static void
@@ -3301,7 +3300,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3301 3300
3302 INIT_LIST_HEAD(&tskim->io_q); 3301 INIT_LIST_HEAD(&tskim->io_q);
3303 3302
3304 /** 3303 /*
3305 * Gather any active IO requests first. 3304 * Gather any active IO requests first.
3306 */ 3305 */
3307 list_for_each_safe(qe, qen, &itnim->io_q) { 3306 list_for_each_safe(qe, qen, &itnim->io_q) {
@@ -3313,7 +3312,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3313 } 3312 }
3314 } 3313 }
3315 3314
3316 /** 3315 /*
3317 * Failback any pending IO requests immediately. 3316 * Failback any pending IO requests immediately.
3318 */ 3317 */
3319 list_for_each_safe(qe, qen, &itnim->pending_q) { 3318 list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -3327,7 +3326,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3327 } 3326 }
3328} 3327}
3329 3328
3330/** 3329/*
3331 * IO cleanup completion 3330 * IO cleanup completion
3332 */ 3331 */
3333static void 3332static void
@@ -3339,7 +3338,7 @@ bfa_tskim_cleanp_comp(void *tskim_cbarg)
3339 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); 3338 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3340} 3339}
3341 3340
3342/** 3341/*
3343 * Gather affected IO requests and task management commands. 3342 * Gather affected IO requests and task management commands.
3344 */ 3343 */
3345static void 3344static void
@@ -3359,7 +3358,7 @@ bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3359 bfa_wc_wait(&tskim->wc); 3358 bfa_wc_wait(&tskim->wc);
3360} 3359}
3361 3360
3362/** 3361/*
3363 * Send task management request to firmware. 3362 * Send task management request to firmware.
3364 */ 3363 */
3365static bfa_boolean_t 3364static bfa_boolean_t
@@ -3368,33 +3367,33 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
3368 struct bfa_itnim_s *itnim = tskim->itnim; 3367 struct bfa_itnim_s *itnim = tskim->itnim;
3369 struct bfi_tskim_req_s *m; 3368 struct bfi_tskim_req_s *m;
3370 3369
3371 /** 3370 /*
3372 * check for room in queue to send request now 3371 * check for room in queue to send request now
3373 */ 3372 */
3374 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3373 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3375 if (!m) 3374 if (!m)
3376 return BFA_FALSE; 3375 return BFA_FALSE;
3377 3376
3378 /** 3377 /*
3379 * build i/o request message next 3378 * build i/o request message next
3380 */ 3379 */
3381 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, 3380 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3382 bfa_lpuid(tskim->bfa)); 3381 bfa_lpuid(tskim->bfa));
3383 3382
3384 m->tsk_tag = bfa_os_htons(tskim->tsk_tag); 3383 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3385 m->itn_fhdl = tskim->itnim->rport->fw_handle; 3384 m->itn_fhdl = tskim->itnim->rport->fw_handle;
3386 m->t_secs = tskim->tsecs; 3385 m->t_secs = tskim->tsecs;
3387 m->lun = tskim->lun; 3386 m->lun = tskim->lun;
3388 m->tm_flags = tskim->tm_cmnd; 3387 m->tm_flags = tskim->tm_cmnd;
3389 3388
3390 /** 3389 /*
3391 * queue I/O message to firmware 3390 * queue I/O message to firmware
3392 */ 3391 */
3393 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3392 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3394 return BFA_TRUE; 3393 return BFA_TRUE;
3395} 3394}
3396 3395
3397/** 3396/*
3398 * Send abort request to cleanup an active TM to firmware. 3397 * Send abort request to cleanup an active TM to firmware.
3399 */ 3398 */
3400static bfa_boolean_t 3399static bfa_boolean_t
@@ -3403,29 +3402,29 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3403 struct bfa_itnim_s *itnim = tskim->itnim; 3402 struct bfa_itnim_s *itnim = tskim->itnim;
3404 struct bfi_tskim_abortreq_s *m; 3403 struct bfi_tskim_abortreq_s *m;
3405 3404
3406 /** 3405 /*
3407 * check for room in queue to send request now 3406 * check for room in queue to send request now
3408 */ 3407 */
3409 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3408 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3410 if (!m) 3409 if (!m)
3411 return BFA_FALSE; 3410 return BFA_FALSE;
3412 3411
3413 /** 3412 /*
3414 * build i/o request message next 3413 * build i/o request message next
3415 */ 3414 */
3416 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, 3415 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3417 bfa_lpuid(tskim->bfa)); 3416 bfa_lpuid(tskim->bfa));
3418 3417
3419 m->tsk_tag = bfa_os_htons(tskim->tsk_tag); 3418 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3420 3419
3421 /** 3420 /*
3422 * queue I/O message to firmware 3421 * queue I/O message to firmware
3423 */ 3422 */
3424 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3423 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3425 return BFA_TRUE; 3424 return BFA_TRUE;
3426} 3425}
3427 3426
3428/** 3427/*
3429 * Call to resume task management cmnd waiting for room in request queue. 3428 * Call to resume task management cmnd waiting for room in request queue.
3430 */ 3429 */
3431static void 3430static void
@@ -3437,7 +3436,7 @@ bfa_tskim_qresume(void *cbarg)
3437 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); 3436 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3438} 3437}
3439 3438
3440/** 3439/*
3441 * Cleanup IOs associated with a task mangement command on IOC failures. 3440 * Cleanup IOs associated with a task mangement command on IOC failures.
3442 */ 3441 */
3443static void 3442static void
@@ -3454,11 +3453,11 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3454 3453
3455 3454
3456 3455
3457/** 3456/*
3458 * hal_tskim_friend 3457 * hal_tskim_friend
3459 */ 3458 */
3460 3459
3461/** 3460/*
3462 * Notification on completions from related ioim. 3461 * Notification on completions from related ioim.
3463 */ 3462 */
3464void 3463void
@@ -3467,7 +3466,7 @@ bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3467 bfa_wc_down(&tskim->wc); 3466 bfa_wc_down(&tskim->wc);
3468} 3467}
3469 3468
3470/** 3469/*
3471 * Handle IOC h/w failure notification from itnim. 3470 * Handle IOC h/w failure notification from itnim.
3472 */ 3471 */
3473void 3472void
@@ -3478,7 +3477,7 @@ bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3478 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); 3477 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3479} 3478}
3480 3479
3481/** 3480/*
3482 * Cleanup TM command and associated IOs as part of ITNIM offline. 3481 * Cleanup TM command and associated IOs as part of ITNIM offline.
3483 */ 3482 */
3484void 3483void
@@ -3489,7 +3488,7 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3489 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); 3488 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3490} 3489}
3491 3490
3492/** 3491/*
3493 * Memory allocation and initialization. 3492 * Memory allocation and initialization.
3494 */ 3493 */
3495void 3494void
@@ -3507,7 +3506,7 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3507 /* 3506 /*
3508 * initialize TSKIM 3507 * initialize TSKIM
3509 */ 3508 */
3510 bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s)); 3509 memset(tskim, 0, sizeof(struct bfa_tskim_s));
3511 tskim->tsk_tag = i; 3510 tskim->tsk_tag = i;
3512 tskim->bfa = fcpim->bfa; 3511 tskim->bfa = fcpim->bfa;
3513 tskim->fcpim = fcpim; 3512 tskim->fcpim = fcpim;
@@ -3525,7 +3524,7 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3525void 3524void
3526bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim) 3525bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
3527{ 3526{
3528 /** 3527 /*
3529 * @todo 3528 * @todo
3530 */ 3529 */
3531} 3530}
@@ -3536,14 +3535,14 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3536 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 3535 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3537 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; 3536 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3538 struct bfa_tskim_s *tskim; 3537 struct bfa_tskim_s *tskim;
3539 u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag); 3538 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
3540 3539
3541 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); 3540 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3542 bfa_assert(tskim->tsk_tag == tsk_tag); 3541 bfa_assert(tskim->tsk_tag == tsk_tag);
3543 3542
3544 tskim->tsk_status = rsp->tsk_status; 3543 tskim->tsk_status = rsp->tsk_status;
3545 3544
3546 /** 3545 /*
3547 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort 3546 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3548 * requests. All other statuses are for normal completions. 3547 * requests. All other statuses are for normal completions.
3549 */ 3548 */
@@ -3558,7 +3557,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3558 3557
3559 3558
3560 3559
3561/** 3560/*
3562 * hal_tskim_api 3561 * hal_tskim_api
3563 */ 3562 */
3564 3563
@@ -3585,7 +3584,7 @@ bfa_tskim_free(struct bfa_tskim_s *tskim)
3585 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); 3584 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3586} 3585}
3587 3586
3588/** 3587/*
3589 * Start a task management command. 3588 * Start a task management command.
3590 * 3589 *
3591 * @param[in] tskim BFA task management command instance 3590 * @param[in] tskim BFA task management command instance
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 3bf343160aa..db53717eeb4 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -104,7 +104,7 @@ struct bfa_fcpim_mod_s {
104 bfa_fcpim_profile_t profile_start; 104 bfa_fcpim_profile_t profile_start;
105}; 105};
106 106
107/** 107/*
108 * BFA IO (initiator mode) 108 * BFA IO (initiator mode)
109 */ 109 */
110struct bfa_ioim_s { 110struct bfa_ioim_s {
@@ -137,7 +137,7 @@ struct bfa_ioim_sp_s {
137 struct bfa_tskim_s *tskim; /* Relevant TM cmd */ 137 struct bfa_tskim_s *tskim; /* Relevant TM cmd */
138}; 138};
139 139
140/** 140/*
141 * BFA Task management command (initiator mode) 141 * BFA Task management command (initiator mode)
142 */ 142 */
143struct bfa_tskim_s { 143struct bfa_tskim_s {
@@ -160,7 +160,7 @@ struct bfa_tskim_s {
160}; 160};
161 161
162 162
163/** 163/*
164 * BFA i-t-n (initiator mode) 164 * BFA i-t-n (initiator mode)
165 */ 165 */
166struct bfa_itnim_s { 166struct bfa_itnim_s {
@@ -303,7 +303,7 @@ bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
303 struct bfa_itnim_ioprofile_s *ioprofile); 303 struct bfa_itnim_ioprofile_s *ioprofile);
304#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq) 304#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
305 305
306/** 306/*
307 * BFA completion callback for bfa_itnim_online(). 307 * BFA completion callback for bfa_itnim_online().
308 * 308 *
309 * @param[in] itnim FCS or driver itnim instance 309 * @param[in] itnim FCS or driver itnim instance
@@ -312,7 +312,7 @@ bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
312 */ 312 */
313void bfa_cb_itnim_online(void *itnim); 313void bfa_cb_itnim_online(void *itnim);
314 314
315/** 315/*
316 * BFA completion callback for bfa_itnim_offline(). 316 * BFA completion callback for bfa_itnim_offline().
317 * 317 *
318 * @param[in] itnim FCS or driver itnim instance 318 * @param[in] itnim FCS or driver itnim instance
@@ -323,7 +323,7 @@ void bfa_cb_itnim_offline(void *itnim);
323void bfa_cb_itnim_tov_begin(void *itnim); 323void bfa_cb_itnim_tov_begin(void *itnim);
324void bfa_cb_itnim_tov(void *itnim); 324void bfa_cb_itnim_tov(void *itnim);
325 325
326/** 326/*
327 * BFA notification to FCS/driver for second level error recovery. 327 * BFA notification to FCS/driver for second level error recovery.
328 * 328 *
329 * Atleast one I/O request has timedout and target is unresponsive to 329 * Atleast one I/O request has timedout and target is unresponsive to
@@ -351,7 +351,7 @@ void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
351 bfa_boolean_t iotov); 351 bfa_boolean_t iotov);
352 352
353 353
354/** 354/*
355 * I/O completion notification. 355 * I/O completion notification.
356 * 356 *
357 * @param[in] dio driver IO structure 357 * @param[in] dio driver IO structure
@@ -368,7 +368,7 @@ void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
368 u8 scsi_status, int sns_len, 368 u8 scsi_status, int sns_len,
369 u8 *sns_info, s32 residue); 369 u8 *sns_info, s32 residue);
370 370
371/** 371/*
372 * I/O good completion notification. 372 * I/O good completion notification.
373 * 373 *
374 * @param[in] dio driver IO structure 374 * @param[in] dio driver IO structure
@@ -377,7 +377,7 @@ void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
377 */ 377 */
378void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio); 378void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
379 379
380/** 380/*
381 * I/O abort completion notification 381 * I/O abort completion notification
382 * 382 *
383 * @param[in] dio driver IO that was aborted 383 * @param[in] dio driver IO that was aborted
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 9cebbe30a67..c94502dfac6 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfa_fcs.c BFA FCS main 19 * bfa_fcs.c BFA FCS main
20 */ 20 */
21 21
@@ -25,7 +25,7 @@
25 25
26BFA_TRC_FILE(FCS, FCS); 26BFA_TRC_FILE(FCS, FCS);
27 27
28/** 28/*
29 * FCS sub-modules 29 * FCS sub-modules
30 */ 30 */
31struct bfa_fcs_mod_s { 31struct bfa_fcs_mod_s {
@@ -43,7 +43,7 @@ static struct bfa_fcs_mod_s fcs_modules[] = {
43 bfa_fcs_fabric_modexit }, 43 bfa_fcs_fabric_modexit },
44}; 44};
45 45
46/** 46/*
47 * fcs_api BFA FCS API 47 * fcs_api BFA FCS API
48 */ 48 */
49 49
@@ -58,11 +58,11 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
58 58
59 59
60 60
61/** 61/*
62 * fcs_api BFA FCS API 62 * fcs_api BFA FCS API
63 */ 63 */
64 64
65/** 65/*
66 * fcs attach -- called once to initialize data structures at driver attach time 66 * fcs attach -- called once to initialize data structures at driver attach time
67 */ 67 */
68void 68void
@@ -86,7 +86,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
86 } 86 }
87} 87}
88 88
89/** 89/*
90 * fcs initialization, called once after bfa initialization is complete 90 * fcs initialization, called once after bfa initialization is complete
91 */ 91 */
92void 92void
@@ -110,7 +110,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
110 } 110 }
111} 111}
112 112
113/** 113/*
114 * Start FCS operations. 114 * Start FCS operations.
115 */ 115 */
116void 116void
@@ -119,7 +119,7 @@ bfa_fcs_start(struct bfa_fcs_s *fcs)
119 bfa_fcs_fabric_modstart(fcs); 119 bfa_fcs_fabric_modstart(fcs);
120} 120}
121 121
122/** 122/*
123 * brief 123 * brief
124 * FCS driver details initialization. 124 * FCS driver details initialization.
125 * 125 *
@@ -138,7 +138,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
138 bfa_fcs_fabric_psymb_init(&fcs->fabric); 138 bfa_fcs_fabric_psymb_init(&fcs->fabric);
139} 139}
140 140
141/** 141/*
142 * brief 142 * brief
143 * FCS FDMI Driver Parameter Initialization 143 * FCS FDMI Driver Parameter Initialization
144 * 144 *
@@ -154,7 +154,7 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
154 fcs->fdmi_enabled = fdmi_enable; 154 fcs->fdmi_enabled = fdmi_enable;
155 155
156} 156}
157/** 157/*
158 * brief 158 * brief
159 * FCS instance cleanup and exit. 159 * FCS instance cleanup and exit.
160 * 160 *
@@ -196,7 +196,7 @@ bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
196 bfa_wc_down(&fcs->wc); 196 bfa_wc_down(&fcs->wc);
197} 197}
198 198
199/** 199/*
200 * Fabric module implementation. 200 * Fabric module implementation.
201 */ 201 */
202 202
@@ -232,11 +232,11 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
232 u32 rsp_len, 232 u32 rsp_len,
233 u32 resid_len, 233 u32 resid_len,
234 struct fchs_s *rspfchs); 234 struct fchs_s *rspfchs);
235/** 235/*
236 * fcs_fabric_sm fabric state machine functions 236 * fcs_fabric_sm fabric state machine functions
237 */ 237 */
238 238
239/** 239/*
240 * Fabric state machine events 240 * Fabric state machine events
241 */ 241 */
242enum bfa_fcs_fabric_event { 242enum bfa_fcs_fabric_event {
@@ -286,7 +286,7 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
286 enum bfa_fcs_fabric_event event); 286 enum bfa_fcs_fabric_event event);
287static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, 287static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
288 enum bfa_fcs_fabric_event event); 288 enum bfa_fcs_fabric_event event);
289/** 289/*
290 * Beginning state before fabric creation. 290 * Beginning state before fabric creation.
291 */ 291 */
292static void 292static void
@@ -312,7 +312,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
312 } 312 }
313} 313}
314 314
315/** 315/*
316 * Beginning state before fabric creation. 316 * Beginning state before fabric creation.
317 */ 317 */
318static void 318static void
@@ -345,7 +345,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
345 } 345 }
346} 346}
347 347
348/** 348/*
349 * Link is down, awaiting LINK UP event from port. This is also the 349 * Link is down, awaiting LINK UP event from port. This is also the
350 * first state at fabric creation. 350 * first state at fabric creation.
351 */ 351 */
@@ -375,7 +375,7 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
375 } 375 }
376} 376}
377 377
378/** 378/*
379 * FLOGI is in progress, awaiting FLOGI reply. 379 * FLOGI is in progress, awaiting FLOGI reply.
380 */ 380 */
381static void 381static void
@@ -468,7 +468,7 @@ bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
468 } 468 }
469} 469}
470 470
471/** 471/*
472 * Authentication is in progress, awaiting authentication results. 472 * Authentication is in progress, awaiting authentication results.
473 */ 473 */
474static void 474static void
@@ -508,7 +508,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
508 } 508 }
509} 509}
510 510
511/** 511/*
512 * Authentication failed 512 * Authentication failed
513 */ 513 */
514static void 514static void
@@ -534,7 +534,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
534 } 534 }
535} 535}
536 536
537/** 537/*
538 * Port is in loopback mode. 538 * Port is in loopback mode.
539 */ 539 */
540static void 540static void
@@ -560,7 +560,7 @@ bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
560 } 560 }
561} 561}
562 562
563/** 563/*
564 * There is no attached fabric - private loop or NPort-to-NPort topology. 564 * There is no attached fabric - private loop or NPort-to-NPort topology.
565 */ 565 */
566static void 566static void
@@ -593,7 +593,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
593 } 593 }
594} 594}
595 595
596/** 596/*
597 * Fabric is online - normal operating state. 597 * Fabric is online - normal operating state.
598 */ 598 */
599static void 599static void
@@ -628,7 +628,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
628 } 628 }
629} 629}
630 630
631/** 631/*
632 * Exchanging virtual fabric parameters. 632 * Exchanging virtual fabric parameters.
633 */ 633 */
634static void 634static void
@@ -652,7 +652,7 @@ bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
652 } 652 }
653} 653}
654 654
655/** 655/*
656 * EVFP exchange complete and VFT tagging is enabled. 656 * EVFP exchange complete and VFT tagging is enabled.
657 */ 657 */
658static void 658static void
@@ -663,7 +663,7 @@ bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
663 bfa_trc(fabric->fcs, event); 663 bfa_trc(fabric->fcs, event);
664} 664}
665 665
666/** 666/*
667 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). 667 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
668 */ 668 */
669static void 669static void
@@ -684,7 +684,7 @@ bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
684 fabric->event_arg.swp_vfid); 684 fabric->event_arg.swp_vfid);
685} 685}
686 686
687/** 687/*
688 * Fabric is being deleted, awaiting vport delete completions. 688 * Fabric is being deleted, awaiting vport delete completions.
689 */ 689 */
690static void 690static void
@@ -714,7 +714,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
714 714
715 715
716 716
717/** 717/*
718 * fcs_fabric_private fabric private functions 718 * fcs_fabric_private fabric private functions
719 */ 719 */
720 720
@@ -728,7 +728,7 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); 728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
729} 729}
730 730
731/** 731/*
732 * Port Symbolic Name Creation for base port. 732 * Port Symbolic Name Creation for base port.
733 */ 733 */
734void 734void
@@ -789,7 +789,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
789 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; 789 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
790} 790}
791 791
792/** 792/*
793 * bfa lps login completion callback 793 * bfa lps login completion callback
794 */ 794 */
795void 795void
@@ -867,7 +867,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
867 bfa_trc(fabric->fcs, fabric->is_npiv); 867 bfa_trc(fabric->fcs, fabric->is_npiv);
868 bfa_trc(fabric->fcs, fabric->is_auth); 868 bfa_trc(fabric->fcs, fabric->is_auth);
869} 869}
870/** 870/*
871 * Allocate and send FLOGI. 871 * Allocate and send FLOGI.
872 */ 872 */
873static void 873static void
@@ -897,7 +897,7 @@ bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
897 bfa_fcs_fabric_set_opertype(fabric); 897 bfa_fcs_fabric_set_opertype(fabric);
898 fabric->stats.fabric_onlines++; 898 fabric->stats.fabric_onlines++;
899 899
900 /** 900 /*
901 * notify online event to base and then virtual ports 901 * notify online event to base and then virtual ports
902 */ 902 */
903 bfa_fcs_lport_online(&fabric->bport); 903 bfa_fcs_lport_online(&fabric->bport);
@@ -917,7 +917,7 @@ bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
917 bfa_trc(fabric->fcs, fabric->fabric_name); 917 bfa_trc(fabric->fcs, fabric->fabric_name);
918 fabric->stats.fabric_offlines++; 918 fabric->stats.fabric_offlines++;
919 919
920 /** 920 /*
921 * notify offline event first to vports and then base port. 921 * notify offline event first to vports and then base port.
922 */ 922 */
923 list_for_each_safe(qe, qen, &fabric->vport_q) { 923 list_for_each_safe(qe, qen, &fabric->vport_q) {
@@ -939,7 +939,7 @@ bfa_fcs_fabric_delay(void *cbarg)
939 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); 939 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
940} 940}
941 941
942/** 942/*
943 * Delete all vports and wait for vport delete completions. 943 * Delete all vports and wait for vport delete completions.
944 */ 944 */
945static void 945static void
@@ -965,11 +965,11 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
965 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); 965 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
966} 966}
967 967
968/** 968/*
969 * fcs_fabric_public fabric public functions 969 * fcs_fabric_public fabric public functions
970 */ 970 */
971 971
972/** 972/*
973 * Attach time initialization. 973 * Attach time initialization.
974 */ 974 */
975void 975void
@@ -978,9 +978,9 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
978 struct bfa_fcs_fabric_s *fabric; 978 struct bfa_fcs_fabric_s *fabric;
979 979
980 fabric = &fcs->fabric; 980 fabric = &fcs->fabric;
981 bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); 981 memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
982 982
983 /** 983 /*
984 * Initialize base fabric. 984 * Initialize base fabric.
985 */ 985 */
986 fabric->fcs = fcs; 986 fabric->fcs = fcs;
@@ -989,7 +989,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
989 fabric->lps = bfa_lps_alloc(fcs->bfa); 989 fabric->lps = bfa_lps_alloc(fcs->bfa);
990 bfa_assert(fabric->lps); 990 bfa_assert(fabric->lps);
991 991
992 /** 992 /*
993 * Initialize fabric delete completion handler. Fabric deletion is 993 * Initialize fabric delete completion handler. Fabric deletion is
994 * complete when the last vport delete is complete. 994 * complete when the last vport delete is complete.
995 */ 995 */
@@ -1007,7 +1007,7 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
1007 bfa_trc(fcs, 0); 1007 bfa_trc(fcs, 0);
1008} 1008}
1009 1009
1010/** 1010/*
1011 * Module cleanup 1011 * Module cleanup
1012 */ 1012 */
1013void 1013void
@@ -1017,7 +1017,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
1017 1017
1018 bfa_trc(fcs, 0); 1018 bfa_trc(fcs, 0);
1019 1019
1020 /** 1020 /*
1021 * Cleanup base fabric. 1021 * Cleanup base fabric.
1022 */ 1022 */
1023 fabric = &fcs->fabric; 1023 fabric = &fcs->fabric;
@@ -1025,7 +1025,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
1025 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); 1025 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
1026} 1026}
1027 1027
1028/** 1028/*
1029 * Fabric module start -- kick starts FCS actions 1029 * Fabric module start -- kick starts FCS actions
1030 */ 1030 */
1031void 1031void
@@ -1038,7 +1038,7 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); 1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
1039} 1039}
1040 1040
1041/** 1041/*
1042 * Suspend fabric activity as part of driver suspend. 1042 * Suspend fabric activity as part of driver suspend.
1043 */ 1043 */
1044void 1044void
@@ -1064,7 +1064,7 @@ bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
1064 return fabric->oper_type; 1064 return fabric->oper_type;
1065} 1065}
1066 1066
1067/** 1067/*
1068 * Link up notification from BFA physical port module. 1068 * Link up notification from BFA physical port module.
1069 */ 1069 */
1070void 1070void
@@ -1074,7 +1074,7 @@ bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
1074 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); 1074 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
1075} 1075}
1076 1076
1077/** 1077/*
1078 * Link down notification from BFA physical port module. 1078 * Link down notification from BFA physical port module.
1079 */ 1079 */
1080void 1080void
@@ -1084,7 +1084,7 @@ bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
1084 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); 1084 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
1085} 1085}
1086 1086
1087/** 1087/*
1088 * A child vport is being created in the fabric. 1088 * A child vport is being created in the fabric.
1089 * 1089 *
1090 * Call from vport module at vport creation. A list of base port and vports 1090 * Call from vport module at vport creation. A list of base port and vports
@@ -1099,7 +1099,7 @@ void
1099bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, 1099bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
1100 struct bfa_fcs_vport_s *vport) 1100 struct bfa_fcs_vport_s *vport)
1101{ 1101{
1102 /** 1102 /*
1103 * - add vport to fabric's vport_q 1103 * - add vport to fabric's vport_q
1104 */ 1104 */
1105 bfa_trc(fabric->fcs, fabric->vf_id); 1105 bfa_trc(fabric->fcs, fabric->vf_id);
@@ -1109,7 +1109,7 @@ bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
1109 bfa_wc_up(&fabric->wc); 1109 bfa_wc_up(&fabric->wc);
1110} 1110}
1111 1111
1112/** 1112/*
1113 * A child vport is being deleted from fabric. 1113 * A child vport is being deleted from fabric.
1114 * 1114 *
1115 * Vport is being deleted. 1115 * Vport is being deleted.
@@ -1123,7 +1123,7 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
1123 bfa_wc_down(&fabric->wc); 1123 bfa_wc_down(&fabric->wc);
1124} 1124}
1125 1125
1126/** 1126/*
1127 * Base port is deleted. 1127 * Base port is deleted.
1128 */ 1128 */
1129void 1129void
@@ -1133,7 +1133,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
1133} 1133}
1134 1134
1135 1135
1136/** 1136/*
1137 * Check if fabric is online. 1137 * Check if fabric is online.
1138 * 1138 *
1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf. 1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1146,7 +1146,7 @@ bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); 1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
1147} 1147}
1148 1148
1149/** 1149/*
1150 * brief 1150 * brief
1151 * 1151 *
1152 */ 1152 */
@@ -1158,7 +1158,7 @@ bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
1158 return BFA_STATUS_OK; 1158 return BFA_STATUS_OK;
1159} 1159}
1160 1160
1161/** 1161/*
1162 * Lookup for a vport withing a fabric given its pwwn 1162 * Lookup for a vport withing a fabric given its pwwn
1163 */ 1163 */
1164struct bfa_fcs_vport_s * 1164struct bfa_fcs_vport_s *
@@ -1176,7 +1176,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
1176 return NULL; 1176 return NULL;
1177} 1177}
1178 1178
1179/** 1179/*
1180 * In a given fabric, return the number of lports. 1180 * In a given fabric, return the number of lports.
1181 * 1181 *
1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf. 1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1214,7 +1214,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
1214 1214
1215 return oui; 1215 return oui;
1216} 1216}
1217/** 1217/*
1218 * Unsolicited frame receive handling. 1218 * Unsolicited frame receive handling.
1219 */ 1219 */
1220void 1220void
@@ -1230,7 +1230,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1230 bfa_trc(fabric->fcs, len); 1230 bfa_trc(fabric->fcs, len);
1231 bfa_trc(fabric->fcs, pid); 1231 bfa_trc(fabric->fcs, pid);
1232 1232
1233 /** 1233 /*
1234 * Look for our own FLOGI frames being looped back. This means an 1234 * Look for our own FLOGI frames being looped back. This means an
1235 * external loopback cable is in place. Our own FLOGI frames are 1235 * external loopback cable is in place. Our own FLOGI frames are
1236 * sometimes looped back when switch port gets temporarily bypassed. 1236 * sometimes looped back when switch port gets temporarily bypassed.
@@ -1242,7 +1242,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1242 return; 1242 return;
1243 } 1243 }
1244 1244
1245 /** 1245 /*
1246 * FLOGI/EVFP exchanges should be consumed by base fabric. 1246 * FLOGI/EVFP exchanges should be consumed by base fabric.
1247 */ 1247 */
1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { 1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
@@ -1252,7 +1252,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1252 } 1252 }
1253 1253
1254 if (fabric->bport.pid == pid) { 1254 if (fabric->bport.pid == pid) {
1255 /** 1255 /*
1256 * All authentication frames should be routed to auth 1256 * All authentication frames should be routed to auth
1257 */ 1257 */
1258 bfa_trc(fabric->fcs, els_cmd->els_code); 1258 bfa_trc(fabric->fcs, els_cmd->els_code);
@@ -1266,7 +1266,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1266 return; 1266 return;
1267 } 1267 }
1268 1268
1269 /** 1269 /*
1270 * look for a matching local port ID 1270 * look for a matching local port ID
1271 */ 1271 */
1272 list_for_each(qe, &fabric->vport_q) { 1272 list_for_each(qe, &fabric->vport_q) {
@@ -1280,7 +1280,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1280 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); 1280 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
1281} 1281}
1282 1282
1283/** 1283/*
1284 * Unsolicited frames to be processed by fabric. 1284 * Unsolicited frames to be processed by fabric.
1285 */ 1285 */
1286static void 1286static void
@@ -1304,7 +1304,7 @@ bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1304 } 1304 }
1305} 1305}
1306 1306
1307/** 1307/*
1308 * Process incoming FLOGI 1308 * Process incoming FLOGI
1309 */ 1309 */
1310static void 1310static void
@@ -1329,7 +1329,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
1329 return; 1329 return;
1330 } 1330 }
1331 1331
1332 fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred); 1332 fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
1333 bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; 1333 bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
1334 bport->port_topo.pn2n.reply_oxid = fchs->ox_id; 1334 bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
1335 1335
@@ -1351,7 +1351,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1351 struct fchs_s fchs; 1351 struct fchs_s fchs;
1352 1352
1353 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); 1353 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
1354 /** 1354 /*
1355 * Do not expect this failure -- expect remote node to retry 1355 * Do not expect this failure -- expect remote node to retry
1356 */ 1356 */
1357 if (!fcxp) 1357 if (!fcxp)
@@ -1370,7 +1370,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1370 FC_MAX_PDUSZ, 0); 1370 FC_MAX_PDUSZ, 0);
1371} 1371}
1372 1372
1373/** 1373/*
1374 * Flogi Acc completion callback. 1374 * Flogi Acc completion callback.
1375 */ 1375 */
1376static void 1376static void
@@ -1417,130 +1417,7 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1417 } 1417 }
1418} 1418}
1419 1419
1420/** 1420/*
1421 * fcs_vf_api virtual fabrics API
1422 */
1423
1424/**
1425 * Enable VF mode.
1426 *
1427 * @param[in] fcs fcs module instance
1428 * @param[in] vf_id default vf_id of port, FC_VF_ID_NULL
1429 * to use standard default vf_id of 1.
1430 *
1431 * @retval BFA_STATUS_OK vf mode is enabled
1432 * @retval BFA_STATUS_BUSY Port is active. Port must be disabled
1433 * before VF mode can be enabled.
1434 */
1435bfa_status_t
1436bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
1437{
1438 return BFA_STATUS_OK;
1439}
1440
1441/**
1442 * Disable VF mode.
1443 *
1444 * @param[in] fcs fcs module instance
1445 *
1446 * @retval BFA_STATUS_OK vf mode is disabled
1447 * @retval BFA_STATUS_BUSY VFs are present and being used. All
1448 * VFs must be deleted before disabling
1449 * VF mode.
1450 */
1451bfa_status_t
1452bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
1453{
1454 return BFA_STATUS_OK;
1455}
1456
1457/**
1458 * Create a new VF instance.
1459 *
1460 * A new VF is created using the given VF configuration. A VF is identified
1461 * by VF id. No duplicate VF creation is allowed with the same VF id. Once
1462 * a VF is created, VF is automatically started after link initialization
1463 * and EVFP exchange is completed.
1464 *
1465 * param[in] vf - FCS vf data structure. Memory is
1466 * allocated by caller (driver)
1467 * param[in] fcs - FCS module
1468 * param[in] vf_cfg - VF configuration
1469 * param[in] vf_drv - Opaque handle back to the driver's
1470 * virtual vf structure
1471 *
1472 * retval BFA_STATUS_OK VF creation is successful
1473 * retval BFA_STATUS_FAILED VF creation failed
1474 * retval BFA_STATUS_EEXIST A VF exists with the given vf_id
1475 */
1476bfa_status_t
1477bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
1478 struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
1479{
1480 bfa_trc(fcs, vf_id);
1481 return BFA_STATUS_OK;
1482}
1483
1484/**
1485 * Use this function to delete a BFA VF object. VF object should
1486 * be stopped before this function call.
1487 *
1488 * param[in] vf - pointer to bfa_vf_t.
1489 *
1490 * retval BFA_STATUS_OK On vf deletion success
1491 * retval BFA_STATUS_BUSY VF is not in a stopped state
1492 * retval BFA_STATUS_INPROGRESS VF deletion in in progress
1493 */
1494bfa_status_t
1495bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
1496{
1497 bfa_trc(vf->fcs, vf->vf_id);
1498 return BFA_STATUS_OK;
1499}
1500
1501
1502/**
1503 * Returns attributes of the given VF.
1504 *
1505 * param[in] vf pointer to bfa_vf_t.
1506 * param[out] vf_attr vf attributes returned
1507 *
1508 * return None
1509 */
1510void
1511bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
1512{
1513 bfa_trc(vf->fcs, vf->vf_id);
1514}
1515
1516/**
1517 * Return statistics associated with the given vf.
1518 *
1519 * param[in] vf pointer to bfa_vf_t.
1520 * param[out] vf_stats vf statistics returned
1521 *
1522 * @return None
1523 */
1524void
1525bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
1526{
1527 bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
1528}
1529
1530/**
1531 * clear statistics associated with the given vf.
1532 *
1533 * param[in] vf pointer to bfa_vf_t.
1534 *
1535 * @return None
1536 */
1537void
1538bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
1539{
1540 bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
1541}
1542
1543/**
1544 * Returns FCS vf structure for a given vf_id. 1421 * Returns FCS vf structure for a given vf_id.
1545 * 1422 *
1546 * param[in] vf_id - VF_ID 1423 * param[in] vf_id - VF_ID
@@ -1558,81 +1435,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
1558 return NULL; 1435 return NULL;
1559} 1436}
1560 1437
1561/** 1438/*
1562 * Return the list of VFs configured.
1563 *
1564 * param[in] fcs fcs module instance
1565 * param[out] vf_ids returned list of vf_ids
1566 * param[in,out] nvfs in:size of vf_ids array,
1567 * out:total elements present,
1568 * actual elements returned is limited by the size
1569 *
1570 * return Driver VF structure
1571 */
1572void
1573bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
1574{
1575 bfa_trc(fcs, *nvfs);
1576}
1577
1578/**
1579 * Return the list of all VFs visible from fabric.
1580 *
1581 * param[in] fcs fcs module instance
1582 * param[out] vf_ids returned list of vf_ids
1583 * param[in,out] nvfs in:size of vf_ids array,
1584 * out:total elements present,
1585 * actual elements returned is limited by the size
1586 *
1587 * return Driver VF structure
1588 */
1589void
1590bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
1591{
1592 bfa_trc(fcs, *nvfs);
1593}
1594
1595/**
1596 * Return the list of local logical ports present in the given VF.
1597 *
1598 * param[in] vf vf for which logical ports are returned
1599 * param[out] lpwwn returned logical port wwn list
1600 * param[in,out] nlports in:size of lpwwn list;
1601 * out:total elements present,
1602 * actual elements returned is limited by the size
1603 */
1604void
1605bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
1606{
1607 struct list_head *qe;
1608 struct bfa_fcs_vport_s *vport;
1609 int i;
1610 struct bfa_fcs_s *fcs;
1611
1612 if (vf == NULL || lpwwn == NULL || *nlports == 0)
1613 return;
1614
1615 fcs = vf->fcs;
1616
1617 bfa_trc(fcs, vf->vf_id);
1618 bfa_trc(fcs, (u32) *nlports);
1619
1620 i = 0;
1621 lpwwn[i++] = vf->bport.port_cfg.pwwn;
1622
1623 list_for_each(qe, &vf->vport_q) {
1624 if (i >= *nlports)
1625 break;
1626
1627 vport = (struct bfa_fcs_vport_s *) qe;
1628 lpwwn[i++] = vport->lport.port_cfg.pwwn;
1629 }
1630
1631 bfa_trc(fcs, i);
1632 *nlports = i;
1633}
1634
1635/**
1636 * BFA FCS PPORT ( physical port) 1439 * BFA FCS PPORT ( physical port)
1637 */ 1440 */
1638static void 1441static void
@@ -1662,11 +1465,11 @@ bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
1662 bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); 1465 bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
1663} 1466}
1664 1467
1665/** 1468/*
1666 * BFA FCS UF ( Unsolicited Frames) 1469 * BFA FCS UF ( Unsolicited Frames)
1667 */ 1470 */
1668 1471
1669/** 1472/*
1670 * BFA callback for unsolicited frame receive handler. 1473 * BFA callback for unsolicited frame receive handler.
1671 * 1474 *
1672 * @param[in] cbarg callback arg for receive handler 1475 * @param[in] cbarg callback arg for receive handler
@@ -1683,7 +1486,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
1683 struct fc_vft_s *vft; 1486 struct fc_vft_s *vft;
1684 struct bfa_fcs_fabric_s *fabric; 1487 struct bfa_fcs_fabric_s *fabric;
1685 1488
1686 /** 1489 /*
1687 * check for VFT header 1490 * check for VFT header
1688 */ 1491 */
1689 if (fchs->routing == FC_RTG_EXT_HDR && 1492 if (fchs->routing == FC_RTG_EXT_HDR &&
@@ -1695,7 +1498,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
1695 else 1498 else
1696 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); 1499 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
1697 1500
1698 /** 1501 /*
1699 * drop frame if vfid is unknown 1502 * drop frame if vfid is unknown
1700 */ 1503 */
1701 if (!fabric) { 1504 if (!fabric) {
@@ -1705,7 +1508,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
1705 return; 1508 return;
1706 } 1509 }
1707 1510
1708 /** 1511 /*
1709 * skip vft header 1512 * skip vft header
1710 */ 1513 */
1711 fchs = (struct fchs_s *) (vft + 1); 1514 fchs = (struct fchs_s *) (vft + 1);
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index d75045df1e7..9cb6a55977c 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -196,7 +196,7 @@ struct bfa_fcs_fabric_s {
196#define bfa_fcs_fabric_is_switched(__f) \ 196#define bfa_fcs_fabric_is_switched(__f) \
197 ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) 197 ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
198 198
199/** 199/*
200 * The design calls for a single implementation of base fabric and vf. 200 * The design calls for a single implementation of base fabric and vf.
201 */ 201 */
202#define bfa_fcs_vf_t struct bfa_fcs_fabric_s 202#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
@@ -216,7 +216,7 @@ struct bfa_fcs_fabric_s;
216 216
217#define bfa_fcs_lport_t struct bfa_fcs_lport_s 217#define bfa_fcs_lport_t struct bfa_fcs_lport_s
218 218
219/** 219/*
220 * Symbolic Name related defines 220 * Symbolic Name related defines
221 * Total bytes 255. 221 * Total bytes 255.
222 * Physical Port's symbolic name 128 bytes. 222 * Physical Port's symbolic name 128 bytes.
@@ -239,7 +239,7 @@ struct bfa_fcs_fabric_s;
239#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48 239#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
240#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16 240#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
241 241
242/** 242/*
243 * Get FC port ID for a logical port. 243 * Get FC port ID for a logical port.
244 */ 244 */
245#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid) 245#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid)
@@ -262,7 +262,7 @@ bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
262#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \ 262#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \
263 ((_lport)->fabric->fabric_ip_addr) 263 ((_lport)->fabric->fabric_ip_addr)
264 264
265/** 265/*
266 * bfa fcs port public functions 266 * bfa fcs port public functions
267 */ 267 */
268 268
@@ -342,7 +342,7 @@ struct bfa_fcs_vport_s {
342#define bfa_fcs_vport_get_port(vport) \ 342#define bfa_fcs_vport_get_port(vport) \
343 ((struct bfa_fcs_lport_s *)(&vport->port)) 343 ((struct bfa_fcs_lport_s *)(&vport->port))
344 344
345/** 345/*
346 * bfa fcs vport public functions 346 * bfa fcs vport public functions
347 */ 347 */
348bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, 348bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
@@ -393,7 +393,7 @@ struct bfa_fcs_rpf_s {
393 enum bfa_port_speed rpsc_speed; 393 enum bfa_port_speed rpsc_speed;
394 /* Current Speed from RPSC. O if RPSC fails */ 394 /* Current Speed from RPSC. O if RPSC fails */
395 enum bfa_port_speed assigned_speed; 395 enum bfa_port_speed assigned_speed;
396 /** 396 /*
397 * Speed assigned by the user. will be used if RPSC is 397 * Speed assigned by the user. will be used if RPSC is
398 * not supported by the rport. 398 * not supported by the rport.
399 */ 399 */
@@ -434,7 +434,7 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
434 return rport->bfa_rport; 434 return rport->bfa_rport;
435} 435}
436 436
437/** 437/*
438 * bfa fcs rport API functions 438 * bfa fcs rport API functions
439 */ 439 */
440bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn, 440bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
@@ -573,7 +573,7 @@ bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
573 return itnim->bfa_itnim; 573 return itnim->bfa_itnim;
574} 574}
575 575
576/** 576/*
577 * bfa fcs FCP Initiator mode API functions 577 * bfa fcs FCP Initiator mode API functions
578 */ 578 */
579void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim, 579void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
@@ -677,22 +677,9 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs);
677void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); 677void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
678void bfa_fcs_start(struct bfa_fcs_s *fcs); 678void bfa_fcs_start(struct bfa_fcs_s *fcs);
679 679
680/** 680/*
681 * bfa fcs vf public functions 681 * bfa fcs vf public functions
682 */ 682 */
683bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
684bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
685bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
686 u16 vf_id, struct bfa_lport_cfg_s *port_cfg,
687 struct bfad_vf_s *vf_drv);
688bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
689void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
690void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
691void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
692void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
693 struct bfa_vf_stats_s *vf_stats);
694void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
695void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
696bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); 683bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
697u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); 684u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
698 685
@@ -729,11 +716,11 @@ u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
729void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); 716void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
730void bfa_fcs_port_attach(struct bfa_fcs_s *fcs); 717void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
731 718
732/** 719/*
733 * BFA FCS callback interfaces 720 * BFA FCS callback interfaces
734 */ 721 */
735 722
736/** 723/*
737 * fcb Main fcs callbacks 724 * fcb Main fcs callbacks
738 */ 725 */
739 726
@@ -742,7 +729,7 @@ struct bfad_vf_s;
742struct bfad_vport_s; 729struct bfad_vport_s;
743struct bfad_rport_s; 730struct bfad_rport_s;
744 731
745/** 732/*
746 * lport callbacks 733 * lport callbacks
747 */ 734 */
748struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad, 735struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
@@ -754,19 +741,19 @@ void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
754 struct bfad_vf_s *vf_drv, 741 struct bfad_vf_s *vf_drv,
755 struct bfad_vport_s *vp_drv); 742 struct bfad_vport_s *vp_drv);
756 743
757/** 744/*
758 * vport callbacks 745 * vport callbacks
759 */ 746 */
760void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s); 747void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
761 748
762/** 749/*
763 * rport callbacks 750 * rport callbacks
764 */ 751 */
765bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, 752bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
766 struct bfa_fcs_rport_s **rport, 753 struct bfa_fcs_rport_s **rport,
767 struct bfad_rport_s **rport_drv); 754 struct bfad_rport_s **rport_drv);
768 755
769/** 756/*
770 * itnim callbacks 757 * itnim callbacks
771 */ 758 */
772void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, 759void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 569dfefab70..9662bcdeb41 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * fcpim.c - FCP initiator mode i-t nexus state machine 19 * fcpim.c - FCP initiator mode i-t nexus state machine
20 */ 20 */
21 21
@@ -38,7 +38,7 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
38 bfa_status_t req_status, u32 rsp_len, 38 bfa_status_t req_status, u32 rsp_len,
39 u32 resid_len, struct fchs_s *rsp_fchs); 39 u32 resid_len, struct fchs_s *rsp_fchs);
40 40
41/** 41/*
42 * fcs_itnim_sm FCS itnim state machine events 42 * fcs_itnim_sm FCS itnim state machine events
43 */ 43 */
44 44
@@ -84,7 +84,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
84 {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, 84 {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
85}; 85};
86 86
87/** 87/*
88 * fcs_itnim_sm FCS itnim state machine 88 * fcs_itnim_sm FCS itnim state machine
89 */ 89 */
90 90
@@ -494,11 +494,11 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
494 494
495 495
496 496
497/** 497/*
498 * itnim_public FCS ITNIM public interfaces 498 * itnim_public FCS ITNIM public interfaces
499 */ 499 */
500 500
501/** 501/*
502 * Called by rport when a new rport is created. 502 * Called by rport when a new rport is created.
503 * 503 *
504 * @param[in] rport - remote port. 504 * @param[in] rport - remote port.
@@ -554,7 +554,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
554 return itnim; 554 return itnim;
555} 555}
556 556
557/** 557/*
558 * Called by rport to delete the instance of FCPIM. 558 * Called by rport to delete the instance of FCPIM.
559 * 559 *
560 * @param[in] rport - remote port. 560 * @param[in] rport - remote port.
@@ -566,7 +566,7 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
566 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); 566 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
567} 567}
568 568
569/** 569/*
570 * Notification from rport that PLOGI is complete to initiate FC-4 session. 570 * Notification from rport that PLOGI is complete to initiate FC-4 session.
571 */ 571 */
572void 572void
@@ -586,7 +586,7 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
586 } 586 }
587} 587}
588 588
589/** 589/*
590 * Called by rport to handle a remote device offline. 590 * Called by rport to handle a remote device offline.
591 */ 591 */
592void 592void
@@ -596,7 +596,7 @@ bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim)
596 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); 596 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
597} 597}
598 598
599/** 599/*
600 * Called by rport when remote port is known to be an initiator from 600 * Called by rport when remote port is known to be an initiator from
601 * PRLI received. 601 * PRLI received.
602 */ 602 */
@@ -608,7 +608,7 @@ bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
608 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); 608 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
609} 609}
610 610
611/** 611/*
612 * Called by rport to check if the itnim is online. 612 * Called by rport to check if the itnim is online.
613 */ 613 */
614bfa_status_t 614bfa_status_t
@@ -625,7 +625,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
625 } 625 }
626} 626}
627 627
628/** 628/*
629 * BFA completion callback for bfa_itnim_online(). 629 * BFA completion callback for bfa_itnim_online().
630 */ 630 */
631void 631void
@@ -637,7 +637,7 @@ bfa_cb_itnim_online(void *cbarg)
637 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); 637 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
638} 638}
639 639
640/** 640/*
641 * BFA completion callback for bfa_itnim_offline(). 641 * BFA completion callback for bfa_itnim_offline().
642 */ 642 */
643void 643void
@@ -649,7 +649,7 @@ bfa_cb_itnim_offline(void *cb_arg)
649 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); 649 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
650} 650}
651 651
652/** 652/*
653 * Mark the beginning of PATH TOV handling. IO completion callbacks 653 * Mark the beginning of PATH TOV handling. IO completion callbacks
654 * are still pending. 654 * are still pending.
655 */ 655 */
@@ -661,7 +661,7 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
661 bfa_trc(itnim->fcs, itnim->rport->pwwn); 661 bfa_trc(itnim->fcs, itnim->rport->pwwn);
662} 662}
663 663
664/** 664/*
665 * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. 665 * Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
666 */ 666 */
667void 667void
@@ -674,7 +674,7 @@ bfa_cb_itnim_tov(void *cb_arg)
674 itnim_drv->state = ITNIM_STATE_TIMEOUT; 674 itnim_drv->state = ITNIM_STATE_TIMEOUT;
675} 675}
676 676
677/** 677/*
678 * BFA notification to FCS/driver for second level error recovery. 678 * BFA notification to FCS/driver for second level error recovery.
679 * 679 *
680 * Atleast one I/O request has timedout and target is unresponsive to 680 * Atleast one I/O request has timedout and target is unresponsive to
@@ -736,7 +736,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
736 if (itnim == NULL) 736 if (itnim == NULL)
737 return BFA_STATUS_NO_FCPIM_NEXUS; 737 return BFA_STATUS_NO_FCPIM_NEXUS;
738 738
739 bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); 739 memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
740 740
741 return BFA_STATUS_OK; 741 return BFA_STATUS_OK;
742} 742}
@@ -753,7 +753,7 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
753 if (itnim == NULL) 753 if (itnim == NULL)
754 return BFA_STATUS_NO_FCPIM_NEXUS; 754 return BFA_STATUS_NO_FCPIM_NEXUS;
755 755
756 bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); 756 memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
757 return BFA_STATUS_OK; 757 return BFA_STATUS_OK;
758} 758}
759 759
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index b522bf30247..377cbfff6f2 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -15,10 +15,6 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/**
19 * bfa_fcs_lport.c BFA FCS port
20 */
21
22#include "bfa_fcs.h" 18#include "bfa_fcs.h"
23#include "bfa_fcbuild.h" 19#include "bfa_fcbuild.h"
24#include "bfa_fc.h" 20#include "bfa_fc.h"
@@ -26,10 +22,6 @@
26 22
27BFA_TRC_FILE(FCS, PORT); 23BFA_TRC_FILE(FCS, PORT);
28 24
29/**
30 * Forward declarations
31 */
32
33static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, 25static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
34 struct fchs_s *rx_fchs, u8 reason_code, 26 struct fchs_s *rx_fchs, u8 reason_code,
35 u8 reason_code_expl); 27 u8 reason_code_expl);
@@ -72,7 +64,7 @@ static struct {
72 bfa_fcs_lport_n2n_offline}, 64 bfa_fcs_lport_n2n_offline},
73 }; 65 };
74 66
75/** 67/*
76 * fcs_port_sm FCS logical port state machine 68 * fcs_port_sm FCS logical port state machine
77 */ 69 */
78 70
@@ -240,7 +232,7 @@ bfa_fcs_lport_sm_deleting(
240 } 232 }
241} 233}
242 234
243/** 235/*
244 * fcs_port_pvt 236 * fcs_port_pvt
245 */ 237 */
246 238
@@ -272,7 +264,7 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
272 FC_MAX_PDUSZ, 0); 264 FC_MAX_PDUSZ, 0);
273} 265}
274 266
275/** 267/*
276 * Process incoming plogi from a remote port. 268 * Process incoming plogi from a remote port.
277 */ 269 */
278static void 270static void
@@ -303,7 +295,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
303 return; 295 return;
304 } 296 }
305 297
306 /** 298 /*
307 * Direct Attach P2P mode : verify address assigned by the r-port. 299 * Direct Attach P2P mode : verify address assigned by the r-port.
308 */ 300 */
309 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && 301 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -319,12 +311,12 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
319 port->pid = rx_fchs->d_id; 311 port->pid = rx_fchs->d_id;
320 } 312 }
321 313
322 /** 314 /*
323 * First, check if we know the device by pwwn. 315 * First, check if we know the device by pwwn.
324 */ 316 */
325 rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name); 317 rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
326 if (rport) { 318 if (rport) {
327 /** 319 /*
328 * Direct Attach P2P mode : handle address assigned by r-port. 320 * Direct Attach P2P mode : handle address assigned by r-port.
329 */ 321 */
330 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && 322 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -337,37 +329,37 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
337 return; 329 return;
338 } 330 }
339 331
340 /** 332 /*
341 * Next, lookup rport by PID. 333 * Next, lookup rport by PID.
342 */ 334 */
343 rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id); 335 rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
344 if (!rport) { 336 if (!rport) {
345 /** 337 /*
346 * Inbound PLOGI from a new device. 338 * Inbound PLOGI from a new device.
347 */ 339 */
348 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 340 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
349 return; 341 return;
350 } 342 }
351 343
352 /** 344 /*
353 * Rport is known only by PID. 345 * Rport is known only by PID.
354 */ 346 */
355 if (rport->pwwn) { 347 if (rport->pwwn) {
356 /** 348 /*
357 * This is a different device with the same pid. Old device 349 * This is a different device with the same pid. Old device
358 * disappeared. Send implicit LOGO to old device. 350 * disappeared. Send implicit LOGO to old device.
359 */ 351 */
360 bfa_assert(rport->pwwn != plogi->port_name); 352 bfa_assert(rport->pwwn != plogi->port_name);
361 bfa_fcs_rport_logo_imp(rport); 353 bfa_fcs_rport_logo_imp(rport);
362 354
363 /** 355 /*
364 * Inbound PLOGI from a new device (with old PID). 356 * Inbound PLOGI from a new device (with old PID).
365 */ 357 */
366 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 358 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
367 return; 359 return;
368 } 360 }
369 361
370 /** 362 /*
371 * PLOGI crossing each other. 363 * PLOGI crossing each other.
372 */ 364 */
373 bfa_assert(rport->pwwn == WWN_NULL); 365 bfa_assert(rport->pwwn == WWN_NULL);
@@ -479,12 +471,12 @@ static void
479bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port, 471bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
480 struct fc_rnid_general_topology_data_s *gen_topo_data) 472 struct fc_rnid_general_topology_data_s *gen_topo_data)
481{ 473{
482 bfa_os_memset(gen_topo_data, 0, 474 memset(gen_topo_data, 0,
483 sizeof(struct fc_rnid_general_topology_data_s)); 475 sizeof(struct fc_rnid_general_topology_data_s));
484 476
485 gen_topo_data->asso_type = bfa_os_htonl(RNID_ASSOCIATED_TYPE_HOST); 477 gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST);
486 gen_topo_data->phy_port_num = 0; /* @todo */ 478 gen_topo_data->phy_port_num = 0; /* @todo */
487 gen_topo_data->num_attached_nodes = bfa_os_htonl(1); 479 gen_topo_data->num_attached_nodes = cpu_to_be32(1);
488} 480}
489 481
490static void 482static void
@@ -598,10 +590,10 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
598 590
599 591
600 592
601/** 593/*
602 * fcs_lport_api BFA FCS port API 594 * fcs_lport_api BFA FCS port API
603 */ 595 */
604/** 596/*
605 * Module initialization 597 * Module initialization
606 */ 598 */
607void 599void
@@ -610,7 +602,7 @@ bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
610 602
611} 603}
612 604
613/** 605/*
614 * Module cleanup 606 * Module cleanup
615 */ 607 */
616void 608void
@@ -619,7 +611,7 @@ bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
619 bfa_fcs_modexit_comp(fcs); 611 bfa_fcs_modexit_comp(fcs);
620} 612}
621 613
622/** 614/*
623 * Unsolicited frame receive handling. 615 * Unsolicited frame receive handling.
624 */ 616 */
625void 617void
@@ -637,7 +629,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
637 return; 629 return;
638 } 630 }
639 631
640 /** 632 /*
641 * First, handle ELSs that donot require a login. 633 * First, handle ELSs that donot require a login.
642 */ 634 */
643 /* 635 /*
@@ -673,7 +665,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
673 bfa_fcs_lport_abts_acc(lport, fchs); 665 bfa_fcs_lport_abts_acc(lport, fchs);
674 return; 666 return;
675 } 667 }
676 /** 668 /*
677 * look for a matching remote port ID 669 * look for a matching remote port ID
678 */ 670 */
679 rport = bfa_fcs_lport_get_rport_by_pid(lport, pid); 671 rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
@@ -686,7 +678,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
686 return; 678 return;
687 } 679 }
688 680
689 /** 681 /*
690 * Only handles ELS frames for now. 682 * Only handles ELS frames for now.
691 */ 683 */
692 if (fchs->type != FC_TYPE_ELS) { 684 if (fchs->type != FC_TYPE_ELS) {
@@ -702,20 +694,20 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
702 } 694 }
703 695
704 if (els_cmd->els_code == FC_ELS_LOGO) { 696 if (els_cmd->els_code == FC_ELS_LOGO) {
705 /** 697 /*
706 * @todo Handle LOGO frames received. 698 * @todo Handle LOGO frames received.
707 */ 699 */
708 return; 700 return;
709 } 701 }
710 702
711 if (els_cmd->els_code == FC_ELS_PRLI) { 703 if (els_cmd->els_code == FC_ELS_PRLI) {
712 /** 704 /*
713 * @todo Handle PRLI frames received. 705 * @todo Handle PRLI frames received.
714 */ 706 */
715 return; 707 return;
716 } 708 }
717 709
718 /** 710 /*
719 * Unhandled ELS frames. Send a LS_RJT. 711 * Unhandled ELS frames. Send a LS_RJT.
720 */ 712 */
721 bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, 713 bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
@@ -723,7 +715,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
723 715
724} 716}
725 717
726/** 718/*
727 * PID based Lookup for a R-Port in the Port R-Port Queue 719 * PID based Lookup for a R-Port in the Port R-Port Queue
728 */ 720 */
729struct bfa_fcs_rport_s * 721struct bfa_fcs_rport_s *
@@ -742,7 +734,7 @@ bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
742 return NULL; 734 return NULL;
743} 735}
744 736
745/** 737/*
746 * PWWN based Lookup for a R-Port in the Port R-Port Queue 738 * PWWN based Lookup for a R-Port in the Port R-Port Queue
747 */ 739 */
748struct bfa_fcs_rport_s * 740struct bfa_fcs_rport_s *
@@ -761,7 +753,7 @@ bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn)
761 return NULL; 753 return NULL;
762} 754}
763 755
764/** 756/*
765 * NWWN based Lookup for a R-Port in the Port R-Port Queue 757 * NWWN based Lookup for a R-Port in the Port R-Port Queue
766 */ 758 */
767struct bfa_fcs_rport_s * 759struct bfa_fcs_rport_s *
@@ -780,7 +772,7 @@ bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
780 return NULL; 772 return NULL;
781} 773}
782 774
783/** 775/*
784 * Called by rport module when new rports are discovered. 776 * Called by rport module when new rports are discovered.
785 */ 777 */
786void 778void
@@ -792,7 +784,7 @@ bfa_fcs_lport_add_rport(
792 port->num_rports++; 784 port->num_rports++;
793} 785}
794 786
795/** 787/*
796 * Called by rport module to when rports are deleted. 788 * Called by rport module to when rports are deleted.
797 */ 789 */
798void 790void
@@ -807,7 +799,7 @@ bfa_fcs_lport_del_rport(
807 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT); 799 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
808} 800}
809 801
810/** 802/*
811 * Called by fabric for base port when fabric login is complete. 803 * Called by fabric for base port when fabric login is complete.
812 * Called by vport for virtual ports when FDISC is complete. 804 * Called by vport for virtual ports when FDISC is complete.
813 */ 805 */
@@ -817,7 +809,7 @@ bfa_fcs_lport_online(struct bfa_fcs_lport_s *port)
817 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); 809 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
818} 810}
819 811
820/** 812/*
821 * Called by fabric for base port when fabric goes offline. 813 * Called by fabric for base port when fabric goes offline.
822 * Called by vport for virtual ports when virtual port becomes offline. 814 * Called by vport for virtual ports when virtual port becomes offline.
823 */ 815 */
@@ -827,7 +819,7 @@ bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
827 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); 819 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
828} 820}
829 821
830/** 822/*
831 * Called by fabric to delete base lport and associated resources. 823 * Called by fabric to delete base lport and associated resources.
832 * 824 *
833 * Called by vport to delete lport and associated resources. Should call 825 * Called by vport to delete lport and associated resources. Should call
@@ -839,7 +831,7 @@ bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port)
839 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); 831 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
840} 832}
841 833
842/** 834/*
843 * Return TRUE if port is online, else return FALSE 835 * Return TRUE if port is online, else return FALSE
844 */ 836 */
845bfa_boolean_t 837bfa_boolean_t
@@ -848,7 +840,7 @@ bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port)
848 return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online); 840 return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
849} 841}
850 842
851/** 843/*
852 * Attach time initialization of logical ports. 844 * Attach time initialization of logical ports.
853 */ 845 */
854void 846void
@@ -865,7 +857,7 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
865 lport->num_rports = 0; 857 lport->num_rports = 0;
866} 858}
867 859
868/** 860/*
869 * Logical port initialization of base or virtual port. 861 * Logical port initialization of base or virtual port.
870 * Called by fabric for base port or by vport for virtual ports. 862 * Called by fabric for base port or by vport for virtual ports.
871 */ 863 */
@@ -878,7 +870,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
878 struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad; 870 struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad;
879 char lpwwn_buf[BFA_STRING_32]; 871 char lpwwn_buf[BFA_STRING_32];
880 872
881 bfa_os_assign(lport->port_cfg, *port_cfg); 873 lport->port_cfg = *port_cfg;
882 874
883 lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport, 875 lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport,
884 lport->port_cfg.roles, 876 lport->port_cfg.roles,
@@ -894,7 +886,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
894 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 886 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
895} 887}
896 888
897/** 889/*
898 * fcs_lport_api 890 * fcs_lport_api
899 */ 891 */
900 892
@@ -934,11 +926,11 @@ bfa_fcs_lport_get_attr(
934 } 926 }
935} 927}
936 928
937/** 929/*
938 * bfa_fcs_lport_fab port fab functions 930 * bfa_fcs_lport_fab port fab functions
939 */ 931 */
940 932
941/** 933/*
942 * Called by port to initialize fabric services of the base port. 934 * Called by port to initialize fabric services of the base port.
943 */ 935 */
944static void 936static void
@@ -949,7 +941,7 @@ bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port)
949 bfa_fcs_lport_ms_init(port); 941 bfa_fcs_lport_ms_init(port);
950} 942}
951 943
952/** 944/*
953 * Called by port to notify transition to online state. 945 * Called by port to notify transition to online state.
954 */ 946 */
955static void 947static void
@@ -959,7 +951,7 @@ bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
959 bfa_fcs_lport_scn_online(port); 951 bfa_fcs_lport_scn_online(port);
960} 952}
961 953
962/** 954/*
963 * Called by port to notify transition to offline state. 955 * Called by port to notify transition to offline state.
964 */ 956 */
965static void 957static void
@@ -970,11 +962,11 @@ bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port)
970 bfa_fcs_lport_ms_offline(port); 962 bfa_fcs_lport_ms_offline(port);
971} 963}
972 964
973/** 965/*
974 * bfa_fcs_lport_n2n functions 966 * bfa_fcs_lport_n2n functions
975 */ 967 */
976 968
977/** 969/*
978 * Called by fcs/port to initialize N2N topology. 970 * Called by fcs/port to initialize N2N topology.
979 */ 971 */
980static void 972static void
@@ -982,7 +974,7 @@ bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port)
982{ 974{
983} 975}
984 976
985/** 977/*
986 * Called by fcs/port to notify transition to online state. 978 * Called by fcs/port to notify transition to online state.
987 */ 979 */
988static void 980static void
@@ -1006,7 +998,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
1006 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, 998 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
1007 sizeof(wwn_t)) > 0) { 999 sizeof(wwn_t)) > 0) {
1008 port->pid = N2N_LOCAL_PID; 1000 port->pid = N2N_LOCAL_PID;
1009 /** 1001 /*
1010 * First, check if we know the device by pwwn. 1002 * First, check if we know the device by pwwn.
1011 */ 1003 */
1012 rport = bfa_fcs_lport_get_rport_by_pwwn(port, 1004 rport = bfa_fcs_lport_get_rport_by_pwwn(port,
@@ -1035,7 +1027,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
1035 } 1027 }
1036} 1028}
1037 1029
1038/** 1030/*
1039 * Called by fcs/port to notify transition to offline state. 1031 * Called by fcs/port to notify transition to offline state.
1040 */ 1032 */
1041static void 1033static void
@@ -1094,11 +1086,11 @@ static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1094 struct bfa_fcs_fdmi_hba_attr_s *hba_attr); 1086 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
1095static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, 1087static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1096 struct bfa_fcs_fdmi_port_attr_s *port_attr); 1088 struct bfa_fcs_fdmi_port_attr_s *port_attr);
1097/** 1089/*
1098 * fcs_fdmi_sm FCS FDMI state machine 1090 * fcs_fdmi_sm FCS FDMI state machine
1099 */ 1091 */
1100 1092
1101/** 1093/*
1102 * FDMI State Machine events 1094 * FDMI State Machine events
1103 */ 1095 */
1104enum port_fdmi_event { 1096enum port_fdmi_event {
@@ -1143,7 +1135,7 @@ static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
1143static void bfa_fcs_lport_fdmi_sm_disabled( 1135static void bfa_fcs_lport_fdmi_sm_disabled(
1144 struct bfa_fcs_lport_fdmi_s *fdmi, 1136 struct bfa_fcs_lport_fdmi_s *fdmi,
1145 enum port_fdmi_event event); 1137 enum port_fdmi_event event);
1146/** 1138/*
1147 * Start in offline state - awaiting MS to send start. 1139 * Start in offline state - awaiting MS to send start.
1148 */ 1140 */
1149static void 1141static void
@@ -1510,7 +1502,7 @@ bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
1510 bfa_sm_fault(port->fcs, event); 1502 bfa_sm_fault(port->fcs, event);
1511 } 1503 }
1512} 1504}
1513/** 1505/*
1514 * FDMI is disabled state. 1506 * FDMI is disabled state.
1515 */ 1507 */
1516static void 1508static void
@@ -1525,7 +1517,7 @@ bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi,
1525 /* No op State. It can only be enabled at Driver Init. */ 1517 /* No op State. It can only be enabled at Driver Init. */
1526} 1518}
1527 1519
1528/** 1520/*
1529* RHBA : Register HBA Attributes. 1521* RHBA : Register HBA Attributes.
1530 */ 1522 */
1531static void 1523static void
@@ -1549,7 +1541,7 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1549 fdmi->fcxp = fcxp; 1541 fdmi->fcxp = fcxp;
1550 1542
1551 pyld = bfa_fcxp_get_reqbuf(fcxp); 1543 pyld = bfa_fcxp_get_reqbuf(fcxp);
1552 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); 1544 memset(pyld, 0, FC_MAX_PDUSZ);
1553 1545
1554 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), 1546 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
1555 FDMI_RHBA); 1547 FDMI_RHBA);
@@ -1584,7 +1576,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1584 bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr); 1576 bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
1585 1577
1586 rhba->hba_id = bfa_fcs_lport_get_pwwn(port); 1578 rhba->hba_id = bfa_fcs_lport_get_pwwn(port);
1587 rhba->port_list.num_ports = bfa_os_htonl(1); 1579 rhba->port_list.num_ports = cpu_to_be32(1);
1588 rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port); 1580 rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port);
1589 1581
1590 len = sizeof(rhba->hba_id) + sizeof(rhba->port_list); 1582 len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
@@ -1601,86 +1593,69 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1601 * Node Name 1593 * Node Name
1602 */ 1594 */
1603 attr = (struct fdmi_attr_s *) curr_ptr; 1595 attr = (struct fdmi_attr_s *) curr_ptr;
1604 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME); 1596 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
1605 attr->len = sizeof(wwn_t); 1597 attr->len = sizeof(wwn_t);
1606 memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len); 1598 memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
1607 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1599 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1608 len += attr->len; 1600 len += attr->len;
1609 count++; 1601 count++;
1610 attr->len = 1602 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1611 bfa_os_htons(attr->len + sizeof(attr->type) +
1612 sizeof(attr->len)); 1603 sizeof(attr->len));
1613 1604
1614 /* 1605 /*
1615 * Manufacturer 1606 * Manufacturer
1616 */ 1607 */
1617 attr = (struct fdmi_attr_s *) curr_ptr; 1608 attr = (struct fdmi_attr_s *) curr_ptr;
1618 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER); 1609 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
1619 attr->len = (u16) strlen(fcs_hba_attr->manufacturer); 1610 attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
1620 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); 1611 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
1621 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1612 attr->len = fc_roundup(attr->len, sizeof(u32));
1622 *fields need
1623 *to be 4 byte
1624 *aligned */
1625 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1613 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1626 len += attr->len; 1614 len += attr->len;
1627 count++; 1615 count++;
1628 attr->len = 1616 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1629 bfa_os_htons(attr->len + sizeof(attr->type) +
1630 sizeof(attr->len)); 1617 sizeof(attr->len));
1631 1618
1632 /* 1619 /*
1633 * Serial Number 1620 * Serial Number
1634 */ 1621 */
1635 attr = (struct fdmi_attr_s *) curr_ptr; 1622 attr = (struct fdmi_attr_s *) curr_ptr;
1636 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM); 1623 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
1637 attr->len = (u16) strlen(fcs_hba_attr->serial_num); 1624 attr->len = (u16) strlen(fcs_hba_attr->serial_num);
1638 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); 1625 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
1639 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1626 attr->len = fc_roundup(attr->len, sizeof(u32));
1640 *fields need
1641 *to be 4 byte
1642 *aligned */
1643 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1627 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1644 len += attr->len; 1628 len += attr->len;
1645 count++; 1629 count++;
1646 attr->len = 1630 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1647 bfa_os_htons(attr->len + sizeof(attr->type) +
1648 sizeof(attr->len)); 1631 sizeof(attr->len));
1649 1632
1650 /* 1633 /*
1651 * Model 1634 * Model
1652 */ 1635 */
1653 attr = (struct fdmi_attr_s *) curr_ptr; 1636 attr = (struct fdmi_attr_s *) curr_ptr;
1654 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL); 1637 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
1655 attr->len = (u16) strlen(fcs_hba_attr->model); 1638 attr->len = (u16) strlen(fcs_hba_attr->model);
1656 memcpy(attr->value, fcs_hba_attr->model, attr->len); 1639 memcpy(attr->value, fcs_hba_attr->model, attr->len);
1657 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1640 attr->len = fc_roundup(attr->len, sizeof(u32));
1658 *fields need
1659 *to be 4 byte
1660 *aligned */
1661 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1641 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1662 len += attr->len; 1642 len += attr->len;
1663 count++; 1643 count++;
1664 attr->len = 1644 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1665 bfa_os_htons(attr->len + sizeof(attr->type) +
1666 sizeof(attr->len)); 1645 sizeof(attr->len));
1667 1646
1668 /* 1647 /*
1669 * Model Desc 1648 * Model Desc
1670 */ 1649 */
1671 attr = (struct fdmi_attr_s *) curr_ptr; 1650 attr = (struct fdmi_attr_s *) curr_ptr;
1672 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC); 1651 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
1673 attr->len = (u16) strlen(fcs_hba_attr->model_desc); 1652 attr->len = (u16) strlen(fcs_hba_attr->model_desc);
1674 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); 1653 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
1675 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1654 attr->len = fc_roundup(attr->len, sizeof(u32));
1676 *fields need
1677 *to be 4 byte
1678 *aligned */
1679 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1655 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1680 len += attr->len; 1656 len += attr->len;
1681 count++; 1657 count++;
1682 attr->len = 1658 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1683 bfa_os_htons(attr->len + sizeof(attr->type) +
1684 sizeof(attr->len)); 1659 sizeof(attr->len));
1685 1660
1686 /* 1661 /*
@@ -1688,18 +1663,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1688 */ 1663 */
1689 if (fcs_hba_attr->hw_version[0] != '\0') { 1664 if (fcs_hba_attr->hw_version[0] != '\0') {
1690 attr = (struct fdmi_attr_s *) curr_ptr; 1665 attr = (struct fdmi_attr_s *) curr_ptr;
1691 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION); 1666 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
1692 attr->len = (u16) strlen(fcs_hba_attr->hw_version); 1667 attr->len = (u16) strlen(fcs_hba_attr->hw_version);
1693 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); 1668 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
1694 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1669 attr->len = fc_roundup(attr->len, sizeof(u32));
1695 *fields need
1696 *to be 4 byte
1697 *aligned */
1698 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1670 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1699 len += attr->len; 1671 len += attr->len;
1700 count++; 1672 count++;
1701 attr->len = 1673 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1702 bfa_os_htons(attr->len + sizeof(attr->type) +
1703 sizeof(attr->len)); 1674 sizeof(attr->len));
1704 } 1675 }
1705 1676
@@ -1707,18 +1678,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1707 * Driver Version 1678 * Driver Version
1708 */ 1679 */
1709 attr = (struct fdmi_attr_s *) curr_ptr; 1680 attr = (struct fdmi_attr_s *) curr_ptr;
1710 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION); 1681 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
1711 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1682 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
1712 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1683 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
1713 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1684 attr->len = fc_roundup(attr->len, sizeof(u32));
1714 *fields need
1715 *to be 4 byte
1716 *aligned */
1717 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1685 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1718 len += attr->len;; 1686 len += attr->len;;
1719 count++; 1687 count++;
1720 attr->len = 1688 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1721 bfa_os_htons(attr->len + sizeof(attr->type) +
1722 sizeof(attr->len)); 1689 sizeof(attr->len));
1723 1690
1724 /* 1691 /*
@@ -1726,18 +1693,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1726 */ 1693 */
1727 if (fcs_hba_attr->option_rom_ver[0] != '\0') { 1694 if (fcs_hba_attr->option_rom_ver[0] != '\0') {
1728 attr = (struct fdmi_attr_s *) curr_ptr; 1695 attr = (struct fdmi_attr_s *) curr_ptr;
1729 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION); 1696 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
1730 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); 1697 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
1731 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); 1698 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
1732 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1699 attr->len = fc_roundup(attr->len, sizeof(u32));
1733 *fields need
1734 *to be 4 byte
1735 *aligned */
1736 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1700 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1737 len += attr->len; 1701 len += attr->len;
1738 count++; 1702 count++;
1739 attr->len = 1703 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1740 bfa_os_htons(attr->len + sizeof(attr->type) +
1741 sizeof(attr->len)); 1704 sizeof(attr->len));
1742 } 1705 }
1743 1706
@@ -1745,18 +1708,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1745 * f/w Version = driver version 1708 * f/w Version = driver version
1746 */ 1709 */
1747 attr = (struct fdmi_attr_s *) curr_ptr; 1710 attr = (struct fdmi_attr_s *) curr_ptr;
1748 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION); 1711 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
1749 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1712 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
1750 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1713 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
1751 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1714 attr->len = fc_roundup(attr->len, sizeof(u32));
1752 *fields need
1753 *to be 4 byte
1754 *aligned */
1755 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1715 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1756 len += attr->len; 1716 len += attr->len;
1757 count++; 1717 count++;
1758 attr->len = 1718 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1759 bfa_os_htons(attr->len + sizeof(attr->type) +
1760 sizeof(attr->len)); 1719 sizeof(attr->len));
1761 1720
1762 /* 1721 /*
@@ -1764,18 +1723,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1764 */ 1723 */
1765 if (fcs_hba_attr->os_name[0] != '\0') { 1724 if (fcs_hba_attr->os_name[0] != '\0') {
1766 attr = (struct fdmi_attr_s *) curr_ptr; 1725 attr = (struct fdmi_attr_s *) curr_ptr;
1767 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME); 1726 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
1768 attr->len = (u16) strlen(fcs_hba_attr->os_name); 1727 attr->len = (u16) strlen(fcs_hba_attr->os_name);
1769 memcpy(attr->value, fcs_hba_attr->os_name, attr->len); 1728 memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
1770 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1729 attr->len = fc_roundup(attr->len, sizeof(u32));
1771 *fields need
1772 *to be 4 byte
1773 *aligned */
1774 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1730 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1775 len += attr->len; 1731 len += attr->len;
1776 count++; 1732 count++;
1777 attr->len = 1733 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1778 bfa_os_htons(attr->len + sizeof(attr->type) +
1779 sizeof(attr->len)); 1734 sizeof(attr->len));
1780 } 1735 }
1781 1736
@@ -1783,22 +1738,20 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1783 * MAX_CT_PAYLOAD 1738 * MAX_CT_PAYLOAD
1784 */ 1739 */
1785 attr = (struct fdmi_attr_s *) curr_ptr; 1740 attr = (struct fdmi_attr_s *) curr_ptr;
1786 attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT); 1741 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
1787 attr->len = sizeof(fcs_hba_attr->max_ct_pyld); 1742 attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
1788 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); 1743 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
1789 len += attr->len; 1744 len += attr->len;
1790 count++; 1745 count++;
1791 attr->len = 1746 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1792 bfa_os_htons(attr->len + sizeof(attr->type) +
1793 sizeof(attr->len)); 1747 sizeof(attr->len));
1794 1748
1795 /* 1749 /*
1796 * Update size of payload 1750 * Update size of payload
1797 */ 1751 */
1798 len += ((sizeof(attr->type) + 1752 len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
1799 sizeof(attr->len)) * count);
1800 1753
1801 rhba->hba_attr_blk.attr_count = bfa_os_htonl(count); 1754 rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
1802 return len; 1755 return len;
1803} 1756}
1804 1757
@@ -1825,7 +1778,7 @@ bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
1825 } 1778 }
1826 1779
1827 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 1780 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
1828 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 1781 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
1829 1782
1830 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1783 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1831 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); 1784 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -1837,7 +1790,7 @@ bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
1837 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 1790 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1838} 1791}
1839 1792
1840/** 1793/*
1841* RPRT : Register Port 1794* RPRT : Register Port
1842 */ 1795 */
1843static void 1796static void
@@ -1861,7 +1814,7 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1861 fdmi->fcxp = fcxp; 1814 fdmi->fcxp = fcxp;
1862 1815
1863 pyld = bfa_fcxp_get_reqbuf(fcxp); 1816 pyld = bfa_fcxp_get_reqbuf(fcxp);
1864 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); 1817 memset(pyld, 0, FC_MAX_PDUSZ);
1865 1818
1866 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), 1819 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
1867 FDMI_RPRT); 1820 FDMI_RPRT);
@@ -1879,7 +1832,7 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1879 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); 1832 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
1880} 1833}
1881 1834
1882/** 1835/*
1883 * This routine builds Port Attribute Block that used in RPA, RPRT commands. 1836 * This routine builds Port Attribute Block that used in RPA, RPRT commands.
1884 */ 1837 */
1885static u16 1838static u16
@@ -1909,56 +1862,54 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1909 * FC4 Types 1862 * FC4 Types
1910 */ 1863 */
1911 attr = (struct fdmi_attr_s *) curr_ptr; 1864 attr = (struct fdmi_attr_s *) curr_ptr;
1912 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES); 1865 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
1913 attr->len = sizeof(fcs_port_attr.supp_fc4_types); 1866 attr->len = sizeof(fcs_port_attr.supp_fc4_types);
1914 memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len); 1867 memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
1915 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1868 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1916 len += attr->len; 1869 len += attr->len;
1917 ++count; 1870 ++count;
1918 attr->len = 1871 attr->len =
1919 bfa_os_htons(attr->len + sizeof(attr->type) + 1872 cpu_to_be16(attr->len + sizeof(attr->type) +
1920 sizeof(attr->len)); 1873 sizeof(attr->len));
1921 1874
1922 /* 1875 /*
1923 * Supported Speed 1876 * Supported Speed
1924 */ 1877 */
1925 attr = (struct fdmi_attr_s *) curr_ptr; 1878 attr = (struct fdmi_attr_s *) curr_ptr;
1926 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED); 1879 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
1927 attr->len = sizeof(fcs_port_attr.supp_speed); 1880 attr->len = sizeof(fcs_port_attr.supp_speed);
1928 memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len); 1881 memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
1929 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1882 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1930 len += attr->len; 1883 len += attr->len;
1931 ++count; 1884 ++count;
1932 attr->len = 1885 attr->len =
1933 bfa_os_htons(attr->len + sizeof(attr->type) + 1886 cpu_to_be16(attr->len + sizeof(attr->type) +
1934 sizeof(attr->len)); 1887 sizeof(attr->len));
1935 1888
1936 /* 1889 /*
1937 * current Port Speed 1890 * current Port Speed
1938 */ 1891 */
1939 attr = (struct fdmi_attr_s *) curr_ptr; 1892 attr = (struct fdmi_attr_s *) curr_ptr;
1940 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED); 1893 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
1941 attr->len = sizeof(fcs_port_attr.curr_speed); 1894 attr->len = sizeof(fcs_port_attr.curr_speed);
1942 memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len); 1895 memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
1943 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1896 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1944 len += attr->len; 1897 len += attr->len;
1945 ++count; 1898 ++count;
1946 attr->len = 1899 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1947 bfa_os_htons(attr->len + sizeof(attr->type) +
1948 sizeof(attr->len)); 1900 sizeof(attr->len));
1949 1901
1950 /* 1902 /*
1951 * max frame size 1903 * max frame size
1952 */ 1904 */
1953 attr = (struct fdmi_attr_s *) curr_ptr; 1905 attr = (struct fdmi_attr_s *) curr_ptr;
1954 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE); 1906 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
1955 attr->len = sizeof(fcs_port_attr.max_frm_size); 1907 attr->len = sizeof(fcs_port_attr.max_frm_size);
1956 memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len); 1908 memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
1957 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1909 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1958 len += attr->len; 1910 len += attr->len;
1959 ++count; 1911 ++count;
1960 attr->len = 1912 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1961 bfa_os_htons(attr->len + sizeof(attr->type) +
1962 sizeof(attr->len)); 1913 sizeof(attr->len));
1963 1914
1964 /* 1915 /*
@@ -1966,18 +1917,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1966 */ 1917 */
1967 if (fcs_port_attr.os_device_name[0] != '\0') { 1918 if (fcs_port_attr.os_device_name[0] != '\0') {
1968 attr = (struct fdmi_attr_s *) curr_ptr; 1919 attr = (struct fdmi_attr_s *) curr_ptr;
1969 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME); 1920 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
1970 attr->len = (u16) strlen(fcs_port_attr.os_device_name); 1921 attr->len = (u16) strlen(fcs_port_attr.os_device_name);
1971 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); 1922 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
1972 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1923 attr->len = fc_roundup(attr->len, sizeof(u32));
1973 *fields need
1974 *to be 4 byte
1975 *aligned */
1976 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1924 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1977 len += attr->len; 1925 len += attr->len;
1978 ++count; 1926 ++count;
1979 attr->len = 1927 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1980 bfa_os_htons(attr->len + sizeof(attr->type) +
1981 sizeof(attr->len)); 1928 sizeof(attr->len));
1982 } 1929 }
1983 /* 1930 /*
@@ -1985,27 +1932,22 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1985 */ 1932 */
1986 if (fcs_port_attr.host_name[0] != '\0') { 1933 if (fcs_port_attr.host_name[0] != '\0') {
1987 attr = (struct fdmi_attr_s *) curr_ptr; 1934 attr = (struct fdmi_attr_s *) curr_ptr;
1988 attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME); 1935 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
1989 attr->len = (u16) strlen(fcs_port_attr.host_name); 1936 attr->len = (u16) strlen(fcs_port_attr.host_name);
1990 memcpy(attr->value, fcs_port_attr.host_name, attr->len); 1937 memcpy(attr->value, fcs_port_attr.host_name, attr->len);
1991 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1938 attr->len = fc_roundup(attr->len, sizeof(u32));
1992 *fields need
1993 *to be 4 byte
1994 *aligned */
1995 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1939 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1996 len += attr->len; 1940 len += attr->len;
1997 ++count; 1941 ++count;
1998 attr->len = 1942 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1999 bfa_os_htons(attr->len + sizeof(attr->type) +
2000 sizeof(attr->len)); 1943 sizeof(attr->len));
2001 } 1944 }
2002 1945
2003 /* 1946 /*
2004 * Update size of payload 1947 * Update size of payload
2005 */ 1948 */
2006 port_attrib->attr_count = bfa_os_htonl(count); 1949 port_attrib->attr_count = cpu_to_be32(count);
2007 len += ((sizeof(attr->type) + 1950 len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
2008 sizeof(attr->len)) * count);
2009 return len; 1951 return len;
2010} 1952}
2011 1953
@@ -2050,7 +1992,7 @@ bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2050 } 1992 }
2051 1993
2052 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 1994 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
2053 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 1995 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
2054 1996
2055 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1997 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
2056 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); 1998 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -2062,7 +2004,7 @@ bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2062 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 2004 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2063} 2005}
2064 2006
2065/** 2007/*
2066* RPA : Register Port Attributes. 2008* RPA : Register Port Attributes.
2067 */ 2009 */
2068static void 2010static void
@@ -2086,15 +2028,13 @@ bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2086 fdmi->fcxp = fcxp; 2028 fdmi->fcxp = fcxp;
2087 2029
2088 pyld = bfa_fcxp_get_reqbuf(fcxp); 2030 pyld = bfa_fcxp_get_reqbuf(fcxp);
2089 bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); 2031 memset(pyld, 0, FC_MAX_PDUSZ);
2090 2032
2091 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), 2033 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
2092 FDMI_RPA); 2034 FDMI_RPA);
2093 2035
2094 attr_len = 2036 attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
2095 bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, 2037 (u8 *) ((struct ct_hdr_s *) pyld + 1));
2096 (u8 *) ((struct ct_hdr_s *) pyld
2097 + 1));
2098 2038
2099 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2039 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
2100 FC_CLASS_3, len + attr_len, &fchs, 2040 FC_CLASS_3, len + attr_len, &fchs,
@@ -2143,7 +2083,7 @@ bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2143 } 2083 }
2144 2084
2145 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 2085 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
2146 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 2086 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
2147 2087
2148 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 2088 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
2149 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); 2089 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -2170,7 +2110,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2170 struct bfa_fcs_lport_s *port = fdmi->ms->port; 2110 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2171 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; 2111 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
2172 2112
2173 bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); 2113 memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
2174 2114
2175 bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc, 2115 bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
2176 hba_attr->manufacturer); 2116 hba_attr->manufacturer);
@@ -2204,7 +2144,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2204 sizeof(driver_info->host_os_patch)); 2144 sizeof(driver_info->host_os_patch));
2205 } 2145 }
2206 2146
2207 hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ); 2147 hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
2208} 2148}
2209 2149
2210void 2150void
@@ -2215,7 +2155,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2215 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; 2155 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
2216 struct bfa_port_attr_s pport_attr; 2156 struct bfa_port_attr_s pport_attr;
2217 2157
2218 bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); 2158 memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
2219 2159
2220 /* 2160 /*
2221 * get pport attributes from hal 2161 * get pport attributes from hal
@@ -2230,17 +2170,17 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2230 /* 2170 /*
2231 * Supported Speeds 2171 * Supported Speeds
2232 */ 2172 */
2233 port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS); 2173 port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS);
2234 2174
2235 /* 2175 /*
2236 * Current Speed 2176 * Current Speed
2237 */ 2177 */
2238 port_attr->curr_speed = bfa_os_htonl(pport_attr.speed); 2178 port_attr->curr_speed = cpu_to_be32(pport_attr.speed);
2239 2179
2240 /* 2180 /*
2241 * Max PDU Size. 2181 * Max PDU Size.
2242 */ 2182 */
2243 port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ); 2183 port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ);
2244 2184
2245 /* 2185 /*
2246 * OS device Name 2186 * OS device Name
@@ -2321,11 +2261,11 @@ static void bfa_fcs_lport_ms_gfn_response(void *fcsarg,
2321 u32 rsp_len, 2261 u32 rsp_len,
2322 u32 resid_len, 2262 u32 resid_len,
2323 struct fchs_s *rsp_fchs); 2263 struct fchs_s *rsp_fchs);
2324/** 2264/*
2325 * fcs_ms_sm FCS MS state machine 2265 * fcs_ms_sm FCS MS state machine
2326 */ 2266 */
2327 2267
2328/** 2268/*
2329 * MS State Machine events 2269 * MS State Machine events
2330 */ 2270 */
2331enum port_ms_event { 2271enum port_ms_event {
@@ -2360,7 +2300,7 @@ static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
2360 enum port_ms_event event); 2300 enum port_ms_event event);
2361static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, 2301static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
2362 enum port_ms_event event); 2302 enum port_ms_event event);
2363/** 2303/*
2364 * Start in offline state - awaiting NS to send start. 2304 * Start in offline state - awaiting NS to send start.
2365 */ 2305 */
2366static void 2306static void
@@ -2432,7 +2372,7 @@ bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
2432 */ 2372 */
2433 bfa_fcs_lport_fdmi_online(ms); 2373 bfa_fcs_lport_fdmi_online(ms);
2434 2374
2435 /** 2375 /*
2436 * if this is a Vport, go to online state. 2376 * if this is a Vport, go to online state.
2437 */ 2377 */
2438 if (ms->port->vport) { 2378 if (ms->port->vport) {
@@ -2595,7 +2535,7 @@ bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
2595 bfa_sm_fault(ms->port->fcs, event); 2535 bfa_sm_fault(ms->port->fcs, event);
2596 } 2536 }
2597} 2537}
2598/** 2538/*
2599 * ms_pvt MS local functions 2539 * ms_pvt MS local functions
2600 */ 2540 */
2601 2541
@@ -2657,12 +2597,12 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2657 } 2597 }
2658 2598
2659 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 2599 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
2660 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 2600 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
2661 2601
2662 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 2602 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
2663 gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1); 2603 gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
2664 2604
2665 num_entries = bfa_os_ntohl(gmal_resp->ms_len); 2605 num_entries = be32_to_cpu(gmal_resp->ms_len);
2666 if (num_entries == 0) { 2606 if (num_entries == 0) {
2667 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); 2607 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
2668 return; 2608 return;
@@ -2795,7 +2735,7 @@ bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
2795 bfa_sm_fault(ms->port->fcs, event); 2735 bfa_sm_fault(ms->port->fcs, event);
2796 } 2736 }
2797} 2737}
2798/** 2738/*
2799 * ms_pvt MS local functions 2739 * ms_pvt MS local functions
2800 */ 2740 */
2801 2741
@@ -2853,7 +2793,7 @@ bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2853 } 2793 }
2854 2794
2855 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 2795 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
2856 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 2796 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
2857 2797
2858 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 2798 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
2859 gfn_resp = (wwn_t *)(cthdr + 1); 2799 gfn_resp = (wwn_t *)(cthdr + 1);
@@ -2871,7 +2811,7 @@ bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2871 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); 2811 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
2872} 2812}
2873 2813
2874/** 2814/*
2875 * ms_pvt MS local functions 2815 * ms_pvt MS local functions
2876 */ 2816 */
2877 2817
@@ -3017,7 +2957,7 @@ bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port)
3017 bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); 2957 bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
3018} 2958}
3019 2959
3020/** 2960/*
3021 * @page ns_sm_info VPORT NS State Machine 2961 * @page ns_sm_info VPORT NS State Machine
3022 * 2962 *
3023 * @section ns_sm_interactions VPORT NS State Machine Interactions 2963 * @section ns_sm_interactions VPORT NS State Machine Interactions
@@ -3080,11 +3020,11 @@ static void bfa_fcs_lport_ns_process_gidft_pids(
3080 u32 *pid_buf, u32 n_pids); 3020 u32 *pid_buf, u32 n_pids);
3081 3021
3082static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port); 3022static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
3083/** 3023/*
3084 * fcs_ns_sm FCS nameserver interface state machine 3024 * fcs_ns_sm FCS nameserver interface state machine
3085 */ 3025 */
3086 3026
3087/** 3027/*
3088 * VPort NS State Machine events 3028 * VPort NS State Machine events
3089 */ 3029 */
3090enum vport_ns_event { 3030enum vport_ns_event {
@@ -3139,7 +3079,7 @@ static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
3139 enum vport_ns_event event); 3079 enum vport_ns_event event);
3140static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, 3080static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
3141 enum vport_ns_event event); 3081 enum vport_ns_event event);
3142/** 3082/*
3143 * Start in offline state - awaiting linkup 3083 * Start in offline state - awaiting linkup
3144 */ 3084 */
3145static void 3085static void
@@ -3628,7 +3568,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
3628 3568
3629 3569
3630 3570
3631/** 3571/*
3632 * ns_pvt Nameserver local functions 3572 * ns_pvt Nameserver local functions
3633 */ 3573 */
3634 3574
@@ -3724,7 +3664,7 @@ bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3724 } 3664 }
3725} 3665}
3726 3666
3727/** 3667/*
3728 * Register the symbolic port name. 3668 * Register the symbolic port name.
3729 */ 3669 */
3730static void 3670static void
@@ -3738,7 +3678,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3738 u8 symbl[256]; 3678 u8 symbl[256];
3739 u8 *psymbl = &symbl[0]; 3679 u8 *psymbl = &symbl[0];
3740 3680
3741 bfa_os_memset(symbl, 0, sizeof(symbl)); 3681 memset(symbl, 0, sizeof(symbl));
3742 3682
3743 bfa_trc(port->fcs, port->port_cfg.pwwn); 3683 bfa_trc(port->fcs, port->port_cfg.pwwn);
3744 3684
@@ -3755,7 +3695,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3755 * for V-Port, form a Port Symbolic Name 3695 * for V-Port, form a Port Symbolic Name
3756 */ 3696 */
3757 if (port->vport) { 3697 if (port->vport) {
3758 /** 3698 /*
3759 * For Vports, we append the vport's port symbolic name 3699 * For Vports, we append the vport's port symbolic name
3760 * to that of the base port. 3700 * to that of the base port.
3761 */ 3701 */
@@ -3815,7 +3755,7 @@ bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3815 } 3755 }
3816 3756
3817 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 3757 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
3818 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 3758 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
3819 3759
3820 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 3760 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
3821 port->stats.ns_rspnid_accepts++; 3761 port->stats.ns_rspnid_accepts++;
@@ -3829,7 +3769,7 @@ bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3829 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3769 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3830} 3770}
3831 3771
3832/** 3772/*
3833 * Register FC4-Types 3773 * Register FC4-Types
3834 */ 3774 */
3835static void 3775static void
@@ -3887,7 +3827,7 @@ bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3887 } 3827 }
3888 3828
3889 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 3829 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
3890 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 3830 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
3891 3831
3892 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 3832 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
3893 port->stats.ns_rftid_accepts++; 3833 port->stats.ns_rftid_accepts++;
@@ -3901,7 +3841,7 @@ bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3901 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3841 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3902} 3842}
3903 3843
3904/** 3844/*
3905 * Register FC4-Features : Should be done after RFT_ID 3845 * Register FC4-Features : Should be done after RFT_ID
3906 */ 3846 */
3907static void 3847static void
@@ -3964,7 +3904,7 @@ bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3964 } 3904 }
3965 3905
3966 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 3906 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
3967 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 3907 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
3968 3908
3969 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 3909 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
3970 port->stats.ns_rffid_accepts++; 3910 port->stats.ns_rffid_accepts++;
@@ -3982,7 +3922,7 @@ bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3982 } else 3922 } else
3983 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3923 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3984} 3924}
3985/** 3925/*
3986 * Query Fabric for FC4-Types Devices. 3926 * Query Fabric for FC4-Types Devices.
3987 * 3927 *
3988* TBD : Need to use a local (FCS private) response buffer, since the response 3928* TBD : Need to use a local (FCS private) response buffer, since the response
@@ -4058,7 +3998,7 @@ bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
4058 } 3998 }
4059 3999
4060 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 4000 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
4061 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 4001 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
4062 4002
4063 switch (cthdr->cmd_rsp_code) { 4003 switch (cthdr->cmd_rsp_code) {
4064 4004
@@ -4102,7 +4042,7 @@ bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
4102 } 4042 }
4103} 4043}
4104 4044
4105/** 4045/*
4106 * This routine will be called by bfa_timer on timer timeouts. 4046 * This routine will be called by bfa_timer on timer timeouts.
4107 * 4047 *
4108 * param[in] port - pointer to bfa_fcs_lport_t. 4048 * param[in] port - pointer to bfa_fcs_lport_t.
@@ -4166,7 +4106,7 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
4166 } 4106 }
4167} 4107}
4168 4108
4169/** 4109/*
4170 * fcs_ns_public FCS nameserver public interfaces 4110 * fcs_ns_public FCS nameserver public interfaces
4171 */ 4111 */
4172 4112
@@ -4227,7 +4167,7 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
4227 } 4167 }
4228} 4168}
4229 4169
4230/** 4170/*
4231 * FCS SCN 4171 * FCS SCN
4232 */ 4172 */
4233 4173
@@ -4250,11 +4190,11 @@ static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
4250 struct fchs_s *rx_fchs); 4190 struct fchs_s *rx_fchs);
4251static void bfa_fcs_lport_scn_timeout(void *arg); 4191static void bfa_fcs_lport_scn_timeout(void *arg);
4252 4192
4253/** 4193/*
4254 * fcs_scm_sm FCS SCN state machine 4194 * fcs_scm_sm FCS SCN state machine
4255 */ 4195 */
4256 4196
4257/** 4197/*
4258 * VPort SCN State Machine events 4198 * VPort SCN State Machine events
4259 */ 4199 */
4260enum port_scn_event { 4200enum port_scn_event {
@@ -4278,7 +4218,7 @@ static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
4278static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, 4218static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
4279 enum port_scn_event event); 4219 enum port_scn_event event);
4280 4220
4281/** 4221/*
4282 * Starting state - awaiting link up. 4222 * Starting state - awaiting link up.
4283 */ 4223 */
4284static void 4224static void
@@ -4382,11 +4322,11 @@ bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
4382 4322
4383 4323
4384 4324
4385/** 4325/*
4386 * fcs_scn_private FCS SCN private functions 4326 * fcs_scn_private FCS SCN private functions
4387 */ 4327 */
4388 4328
4389/** 4329/*
4390 * This routine will be called to send a SCR command. 4330 * This routine will be called to send a SCR command.
4391 */ 4331 */
4392static void 4332static void
@@ -4499,7 +4439,7 @@ bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
4499 FC_MAX_PDUSZ, 0); 4439 FC_MAX_PDUSZ, 0);
4500} 4440}
4501 4441
4502/** 4442/*
4503 * This routine will be called by bfa_timer on timer timeouts. 4443 * This routine will be called by bfa_timer on timer timeouts.
4504 * 4444 *
4505 * param[in] vport - pointer to bfa_fcs_lport_t. 4445 * param[in] vport - pointer to bfa_fcs_lport_t.
@@ -4522,7 +4462,7 @@ bfa_fcs_lport_scn_timeout(void *arg)
4522 4462
4523 4463
4524 4464
4525/** 4465/*
4526 * fcs_scn_public FCS state change notification public interfaces 4466 * fcs_scn_public FCS state change notification public interfaces
4527 */ 4467 */
4528 4468
@@ -4563,7 +4503,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
4563 4503
4564 bfa_trc(port->fcs, rpid); 4504 bfa_trc(port->fcs, rpid);
4565 4505
4566 /** 4506 /*
4567 * If this is an unknown device, then it just came online. 4507 * If this is an unknown device, then it just came online.
4568 * Otherwise let rport handle the RSCN event. 4508 * Otherwise let rport handle the RSCN event.
4569 */ 4509 */
@@ -4579,7 +4519,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
4579 bfa_fcs_rport_scn(rport); 4519 bfa_fcs_rport_scn(rport);
4580} 4520}
4581 4521
4582/** 4522/*
4583 * rscn format based PID comparison 4523 * rscn format based PID comparison
4584 */ 4524 */
4585#define __fc_pid_match(__c0, __c1, __fmt) \ 4525#define __fc_pid_match(__c0, __c1, __fmt) \
@@ -4624,7 +4564,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
4624 int i = 0, j; 4564 int i = 0, j;
4625 4565
4626 num_entries = 4566 num_entries =
4627 (bfa_os_ntohs(rscn->payldlen) - 4567 (be16_to_cpu(rscn->payldlen) -
4628 sizeof(u32)) / sizeof(rscn->event[0]); 4568 sizeof(u32)) / sizeof(rscn->event[0]);
4629 4569
4630 bfa_trc(port->fcs, num_entries); 4570 bfa_trc(port->fcs, num_entries);
@@ -4691,18 +4631,18 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
4691 } 4631 }
4692 } 4632 }
4693 4633
4694 /** 4634 /*
4695 * If any of area, domain or fabric RSCN is received, do a fresh discovery 4635 * If any of area, domain or fabric RSCN is received, do a fresh
4696 * to find new devices. 4636 * discovery to find new devices.
4697 */ 4637 */
4698 if (nsquery) 4638 if (nsquery)
4699 bfa_fcs_lport_ns_query(port); 4639 bfa_fcs_lport_ns_query(port);
4700} 4640}
4701 4641
4702/** 4642/*
4703 * BFA FCS port 4643 * BFA FCS port
4704 */ 4644 */
4705/** 4645/*
4706 * fcs_port_api BFA FCS port API 4646 * fcs_port_api BFA FCS port API
4707 */ 4647 */
4708struct bfa_fcs_lport_s * 4648struct bfa_fcs_lport_s *
@@ -4943,10 +4883,10 @@ bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
4943void 4883void
4944bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port) 4884bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
4945{ 4885{
4946 bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); 4886 memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
4947} 4887}
4948 4888
4949/** 4889/*
4950 * FCS virtual port state machine 4890 * FCS virtual port state machine
4951 */ 4891 */
4952 4892
@@ -4967,11 +4907,11 @@ static void bfa_fcs_vport_timeout(void *vport_arg);
4967static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport); 4907static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
4968static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport); 4908static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
4969 4909
4970/** 4910/*
4971 * fcs_vport_sm FCS virtual port state machine 4911 * fcs_vport_sm FCS virtual port state machine
4972 */ 4912 */
4973 4913
4974/** 4914/*
4975 * VPort State Machine events 4915 * VPort State Machine events
4976 */ 4916 */
4977enum bfa_fcs_vport_event { 4917enum bfa_fcs_vport_event {
@@ -5024,7 +4964,7 @@ static struct bfa_sm_table_s vport_sm_table[] = {
5024 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR} 4964 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
5025}; 4965};
5026 4966
5027/** 4967/*
5028 * Beginning state. 4968 * Beginning state.
5029 */ 4969 */
5030static void 4970static void
@@ -5045,7 +4985,7 @@ bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
5045 } 4985 }
5046} 4986}
5047 4987
5048/** 4988/*
5049 * Created state - a start event is required to start up the state machine. 4989 * Created state - a start event is required to start up the state machine.
5050 */ 4990 */
5051static void 4991static void
@@ -5062,7 +5002,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
5062 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); 5002 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
5063 bfa_fcs_vport_do_fdisc(vport); 5003 bfa_fcs_vport_do_fdisc(vport);
5064 } else { 5004 } else {
5065 /** 5005 /*
5066 * Fabric is offline or not NPIV capable, stay in 5006 * Fabric is offline or not NPIV capable, stay in
5067 * offline state. 5007 * offline state.
5068 */ 5008 */
@@ -5078,7 +5018,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
5078 5018
5079 case BFA_FCS_VPORT_SM_ONLINE: 5019 case BFA_FCS_VPORT_SM_ONLINE:
5080 case BFA_FCS_VPORT_SM_OFFLINE: 5020 case BFA_FCS_VPORT_SM_OFFLINE:
5081 /** 5021 /*
5082 * Ignore ONLINE/OFFLINE events from fabric 5022 * Ignore ONLINE/OFFLINE events from fabric
5083 * till vport is started. 5023 * till vport is started.
5084 */ 5024 */
@@ -5089,7 +5029,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
5089 } 5029 }
5090} 5030}
5091 5031
5092/** 5032/*
5093 * Offline state - awaiting ONLINE event from fabric SM. 5033 * Offline state - awaiting ONLINE event from fabric SM.
5094 */ 5034 */
5095static void 5035static void
@@ -5127,7 +5067,7 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
5127} 5067}
5128 5068
5129 5069
5130/** 5070/*
5131 * FDISC is sent and awaiting reply from fabric. 5071 * FDISC is sent and awaiting reply from fabric.
5132 */ 5072 */
5133static void 5073static void
@@ -5174,7 +5114,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
5174 } 5114 }
5175} 5115}
5176 5116
5177/** 5117/*
5178 * FDISC attempt failed - a timer is active to retry FDISC. 5118 * FDISC attempt failed - a timer is active to retry FDISC.
5179 */ 5119 */
5180static void 5120static void
@@ -5208,7 +5148,7 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
5208 } 5148 }
5209} 5149}
5210 5150
5211/** 5151/*
5212 * Vport is online (FDISC is complete). 5152 * Vport is online (FDISC is complete).
5213 */ 5153 */
5214static void 5154static void
@@ -5235,7 +5175,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
5235 } 5175 }
5236} 5176}
5237 5177
5238/** 5178/*
5239 * Vport is being deleted - awaiting lport delete completion to send 5179 * Vport is being deleted - awaiting lport delete completion to send
5240 * LOGO to fabric. 5180 * LOGO to fabric.
5241 */ 5181 */
@@ -5264,7 +5204,7 @@ bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
5264 } 5204 }
5265} 5205}
5266 5206
5267/** 5207/*
5268 * Error State. 5208 * Error State.
5269 * This state will be set when the Vport Creation fails due 5209 * This state will be set when the Vport Creation fails due
5270 * to errors like Dup WWN. In this state only operation allowed 5210 * to errors like Dup WWN. In this state only operation allowed
@@ -5288,7 +5228,7 @@ bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
5288 } 5228 }
5289} 5229}
5290 5230
5291/** 5231/*
5292 * Lport cleanup is in progress since vport is being deleted. Fabric is 5232 * Lport cleanup is in progress since vport is being deleted. Fabric is
5293 * offline, so no LOGO is needed to complete vport deletion. 5233 * offline, so no LOGO is needed to complete vport deletion.
5294 */ 5234 */
@@ -5313,7 +5253,7 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
5313 } 5253 }
5314} 5254}
5315 5255
5316/** 5256/*
5317 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup 5257 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
5318 * is done. 5258 * is done.
5319 */ 5259 */
@@ -5347,10 +5287,10 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
5347 5287
5348 5288
5349 5289
5350/** 5290/*
5351 * fcs_vport_private FCS virtual port private functions 5291 * fcs_vport_private FCS virtual port private functions
5352 */ 5292 */
5353/** 5293/*
5354 * This routine will be called to send a FDISC command. 5294 * This routine will be called to send a FDISC command.
5355 */ 5295 */
5356static void 5296static void
@@ -5397,7 +5337,7 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5397 } 5337 }
5398} 5338}
5399 5339
5400/** 5340/*
5401 * Called to send a logout to the fabric. Used when a V-Port is 5341 * Called to send a logout to the fabric. Used when a V-Port is
5402 * deleted/stopped. 5342 * deleted/stopped.
5403 */ 5343 */
@@ -5411,7 +5351,7 @@ bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
5411} 5351}
5412 5352
5413 5353
5414/** 5354/*
5415 * This routine will be called by bfa_timer on timer timeouts. 5355 * This routine will be called by bfa_timer on timer timeouts.
5416 * 5356 *
5417 * param[in] vport - pointer to bfa_fcs_vport_t. 5357 * param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5449,11 +5389,11 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
5449 5389
5450 5390
5451 5391
5452/** 5392/*
5453 * fcs_vport_public FCS virtual port public interfaces 5393 * fcs_vport_public FCS virtual port public interfaces
5454 */ 5394 */
5455 5395
5456/** 5396/*
5457 * Online notification from fabric SM. 5397 * Online notification from fabric SM.
5458 */ 5398 */
5459void 5399void
@@ -5463,7 +5403,7 @@ bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
5463 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); 5403 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
5464} 5404}
5465 5405
5466/** 5406/*
5467 * Offline notification from fabric SM. 5407 * Offline notification from fabric SM.
5468 */ 5408 */
5469void 5409void
@@ -5473,7 +5413,7 @@ bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
5473 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); 5413 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
5474} 5414}
5475 5415
5476/** 5416/*
5477 * Cleanup notification from fabric SM on link timer expiry. 5417 * Cleanup notification from fabric SM on link timer expiry.
5478 */ 5418 */
5479void 5419void
@@ -5481,7 +5421,7 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
5481{ 5421{
5482 vport->vport_stats.fab_cleanup++; 5422 vport->vport_stats.fab_cleanup++;
5483} 5423}
5484/** 5424/*
5485 * delete notification from fabric SM. To be invoked from within FCS. 5425 * delete notification from fabric SM. To be invoked from within FCS.
5486 */ 5426 */
5487void 5427void
@@ -5490,7 +5430,7 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
5490 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); 5430 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
5491} 5431}
5492 5432
5493/** 5433/*
5494 * Delete completion callback from associated lport 5434 * Delete completion callback from associated lport
5495 */ 5435 */
5496void 5436void
@@ -5501,11 +5441,11 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
5501 5441
5502 5442
5503 5443
5504/** 5444/*
5505 * fcs_vport_api Virtual port API 5445 * fcs_vport_api Virtual port API
5506 */ 5446 */
5507 5447
5508/** 5448/*
5509 * Use this function to instantiate a new FCS vport object. This 5449 * Use this function to instantiate a new FCS vport object. This
5510 * function will not trigger any HW initialization process (which will be 5450 * function will not trigger any HW initialization process (which will be
5511 * done in vport_start() call) 5451 * done in vport_start() call)
@@ -5555,7 +5495,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
5555 return BFA_STATUS_OK; 5495 return BFA_STATUS_OK;
5556} 5496}
5557 5497
5558/** 5498/*
5559 * Use this function to instantiate a new FCS PBC vport object. This 5499 * Use this function to instantiate a new FCS PBC vport object. This
5560 * function will not trigger any HW initialization process (which will be 5500 * function will not trigger any HW initialization process (which will be
5561 * done in vport_start() call) 5501 * done in vport_start() call)
@@ -5585,7 +5525,7 @@ bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
5585 return rc; 5525 return rc;
5586} 5526}
5587 5527
5588/** 5528/*
5589 * Use this function to findout if this is a pbc vport or not. 5529 * Use this function to findout if this is a pbc vport or not.
5590 * 5530 *
5591 * @param[in] vport - pointer to bfa_fcs_vport_t. 5531 * @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5603,7 +5543,7 @@ bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
5603 5543
5604} 5544}
5605 5545
5606/** 5546/*
5607 * Use this function initialize the vport. 5547 * Use this function initialize the vport.
5608 * 5548 *
5609 * @param[in] vport - pointer to bfa_fcs_vport_t. 5549 * @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5618,7 +5558,7 @@ bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
5618 return BFA_STATUS_OK; 5558 return BFA_STATUS_OK;
5619} 5559}
5620 5560
5621/** 5561/*
5622 * Use this function quiese the vport object. This function will return 5562 * Use this function quiese the vport object. This function will return
5623 * immediately, when the vport is actually stopped, the 5563 * immediately, when the vport is actually stopped, the
5624 * bfa_drv_vport_stop_cb() will be called. 5564 * bfa_drv_vport_stop_cb() will be called.
@@ -5635,7 +5575,7 @@ bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
5635 return BFA_STATUS_OK; 5575 return BFA_STATUS_OK;
5636} 5576}
5637 5577
5638/** 5578/*
5639 * Use this function to delete a vport object. Fabric object should 5579 * Use this function to delete a vport object. Fabric object should
5640 * be stopped before this function call. 5580 * be stopped before this function call.
5641 * 5581 *
@@ -5657,7 +5597,7 @@ bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
5657 return BFA_STATUS_OK; 5597 return BFA_STATUS_OK;
5658} 5598}
5659 5599
5660/** 5600/*
5661 * Use this function to get vport's current status info. 5601 * Use this function to get vport's current status info.
5662 * 5602 *
5663 * param[in] vport pointer to bfa_fcs_vport_t. 5603 * param[in] vport pointer to bfa_fcs_vport_t.
@@ -5672,13 +5612,13 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
5672 if (vport == NULL || attr == NULL) 5612 if (vport == NULL || attr == NULL)
5673 return; 5613 return;
5674 5614
5675 bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s)); 5615 memset(attr, 0, sizeof(struct bfa_vport_attr_s));
5676 5616
5677 bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr); 5617 bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
5678 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); 5618 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
5679} 5619}
5680 5620
5681/** 5621/*
5682 * Use this function to get vport's statistics. 5622 * Use this function to get vport's statistics.
5683 * 5623 *
5684 * param[in] vport pointer to bfa_fcs_vport_t. 5624 * param[in] vport pointer to bfa_fcs_vport_t.
@@ -5693,7 +5633,7 @@ bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
5693 *stats = vport->vport_stats; 5633 *stats = vport->vport_stats;
5694} 5634}
5695 5635
5696/** 5636/*
5697 * Use this function to clear vport's statistics. 5637 * Use this function to clear vport's statistics.
5698 * 5638 *
5699 * param[in] vport pointer to bfa_fcs_vport_t. 5639 * param[in] vport pointer to bfa_fcs_vport_t.
@@ -5703,10 +5643,10 @@ bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
5703void 5643void
5704bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport) 5644bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
5705{ 5645{
5706 bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); 5646 memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
5707} 5647}
5708 5648
5709/** 5649/*
5710 * Lookup a virtual port. Excludes base port from lookup. 5650 * Lookup a virtual port. Excludes base port from lookup.
5711 */ 5651 */
5712struct bfa_fcs_vport_s * 5652struct bfa_fcs_vport_s *
@@ -5728,7 +5668,7 @@ bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
5728 return vport; 5668 return vport;
5729} 5669}
5730 5670
5731/** 5671/*
5732 * FDISC Response 5672 * FDISC Response
5733 */ 5673 */
5734void 5674void
@@ -5784,7 +5724,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
5784 } 5724 }
5785} 5725}
5786 5726
5787/** 5727/*
5788 * LOGO response 5728 * LOGO response
5789 */ 5729 */
5790void 5730void
@@ -5794,7 +5734,7 @@ bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
5794 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); 5734 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
5795} 5735}
5796 5736
5797/** 5737/*
5798 * Received clear virtual link 5738 * Received clear virtual link
5799 */ 5739 */
5800void 5740void
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 635f0cd8871..47f35c0ef29 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * rport.c Remote port implementation. 19 * rport.c Remote port implementation.
20 */ 20 */
21 21
@@ -75,7 +75,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
75static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 75static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
76 struct fchs_s *rx_fchs, u16 len); 76 struct fchs_s *rx_fchs, u16 len);
77static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); 77static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
78/** 78/*
79 * fcs_rport_sm FCS rport state machine events 79 * fcs_rport_sm FCS rport state machine events
80 */ 80 */
81 81
@@ -172,7 +172,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
172 {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, 172 {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
173}; 173};
174 174
175/** 175/*
176 * Beginning state. 176 * Beginning state.
177 */ 177 */
178static void 178static void
@@ -210,7 +210,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
210 } 210 }
211} 211}
212 212
213/** 213/*
214 * PLOGI is being sent. 214 * PLOGI is being sent.
215 */ 215 */
216static void 216static void
@@ -262,7 +262,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
262 } 262 }
263} 263}
264 264
265/** 265/*
266 * PLOGI is being sent. 266 * PLOGI is being sent.
267 */ 267 */
268static void 268static void
@@ -287,7 +287,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
287 287
288 case RPSM_EVENT_PLOGI_RCVD: 288 case RPSM_EVENT_PLOGI_RCVD:
289 case RPSM_EVENT_SCN: 289 case RPSM_EVENT_SCN:
290 /** 290 /*
291 * Ignore, SCN is possibly online notification. 291 * Ignore, SCN is possibly online notification.
292 */ 292 */
293 break; 293 break;
@@ -309,7 +309,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
309 break; 309 break;
310 310
311 case RPSM_EVENT_HCB_OFFLINE: 311 case RPSM_EVENT_HCB_OFFLINE:
312 /** 312 /*
313 * Ignore BFA callback, on a PLOGI receive we call bfa offline. 313 * Ignore BFA callback, on a PLOGI receive we call bfa offline.
314 */ 314 */
315 break; 315 break;
@@ -319,7 +319,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
319 } 319 }
320} 320}
321 321
322/** 322/*
323 * PLOGI is sent. 323 * PLOGI is sent.
324 */ 324 */
325static void 325static void
@@ -380,7 +380,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
380 } 380 }
381} 381}
382 382
383/** 383/*
384 * PLOGI is sent. 384 * PLOGI is sent.
385 */ 385 */
386static void 386static void
@@ -475,7 +475,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
475 } 475 }
476} 476}
477 477
478/** 478/*
479 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s 479 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s
480 * are offline. 480 * are offline.
481 */ 481 */
@@ -519,7 +519,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
519 break; 519 break;
520 520
521 case RPSM_EVENT_SCN: 521 case RPSM_EVENT_SCN:
522 /** 522 /*
523 * @todo 523 * @todo
524 * Ignore SCN - PLOGI just completed, FC-4 login should detect 524 * Ignore SCN - PLOGI just completed, FC-4 login should detect
525 * device failures. 525 * device failures.
@@ -531,7 +531,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
531 } 531 }
532} 532}
533 533
534/** 534/*
535 * Rport is ONLINE. FC-4s active. 535 * Rport is ONLINE. FC-4s active.
536 */ 536 */
537static void 537static void
@@ -580,7 +580,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
580 } 580 }
581} 581}
582 582
583/** 583/*
584 * An SCN event is received in ONLINE state. NS query is being sent 584 * An SCN event is received in ONLINE state. NS query is being sent
585 * prior to ADISC authentication with rport. FC-4s are paused. 585 * prior to ADISC authentication with rport. FC-4s are paused.
586 */ 586 */
@@ -604,7 +604,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
604 break; 604 break;
605 605
606 case RPSM_EVENT_SCN: 606 case RPSM_EVENT_SCN:
607 /** 607 /*
608 * ignore SCN, wait for response to query itself 608 * ignore SCN, wait for response to query itself
609 */ 609 */
610 break; 610 break;
@@ -638,7 +638,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
638 } 638 }
639} 639}
640 640
641/** 641/*
642 * An SCN event is received in ONLINE state. NS query is sent to rport. 642 * An SCN event is received in ONLINE state. NS query is sent to rport.
643 * FC-4s are paused. 643 * FC-4s are paused.
644 */ 644 */
@@ -697,7 +697,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
697 } 697 }
698} 698}
699 699
700/** 700/*
701 * An SCN event is received in ONLINE state. ADISC is being sent for 701 * An SCN event is received in ONLINE state. ADISC is being sent for
702 * authenticating with rport. FC-4s are paused. 702 * authenticating with rport. FC-4s are paused.
703 */ 703 */
@@ -748,7 +748,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
748 } 748 }
749} 749}
750 750
751/** 751/*
752 * An SCN event is received in ONLINE state. ADISC is to rport. 752 * An SCN event is received in ONLINE state. ADISC is to rport.
753 * FC-4s are paused. 753 * FC-4s are paused.
754 */ 754 */
@@ -765,7 +765,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
765 break; 765 break;
766 766
767 case RPSM_EVENT_PLOGI_RCVD: 767 case RPSM_EVENT_PLOGI_RCVD:
768 /** 768 /*
769 * Too complex to cleanup FC-4 & rport and then acc to PLOGI. 769 * Too complex to cleanup FC-4 & rport and then acc to PLOGI.
770 * At least go offline when a PLOGI is received. 770 * At least go offline when a PLOGI is received.
771 */ 771 */
@@ -787,7 +787,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
787 break; 787 break;
788 788
789 case RPSM_EVENT_SCN: 789 case RPSM_EVENT_SCN:
790 /** 790 /*
791 * already processing RSCN 791 * already processing RSCN
792 */ 792 */
793 break; 793 break;
@@ -810,7 +810,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
810 } 810 }
811} 811}
812 812
813/** 813/*
814 * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 814 * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
815 */ 815 */
816static void 816static void
@@ -841,7 +841,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
841 } 841 }
842} 842}
843 843
844/** 844/*
845 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion 845 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion
846 * callback. 846 * callback.
847 */ 847 */
@@ -864,7 +864,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
864 } 864 }
865} 865}
866 866
867/** 867/*
868 * Rport is going offline. Awaiting FC-4 offline completion callback. 868 * Rport is going offline. Awaiting FC-4 offline completion callback.
869 */ 869 */
870static void 870static void
@@ -886,7 +886,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
886 case RPSM_EVENT_LOGO_RCVD: 886 case RPSM_EVENT_LOGO_RCVD:
887 case RPSM_EVENT_PRLO_RCVD: 887 case RPSM_EVENT_PRLO_RCVD:
888 case RPSM_EVENT_ADDRESS_CHANGE: 888 case RPSM_EVENT_ADDRESS_CHANGE:
889 /** 889 /*
890 * rport is already going offline. 890 * rport is already going offline.
891 * SCN - ignore and wait till transitioning to offline state 891 * SCN - ignore and wait till transitioning to offline state
892 */ 892 */
@@ -901,7 +901,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
901 } 901 }
902} 902}
903 903
904/** 904/*
905 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 905 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
906 * callback. 906 * callback.
907 */ 907 */
@@ -945,7 +945,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
945 case RPSM_EVENT_SCN: 945 case RPSM_EVENT_SCN:
946 case RPSM_EVENT_LOGO_RCVD: 946 case RPSM_EVENT_LOGO_RCVD:
947 case RPSM_EVENT_PRLO_RCVD: 947 case RPSM_EVENT_PRLO_RCVD:
948 /** 948 /*
949 * Ignore, already offline. 949 * Ignore, already offline.
950 */ 950 */
951 break; 951 break;
@@ -955,7 +955,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
955 } 955 }
956} 956}
957 957
958/** 958/*
959 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 959 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
960 * callback to send LOGO accept. 960 * callback to send LOGO accept.
961 */ 961 */
@@ -1009,7 +1009,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1009 1009
1010 case RPSM_EVENT_LOGO_RCVD: 1010 case RPSM_EVENT_LOGO_RCVD:
1011 case RPSM_EVENT_PRLO_RCVD: 1011 case RPSM_EVENT_PRLO_RCVD:
1012 /** 1012 /*
1013 * Ignore - already processing a LOGO. 1013 * Ignore - already processing a LOGO.
1014 */ 1014 */
1015 break; 1015 break;
@@ -1019,7 +1019,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1019 } 1019 }
1020} 1020}
1021 1021
1022/** 1022/*
1023 * Rport is being deleted. FC-4s are offline. 1023 * Rport is being deleted. FC-4s are offline.
1024 * Awaiting BFA rport offline 1024 * Awaiting BFA rport offline
1025 * callback to send LOGO. 1025 * callback to send LOGO.
@@ -1048,7 +1048,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1048 } 1048 }
1049} 1049}
1050 1050
1051/** 1051/*
1052 * Rport is being deleted. FC-4s are offline. LOGO is being sent. 1052 * Rport is being deleted. FC-4s are offline. LOGO is being sent.
1053 */ 1053 */
1054static void 1054static void
@@ -1082,7 +1082,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1082 } 1082 }
1083} 1083}
1084 1084
1085/** 1085/*
1086 * Rport is offline. FC-4s are offline. BFA rport is offline. 1086 * Rport is offline. FC-4s are offline. BFA rport is offline.
1087 * Timer active to delete stale rport. 1087 * Timer active to delete stale rport.
1088 */ 1088 */
@@ -1142,7 +1142,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1142 } 1142 }
1143} 1143}
1144 1144
1145/** 1145/*
1146 * Rport address has changed. Nameserver discovery request is being sent. 1146 * Rport address has changed. Nameserver discovery request is being sent.
1147 */ 1147 */
1148static void 1148static void
@@ -1199,7 +1199,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1199 } 1199 }
1200} 1200}
1201 1201
1202/** 1202/*
1203 * Nameserver discovery failed. Waiting for timeout to retry. 1203 * Nameserver discovery failed. Waiting for timeout to retry.
1204 */ 1204 */
1205static void 1205static void
@@ -1263,7 +1263,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1263 } 1263 }
1264} 1264}
1265 1265
1266/** 1266/*
1267 * Rport address has changed. Nameserver discovery request is sent. 1267 * Rport address has changed. Nameserver discovery request is sent.
1268 */ 1268 */
1269static void 1269static void
@@ -1329,13 +1329,13 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1329 bfa_fcs_rport_send_prlo_acc(rport); 1329 bfa_fcs_rport_send_prlo_acc(rport);
1330 break; 1330 break;
1331 case RPSM_EVENT_SCN: 1331 case RPSM_EVENT_SCN:
1332 /** 1332 /*
1333 * ignore, wait for NS query response 1333 * ignore, wait for NS query response
1334 */ 1334 */
1335 break; 1335 break;
1336 1336
1337 case RPSM_EVENT_LOGO_RCVD: 1337 case RPSM_EVENT_LOGO_RCVD:
1338 /** 1338 /*
1339 * Not logged-in yet. Accept LOGO. 1339 * Not logged-in yet. Accept LOGO.
1340 */ 1340 */
1341 bfa_fcs_rport_send_logo_acc(rport); 1341 bfa_fcs_rport_send_logo_acc(rport);
@@ -1354,7 +1354,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1354 1354
1355 1355
1356 1356
1357/** 1357/*
1358 * fcs_rport_private FCS RPORT provate functions 1358 * fcs_rport_private FCS RPORT provate functions
1359 */ 1359 */
1360 1360
@@ -1415,7 +1415,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1415 1415
1416 plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); 1416 plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
1417 1417
1418 /** 1418 /*
1419 * Check for failure first. 1419 * Check for failure first.
1420 */ 1420 */
1421 if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { 1421 if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
@@ -1436,7 +1436,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1436 return; 1436 return;
1437 } 1437 }
1438 1438
1439 /** 1439 /*
1440 * PLOGI is complete. Make sure this device is not one of the known 1440 * PLOGI is complete. Make sure this device is not one of the known
1441 * device with a new FC port address. 1441 * device with a new FC port address.
1442 */ 1442 */
@@ -1468,7 +1468,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1468 } 1468 }
1469 } 1469 }
1470 1470
1471 /** 1471 /*
1472 * Normal login path -- no evil twins. 1472 * Normal login path -- no evil twins.
1473 */ 1473 */
1474 rport->stats.plogi_accs++; 1474 rport->stats.plogi_accs++;
@@ -1621,7 +1621,7 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1621 bfa_trc(rport->fcs, rport->pwwn); 1621 bfa_trc(rport->fcs, rport->pwwn);
1622 1622
1623 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 1623 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
1624 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 1624 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
1625 1625
1626 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1626 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1627 /* Check if the pid is the same as before. */ 1627 /* Check if the pid is the same as before. */
@@ -1691,7 +1691,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1691 bfa_trc(rport->fcs, rport->pwwn); 1691 bfa_trc(rport->fcs, rport->pwwn);
1692 1692
1693 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); 1693 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
1694 cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); 1694 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
1695 1695
1696 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { 1696 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
1697 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); 1697 bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
@@ -1722,7 +1722,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1722 } 1722 }
1723} 1723}
1724 1724
1725/** 1725/*
1726 * Called to send a logout to the rport. 1726 * Called to send a logout to the rport.
1727 */ 1727 */
1728static void 1728static void
@@ -1759,7 +1759,7 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1759 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1759 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1760} 1760}
1761 1761
1762/** 1762/*
1763 * Send ACC for a LOGO received. 1763 * Send ACC for a LOGO received.
1764 */ 1764 */
1765static void 1765static void
@@ -1788,7 +1788,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
1788 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1788 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1789} 1789}
1790 1790
1791/** 1791/*
1792 * brief 1792 * brief
1793 * This routine will be called by bfa_timer on timer timeouts. 1793 * This routine will be called by bfa_timer on timer timeouts.
1794 * 1794 *
@@ -1961,7 +1961,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1961 struct bfa_fcs_rport_s *rport; 1961 struct bfa_fcs_rport_s *rport;
1962 struct bfad_rport_s *rport_drv; 1962 struct bfad_rport_s *rport_drv;
1963 1963
1964 /** 1964 /*
1965 * allocate rport 1965 * allocate rport
1966 */ 1966 */
1967 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) 1967 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
@@ -1979,7 +1979,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1979 rport->pid = rpid; 1979 rport->pid = rpid;
1980 rport->pwwn = pwwn; 1980 rport->pwwn = pwwn;
1981 1981
1982 /** 1982 /*
1983 * allocate BFA rport 1983 * allocate BFA rport
1984 */ 1984 */
1985 rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport); 1985 rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
@@ -1989,7 +1989,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1989 return NULL; 1989 return NULL;
1990 } 1990 }
1991 1991
1992 /** 1992 /*
1993 * allocate FC-4s 1993 * allocate FC-4s
1994 */ 1994 */
1995 bfa_assert(bfa_fcs_lport_is_initiator(port)); 1995 bfa_assert(bfa_fcs_lport_is_initiator(port));
@@ -2021,7 +2021,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
2021{ 2021{
2022 struct bfa_fcs_lport_s *port = rport->port; 2022 struct bfa_fcs_lport_s *port = rport->port;
2023 2023
2024 /** 2024 /*
2025 * - delete FC-4s 2025 * - delete FC-4s
2026 * - delete BFA rport 2026 * - delete BFA rport
2027 * - remove from queue of rports 2027 * - remove from queue of rports
@@ -2093,7 +2093,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2093 } 2093 }
2094} 2094}
2095 2095
2096/** 2096/*
2097 * Update rport parameters from PLOGI or PLOGI accept. 2097 * Update rport parameters from PLOGI or PLOGI accept.
2098 */ 2098 */
2099static void 2099static void
@@ -2101,14 +2101,14 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2101{ 2101{
2102 bfa_fcs_lport_t *port = rport->port; 2102 bfa_fcs_lport_t *port = rport->port;
2103 2103
2104 /** 2104 /*
2105 * - port name 2105 * - port name
2106 * - node name 2106 * - node name
2107 */ 2107 */
2108 rport->pwwn = plogi->port_name; 2108 rport->pwwn = plogi->port_name;
2109 rport->nwwn = plogi->node_name; 2109 rport->nwwn = plogi->node_name;
2110 2110
2111 /** 2111 /*
2112 * - class of service 2112 * - class of service
2113 */ 2113 */
2114 rport->fc_cos = 0; 2114 rport->fc_cos = 0;
@@ -2118,16 +2118,16 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2118 if (plogi->class2.class_valid) 2118 if (plogi->class2.class_valid)
2119 rport->fc_cos |= FC_CLASS_2; 2119 rport->fc_cos |= FC_CLASS_2;
2120 2120
2121 /** 2121 /*
2122 * - CISC 2122 * - CISC
2123 * - MAX receive frame size 2123 * - MAX receive frame size
2124 */ 2124 */
2125 rport->cisc = plogi->csp.cisc; 2125 rport->cisc = plogi->csp.cisc;
2126 rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz); 2126 rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
2127 2127
2128 bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); 2128 bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
2129 bfa_trc(port->fcs, port->fabric->bb_credit); 2129 bfa_trc(port->fcs, port->fabric->bb_credit);
2130 /** 2130 /*
2131 * Direct Attach P2P mode : 2131 * Direct Attach P2P mode :
2132 * This is to handle a bug (233476) in IBM targets in Direct Attach 2132 * This is to handle a bug (233476) in IBM targets in Direct Attach
2133 * Mode. Basically, in FLOGI Accept the target would have 2133 * Mode. Basically, in FLOGI Accept the target would have
@@ -2136,19 +2136,19 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2136 * in PLOGI. 2136 * in PLOGI.
2137 */ 2137 */
2138 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && 2138 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
2139 (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) { 2139 (be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) {
2140 2140
2141 bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); 2141 bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
2142 bfa_trc(port->fcs, port->fabric->bb_credit); 2142 bfa_trc(port->fcs, port->fabric->bb_credit);
2143 2143
2144 port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred); 2144 port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
2145 bfa_fcport_set_tx_bbcredit(port->fcs->bfa, 2145 bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
2146 port->fabric->bb_credit); 2146 port->fabric->bb_credit);
2147 } 2147 }
2148 2148
2149} 2149}
2150 2150
2151/** 2151/*
2152 * Called to handle LOGO received from an existing remote port. 2152 * Called to handle LOGO received from an existing remote port.
2153 */ 2153 */
2154static void 2154static void
@@ -2164,11 +2164,11 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
2164 2164
2165 2165
2166 2166
2167/** 2167/*
2168 * fcs_rport_public FCS rport public interfaces 2168 * fcs_rport_public FCS rport public interfaces
2169 */ 2169 */
2170 2170
2171/** 2171/*
2172 * Called by bport/vport to create a remote port instance for a discovered 2172 * Called by bport/vport to create a remote port instance for a discovered
2173 * remote device. 2173 * remote device.
2174 * 2174 *
@@ -2191,7 +2191,7 @@ bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
2191 return rport; 2191 return rport;
2192} 2192}
2193 2193
2194/** 2194/*
2195 * Called to create a rport for which only the wwn is known. 2195 * Called to create a rport for which only the wwn is known.
2196 * 2196 *
2197 * @param[in] port - base port 2197 * @param[in] port - base port
@@ -2211,7 +2211,7 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
2211 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); 2211 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
2212 return rport; 2212 return rport;
2213} 2213}
2214/** 2214/*
2215 * Called by bport in private loop topology to indicate that a 2215 * Called by bport in private loop topology to indicate that a
2216 * rport has been discovered and plogi has been completed. 2216 * rport has been discovered and plogi has been completed.
2217 * 2217 *
@@ -2233,7 +2233,7 @@ bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
2233 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); 2233 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
2234} 2234}
2235 2235
2236/** 2236/*
2237 * Called by bport/vport to handle PLOGI received from a new remote port. 2237 * Called by bport/vport to handle PLOGI received from a new remote port.
2238 * If an existing rport does a plogi, it will be handled separately. 2238 * If an existing rport does a plogi, it will be handled separately.
2239 */ 2239 */
@@ -2272,7 +2272,7 @@ wwn_compare(wwn_t wwn1, wwn_t wwn2)
2272 return 0; 2272 return 0;
2273} 2273}
2274 2274
2275/** 2275/*
2276 * Called by bport/vport to handle PLOGI received from an existing 2276 * Called by bport/vport to handle PLOGI received from an existing
2277 * remote port. 2277 * remote port.
2278 */ 2278 */
@@ -2280,7 +2280,7 @@ void
2280bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, 2280bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2281 struct fc_logi_s *plogi) 2281 struct fc_logi_s *plogi)
2282{ 2282{
2283 /** 2283 /*
2284 * @todo Handle P2P and initiator-initiator. 2284 * @todo Handle P2P and initiator-initiator.
2285 */ 2285 */
2286 2286
@@ -2289,7 +2289,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2289 rport->reply_oxid = rx_fchs->ox_id; 2289 rport->reply_oxid = rx_fchs->ox_id;
2290 bfa_trc(rport->fcs, rport->reply_oxid); 2290 bfa_trc(rport->fcs, rport->reply_oxid);
2291 2291
2292 /** 2292 /*
2293 * In Switched fabric topology, 2293 * In Switched fabric topology,
2294 * PLOGI to each other. If our pwwn is smaller, ignore it, 2294 * PLOGI to each other. If our pwwn is smaller, ignore it,
2295 * if it is not a well known address. 2295 * if it is not a well known address.
@@ -2307,7 +2307,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2307 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); 2307 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
2308} 2308}
2309 2309
2310/** 2310/*
2311 * Called by bport/vport to delete a remote port instance. 2311 * Called by bport/vport to delete a remote port instance.
2312 * 2312 *
2313 * Rport delete is called under the following conditions: 2313 * Rport delete is called under the following conditions:
@@ -2321,7 +2321,7 @@ bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
2321 bfa_sm_send_event(rport, RPSM_EVENT_DELETE); 2321 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
2322} 2322}
2323 2323
2324/** 2324/*
2325 * Called by bport/vport to when a target goes offline. 2325 * Called by bport/vport to when a target goes offline.
2326 * 2326 *
2327 */ 2327 */
@@ -2331,7 +2331,7 @@ bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
2331 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2331 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
2332} 2332}
2333 2333
2334/** 2334/*
2335 * Called by bport in n2n when a target (attached port) becomes online. 2335 * Called by bport in n2n when a target (attached port) becomes online.
2336 * 2336 *
2337 */ 2337 */
@@ -2340,7 +2340,7 @@ bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
2340{ 2340{
2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); 2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
2342} 2342}
2343/** 2343/*
2344 * Called by bport/vport to notify SCN for the remote port 2344 * Called by bport/vport to notify SCN for the remote port
2345 */ 2345 */
2346void 2346void
@@ -2350,7 +2350,7 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN);
2351} 2351}
2352 2352
2353/** 2353/*
2354 * Called by fcpim to notify that the ITN cleanup is done. 2354 * Called by fcpim to notify that the ITN cleanup is done.
2355 */ 2355 */
2356void 2356void
@@ -2359,7 +2359,7 @@ bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
2359 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2359 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
2360} 2360}
2361 2361
2362/** 2362/*
2363 * Called by fcptm to notify that the ITN cleanup is done. 2363 * Called by fcptm to notify that the ITN cleanup is done.
2364 */ 2364 */
2365void 2365void
@@ -2368,7 +2368,7 @@ bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
2368 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2368 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
2369} 2369}
2370 2370
2371/** 2371/*
2372 * brief 2372 * brief
2373 * This routine BFA callback for bfa_rport_online() call. 2373 * This routine BFA callback for bfa_rport_online() call.
2374 * 2374 *
@@ -2391,7 +2391,7 @@ bfa_cb_rport_online(void *cbarg)
2391 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); 2391 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
2392} 2392}
2393 2393
2394/** 2394/*
2395 * brief 2395 * brief
2396 * This routine BFA callback for bfa_rport_offline() call. 2396 * This routine BFA callback for bfa_rport_offline() call.
2397 * 2397 *
@@ -2413,7 +2413,7 @@ bfa_cb_rport_offline(void *cbarg)
2413 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); 2413 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
2414} 2414}
2415 2415
2416/** 2416/*
2417 * brief 2417 * brief
2418 * This routine is a static BFA callback when there is a QoS flow_id 2418 * This routine is a static BFA callback when there is a QoS flow_id
2419 * change notification 2419 * change notification
@@ -2437,7 +2437,7 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
2437 bfa_trc(rport->fcs, rport->pwwn); 2437 bfa_trc(rport->fcs, rport->pwwn);
2438} 2438}
2439 2439
2440/** 2440/*
2441 * brief 2441 * brief
2442 * This routine is a static BFA callback when there is a QoS priority 2442 * This routine is a static BFA callback when there is a QoS priority
2443 * change notification 2443 * change notification
@@ -2461,7 +2461,7 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
2461 bfa_trc(rport->fcs, rport->pwwn); 2461 bfa_trc(rport->fcs, rport->pwwn);
2462} 2462}
2463 2463
2464/** 2464/*
2465 * Called to process any unsolicted frames from this remote port 2465 * Called to process any unsolicted frames from this remote port
2466 */ 2466 */
2467void 2467void
@@ -2470,7 +2470,7 @@ bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
2470 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2470 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
2471} 2471}
2472 2472
2473/** 2473/*
2474 * Called to process any unsolicted frames from this remote port 2474 * Called to process any unsolicted frames from this remote port
2475 */ 2475 */
2476void 2476void
@@ -2577,7 +2577,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2577 FC_MAX_PDUSZ, 0); 2577 FC_MAX_PDUSZ, 0);
2578} 2578}
2579 2579
2580/** 2580/*
2581 * Return state of rport. 2581 * Return state of rport.
2582 */ 2582 */
2583int 2583int
@@ -2586,7 +2586,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
2586 return bfa_sm_to_state(rport_sm_table, rport->sm); 2586 return bfa_sm_to_state(rport_sm_table, rport->sm);
2587} 2587}
2588 2588
2589/** 2589/*
2590 * brief 2590 * brief
2591 * Called by the Driver to set rport delete/ageout timeout 2591 * Called by the Driver to set rport delete/ageout timeout
2592 * 2592 *
@@ -2613,15 +2613,15 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
2613 2613
2614 2614
2615 2615
2616/** 2616/*
2617 * Remote port implementation. 2617 * Remote port implementation.
2618 */ 2618 */
2619 2619
2620/** 2620/*
2621 * fcs_rport_api FCS rport API. 2621 * fcs_rport_api FCS rport API.
2622 */ 2622 */
2623 2623
2624/** 2624/*
2625 * Direct API to add a target by port wwn. This interface is used, for 2625 * Direct API to add a target by port wwn. This interface is used, for
2626 * example, by bios when target pwwn is known from boot lun configuration. 2626 * example, by bios when target pwwn is known from boot lun configuration.
2627 */ 2627 */
@@ -2634,7 +2634,7 @@ bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
2634 return BFA_STATUS_OK; 2634 return BFA_STATUS_OK;
2635} 2635}
2636 2636
2637/** 2637/*
2638 * Direct API to remove a target and its associated resources. This 2638 * Direct API to remove a target and its associated resources. This
2639 * interface is used, for example, by driver to remove target 2639 * interface is used, for example, by driver to remove target
2640 * ports from the target list for a VM. 2640 * ports from the target list for a VM.
@@ -2663,7 +2663,7 @@ bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
2663 2663
2664} 2664}
2665 2665
2666/** 2666/*
2667 * Remote device status for display/debug. 2667 * Remote device status for display/debug.
2668 */ 2668 */
2669void 2669void
@@ -2674,7 +2674,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2674 bfa_fcs_lport_t *port = rport->port; 2674 bfa_fcs_lport_t *port = rport->port;
2675 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; 2675 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
2676 2676
2677 bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); 2677 memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
2678 2678
2679 rport_attr->pid = rport->pid; 2679 rport_attr->pid = rport->pid;
2680 rport_attr->pwwn = rport->pwwn; 2680 rport_attr->pwwn = rport->pwwn;
@@ -2704,7 +2704,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2704 } 2704 }
2705} 2705}
2706 2706
2707/** 2707/*
2708 * Per remote device statistics. 2708 * Per remote device statistics.
2709 */ 2709 */
2710void 2710void
@@ -2717,7 +2717,7 @@ bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
2717void 2717void
2718bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport) 2718bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
2719{ 2719{
2720 bfa_os_memset((char *)&rport->stats, 0, 2720 memset((char *)&rport->stats, 0,
2721 sizeof(struct bfa_rport_stats_s)); 2721 sizeof(struct bfa_rport_stats_s));
2722} 2722}
2723 2723
@@ -2767,7 +2767,7 @@ bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
2767 2767
2768 2768
2769 2769
2770/** 2770/*
2771 * Remote port features (RPF) implementation. 2771 * Remote port features (RPF) implementation.
2772 */ 2772 */
2773 2773
@@ -2786,7 +2786,7 @@ static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
2786 2786
2787static void bfa_fcs_rpf_timeout(void *arg); 2787static void bfa_fcs_rpf_timeout(void *arg);
2788 2788
2789/** 2789/*
2790 * fcs_rport_ftrs_sm FCS rport state machine events 2790 * fcs_rport_ftrs_sm FCS rport state machine events
2791 */ 2791 */
2792 2792
@@ -2981,7 +2981,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2981 bfa_sm_fault(rport->fcs, event); 2981 bfa_sm_fault(rport->fcs, event);
2982 } 2982 }
2983} 2983}
2984/** 2984/*
2985 * Called when Rport is created. 2985 * Called when Rport is created.
2986 */ 2986 */
2987void 2987void
@@ -2995,7 +2995,7 @@ bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
2995 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); 2995 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
2996} 2996}
2997 2997
2998/** 2998/*
2999 * Called when Rport becomes online 2999 * Called when Rport becomes online
3000 */ 3000 */
3001void 3001void
@@ -3010,7 +3010,7 @@ bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
3010 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); 3010 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
3011} 3011}
3012 3012
3013/** 3013/*
3014 * Called when Rport becomes offline 3014 * Called when Rport becomes offline
3015 */ 3015 */
3016void 3016void
@@ -3090,16 +3090,16 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
3090 rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp); 3090 rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
3091 if (rpsc2_acc->els_cmd == FC_ELS_ACC) { 3091 if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
3092 rport->stats.rpsc_accs++; 3092 rport->stats.rpsc_accs++;
3093 num_ents = bfa_os_ntohs(rpsc2_acc->num_pids); 3093 num_ents = be16_to_cpu(rpsc2_acc->num_pids);
3094 bfa_trc(rport->fcs, num_ents); 3094 bfa_trc(rport->fcs, num_ents);
3095 if (num_ents > 0) { 3095 if (num_ents > 0) {
3096 bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid); 3096 bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
3097 bfa_trc(rport->fcs, 3097 bfa_trc(rport->fcs,
3098 bfa_os_ntohs(rpsc2_acc->port_info[0].pid)); 3098 be16_to_cpu(rpsc2_acc->port_info[0].pid));
3099 bfa_trc(rport->fcs, 3099 bfa_trc(rport->fcs,
3100 bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); 3100 be16_to_cpu(rpsc2_acc->port_info[0].speed));
3101 bfa_trc(rport->fcs, 3101 bfa_trc(rport->fcs,
3102 bfa_os_ntohs(rpsc2_acc->port_info[0].index)); 3102 be16_to_cpu(rpsc2_acc->port_info[0].index));
3103 bfa_trc(rport->fcs, 3103 bfa_trc(rport->fcs,
3104 rpsc2_acc->port_info[0].type); 3104 rpsc2_acc->port_info[0].type);
3105 3105
@@ -3109,7 +3109,7 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
3109 } 3109 }
3110 3110
3111 rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed( 3111 rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
3112 bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); 3112 be16_to_cpu(rpsc2_acc->port_info[0].speed));
3113 3113
3114 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP); 3114 bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
3115 } 3115 }
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index c787d3af088..d8464ae6007 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -22,7 +22,7 @@ void
22bfa_hwcb_reginit(struct bfa_s *bfa) 22bfa_hwcb_reginit(struct bfa_s *bfa)
23{ 23{
24 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; 24 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
25 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); 25 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
26 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); 26 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
27 27
28 if (fn == 0) { 28 if (fn == 0) {
@@ -60,8 +60,8 @@ bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
60static void 60static void
61bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq) 61bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
62{ 62{
63 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, 63 writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
64 __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq)); 64 bfa->iocfc.bfa_regs.intr_status);
65} 65}
66 66
67void 67void
@@ -72,8 +72,8 @@ bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
72static void 72static void
73bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq) 73bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
74{ 74{
75 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, 75 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
76 __HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq)); 76 bfa->iocfc.bfa_regs.intr_status);
77} 77}
78 78
79void 79void
@@ -102,7 +102,7 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
102 *num_vecs = __HFN_NUMINTS; 102 *num_vecs = __HFN_NUMINTS;
103} 103}
104 104
105/** 105/*
106 * No special setup required for crossbow -- vector assignments are implicit. 106 * No special setup required for crossbow -- vector assignments are implicit.
107 */ 107 */
108void 108void
@@ -129,7 +129,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
129 bfa->msix.handler[i] = bfa_msix_lpu_err; 129 bfa->msix.handler[i] = bfa_msix_lpu_err;
130} 130}
131 131
132/** 132/*
133 * Crossbow -- dummy, interrupts are masked 133 * Crossbow -- dummy, interrupts are masked
134 */ 134 */
135void 135void
@@ -142,7 +142,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
142{ 142{
143} 143}
144 144
145/** 145/*
146 * No special enable/disable -- vector assignments are implicit. 146 * No special enable/disable -- vector assignments are implicit.
147 */ 147 */
148void 148void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index c97ebafec5e..b0efbc713ff 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -31,15 +31,15 @@ static void
31bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec) 31bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
32{ 32{
33 int fn = bfa_ioc_pcifn(&bfa->ioc); 33 int fn = bfa_ioc_pcifn(&bfa->ioc);
34 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); 34 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
35 35
36 if (msix) 36 if (msix)
37 bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec); 37 writel(vec, kva + __ct_msix_err_vec_reg[fn]);
38 else 38 else
39 bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0); 39 writel(0, kva + __ct_msix_err_vec_reg[fn]);
40} 40}
41 41
42/** 42/*
43 * Dummy interrupt handler for handling spurious interrupt during chip-reinit. 43 * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
44 */ 44 */
45static void 45static void
@@ -51,7 +51,7 @@ void
51bfa_hwct_reginit(struct bfa_s *bfa) 51bfa_hwct_reginit(struct bfa_s *bfa)
52{ 52{
53 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; 53 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
54 bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); 54 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
55 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); 55 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
56 56
57 if (fn == 0) { 57 if (fn == 0) {
@@ -88,8 +88,8 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
88{ 88{
89 u32 r32; 89 u32 r32;
90 90
91 r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); 91 r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
92 bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32); 92 writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
93} 93}
94 94
95void 95void
@@ -97,8 +97,8 @@ bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
97{ 97{
98 u32 r32; 98 u32 r32;
99 99
100 r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); 100 r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
101 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32); 101 writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
102} 102}
103 103
104void 104void
@@ -110,7 +110,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
110 *num_vecs = BFA_MSIX_CT_MAX; 110 *num_vecs = BFA_MSIX_CT_MAX;
111} 111}
112 112
113/** 113/*
114 * Setup MSI-X vector for catapult 114 * Setup MSI-X vector for catapult
115 */ 115 */
116void 116void
@@ -156,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
156 bfa->msix.handler[i] = bfa_hwct_msix_dummy; 156 bfa->msix.handler[i] = bfa_hwct_msix_dummy;
157} 157}
158 158
159/** 159/*
160 * Enable MSI-X vectors 160 * Enable MSI-X vectors
161 */ 161 */
162void 162void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 6795b247791..54475b53a5a 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -23,7 +23,7 @@
23 23
24BFA_TRC_FILE(CNA, IOC); 24BFA_TRC_FILE(CNA, IOC);
25 25
26/** 26/*
27 * IOC local definitions 27 * IOC local definitions
28 */ 28 */
29#define BFA_IOC_TOV 3000 /* msecs */ 29#define BFA_IOC_TOV 3000 /* msecs */
@@ -49,7 +49,7 @@ BFA_TRC_FILE(CNA, IOC);
49 BFA_TRC_MAX * sizeof(struct bfa_trc_s))) 49 BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
50#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 50#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
51 51
52/** 52/*
53 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 53 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
54 */ 54 */
55 55
@@ -73,7 +73,7 @@ BFA_TRC_FILE(CNA, IOC);
73 73
74#define bfa_ioc_mbox_cmd_pending(__ioc) \ 74#define bfa_ioc_mbox_cmd_pending(__ioc) \
75 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ 75 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
76 bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd)) 76 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
77 77
78bfa_boolean_t bfa_auto_recover = BFA_TRUE; 78bfa_boolean_t bfa_auto_recover = BFA_TRUE;
79 79
@@ -101,11 +101,11 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
101static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc); 101static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
102static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); 102static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
103 103
104/** 104/*
105 * hal_ioc_sm 105 * hal_ioc_sm
106 */ 106 */
107 107
108/** 108/*
109 * IOC state machine definitions/declarations 109 * IOC state machine definitions/declarations
110 */ 110 */
111enum ioc_event { 111enum ioc_event {
@@ -144,7 +144,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
145}; 145};
146 146
147/** 147/*
148 * IOCPF state machine definitions/declarations 148 * IOCPF state machine definitions/declarations
149 */ 149 */
150 150
@@ -174,7 +174,7 @@ static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
174static void bfa_iocpf_timeout(void *ioc_arg); 174static void bfa_iocpf_timeout(void *ioc_arg);
175static void bfa_iocpf_sem_timeout(void *ioc_arg); 175static void bfa_iocpf_sem_timeout(void *ioc_arg);
176 176
177/** 177/*
178 * IOCPF state machine events 178 * IOCPF state machine events
179 */ 179 */
180enum iocpf_event { 180enum iocpf_event {
@@ -191,7 +191,7 @@ enum iocpf_event {
191 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ 191 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
192}; 192};
193 193
194/** 194/*
195 * IOCPF states 195 * IOCPF states
196 */ 196 */
197enum bfa_iocpf_state { 197enum bfa_iocpf_state {
@@ -232,11 +232,11 @@ static struct bfa_sm_table_s iocpf_sm_table[] = {
232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
233}; 233};
234 234
235/** 235/*
236 * IOC State Machine 236 * IOC State Machine
237 */ 237 */
238 238
239/** 239/*
240 * Beginning state. IOC uninit state. 240 * Beginning state. IOC uninit state.
241 */ 241 */
242 242
@@ -245,7 +245,7 @@ bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
245{ 245{
246} 246}
247 247
248/** 248/*
249 * IOC is in uninit state. 249 * IOC is in uninit state.
250 */ 250 */
251static void 251static void
@@ -262,7 +262,7 @@ bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
262 bfa_sm_fault(ioc, event); 262 bfa_sm_fault(ioc, event);
263 } 263 }
264} 264}
265/** 265/*
266 * Reset entry actions -- initialize state machine 266 * Reset entry actions -- initialize state machine
267 */ 267 */
268static void 268static void
@@ -271,7 +271,7 @@ bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
271 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); 271 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
272} 272}
273 273
274/** 274/*
275 * IOC is in reset state. 275 * IOC is in reset state.
276 */ 276 */
277static void 277static void
@@ -304,7 +304,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
304 bfa_iocpf_enable(ioc); 304 bfa_iocpf_enable(ioc);
305} 305}
306 306
307/** 307/*
308 * Host IOC function is being enabled, awaiting response from firmware. 308 * Host IOC function is being enabled, awaiting response from firmware.
309 * Semaphore is acquired. 309 * Semaphore is acquired.
310 */ 310 */
@@ -352,7 +352,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
352 bfa_ioc_send_getattr(ioc); 352 bfa_ioc_send_getattr(ioc);
353} 353}
354 354
355/** 355/*
356 * IOC configuration in progress. Timer is active. 356 * IOC configuration in progress. Timer is active.
357 */ 357 */
358static void 358static void
@@ -447,7 +447,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
447 BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n"); 447 BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
448} 448}
449 449
450/** 450/*
451 * IOC is being disabled 451 * IOC is being disabled
452 */ 452 */
453static void 453static void
@@ -474,7 +474,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
474 } 474 }
475} 475}
476 476
477/** 477/*
478 * IOC disable completion entry. 478 * IOC disable completion entry.
479 */ 479 */
480static void 480static void
@@ -514,7 +514,7 @@ bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
515} 515}
516 516
517/** 517/*
518 * Hardware initialization failed. 518 * Hardware initialization failed.
519 */ 519 */
520static void 520static void
@@ -528,7 +528,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
528 break; 528 break;
529 529
530 case IOC_E_FAILED: 530 case IOC_E_FAILED:
531 /** 531 /*
532 * Initialization failure during iocpf init retry. 532 * Initialization failure during iocpf init retry.
533 */ 533 */
534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -556,7 +556,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
556 struct bfa_ioc_hbfail_notify_s *notify; 556 struct bfa_ioc_hbfail_notify_s *notify;
557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
558 558
559 /** 559 /*
560 * Notify driver and common modules registered for notification. 560 * Notify driver and common modules registered for notification.
561 */ 561 */
562 ioc->cbfn->hbfail_cbfn(ioc->bfa); 562 ioc->cbfn->hbfail_cbfn(ioc->bfa);
@@ -569,7 +569,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
569 "Heart Beat of IOC has failed\n"); 569 "Heart Beat of IOC has failed\n");
570} 570}
571 571
572/** 572/*
573 * IOC failure. 573 * IOC failure.
574 */ 574 */
575static void 575static void
@@ -580,7 +580,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
580 switch (event) { 580 switch (event) {
581 581
582 case IOC_E_FAILED: 582 case IOC_E_FAILED:
583 /** 583 /*
584 * Initialization failure during iocpf recovery. 584 * Initialization failure during iocpf recovery.
585 * !!! Fall through !!! 585 * !!! Fall through !!!
586 */ 586 */
@@ -608,12 +608,12 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
608 608
609 609
610 610
611/** 611/*
612 * IOCPF State Machine 612 * IOCPF State Machine
613 */ 613 */
614 614
615 615
616/** 616/*
617 * Reset entry actions -- initialize state machine 617 * Reset entry actions -- initialize state machine
618 */ 618 */
619static void 619static void
@@ -623,7 +623,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
623 iocpf->auto_recover = bfa_auto_recover; 623 iocpf->auto_recover = bfa_auto_recover;
624} 624}
625 625
626/** 626/*
627 * Beginning state. IOC is in reset state. 627 * Beginning state. IOC is in reset state.
628 */ 628 */
629static void 629static void
@@ -646,7 +646,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
646 } 646 }
647} 647}
648 648
649/** 649/*
650 * Semaphore should be acquired for version check. 650 * Semaphore should be acquired for version check.
651 */ 651 */
652static void 652static void
@@ -655,7 +655,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
655 bfa_ioc_hw_sem_get(iocpf->ioc); 655 bfa_ioc_hw_sem_get(iocpf->ioc);
656} 656}
657 657
658/** 658/*
659 * Awaiting h/w semaphore to continue with version check. 659 * Awaiting h/w semaphore to continue with version check.
660 */ 660 */
661static void 661static void
@@ -692,7 +692,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
692 } 692 }
693} 693}
694 694
695/** 695/*
696 * Notify enable completion callback. 696 * Notify enable completion callback.
697 */ 697 */
698static void 698static void
@@ -708,7 +708,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
708 bfa_iocpf_timer_start(iocpf->ioc); 708 bfa_iocpf_timer_start(iocpf->ioc);
709} 709}
710 710
711/** 711/*
712 * Awaiting firmware version match. 712 * Awaiting firmware version match.
713 */ 713 */
714static void 714static void
@@ -739,7 +739,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
739 } 739 }
740} 740}
741 741
742/** 742/*
743 * Request for semaphore. 743 * Request for semaphore.
744 */ 744 */
745static void 745static void
@@ -748,7 +748,7 @@ bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
748 bfa_ioc_hw_sem_get(iocpf->ioc); 748 bfa_ioc_hw_sem_get(iocpf->ioc);
749} 749}
750 750
751/** 751/*
752 * Awaiting semaphore for h/w initialzation. 752 * Awaiting semaphore for h/w initialzation.
753 */ 753 */
754static void 754static void
@@ -782,7 +782,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE); 782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
783} 783}
784 784
785/** 785/*
786 * Hardware is being initialized. Interrupts are enabled. 786 * Hardware is being initialized. Interrupts are enabled.
787 * Holding hardware semaphore lock. 787 * Holding hardware semaphore lock.
788 */ 788 */
@@ -839,7 +839,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
839 bfa_ioc_send_enable(iocpf->ioc); 839 bfa_ioc_send_enable(iocpf->ioc);
840} 840}
841 841
842/** 842/*
843 * Host IOC function is being enabled, awaiting response from firmware. 843 * Host IOC function is being enabled, awaiting response from firmware.
844 * Semaphore is acquired. 844 * Semaphore is acquired.
845 */ 845 */
@@ -866,8 +866,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
866 case IOCPF_E_TIMEOUT: 866 case IOCPF_E_TIMEOUT:
867 iocpf->retry_count++; 867 iocpf->retry_count++;
868 if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) { 868 if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
869 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, 869 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
870 BFI_IOC_UNINIT);
871 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 870 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
872 break; 871 break;
873 } 872 }
@@ -944,7 +943,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
944 bfa_ioc_send_disable(iocpf->ioc); 943 bfa_ioc_send_disable(iocpf->ioc);
945} 944}
946 945
947/** 946/*
948 * IOC is being disabled 947 * IOC is being disabled
949 */ 948 */
950static void 949static void
@@ -968,7 +967,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
968 */ 967 */
969 968
970 case IOCPF_E_TIMEOUT: 969 case IOCPF_E_TIMEOUT:
971 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 970 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
972 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 971 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
973 break; 972 break;
974 973
@@ -980,7 +979,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
980 } 979 }
981} 980}
982 981
983/** 982/*
984 * IOC disable completion entry. 983 * IOC disable completion entry.
985 */ 984 */
986static void 985static void
@@ -1018,7 +1017,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1018 bfa_iocpf_timer_start(iocpf->ioc); 1017 bfa_iocpf_timer_start(iocpf->ioc);
1019} 1018}
1020 1019
1021/** 1020/*
1022 * Hardware initialization failed. 1021 * Hardware initialization failed.
1023 */ 1022 */
1024static void 1023static void
@@ -1053,18 +1052,18 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1053static void 1052static void
1054bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) 1053bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1055{ 1054{
1056 /** 1055 /*
1057 * Mark IOC as failed in hardware and stop firmware. 1056 * Mark IOC as failed in hardware and stop firmware.
1058 */ 1057 */
1059 bfa_ioc_lpu_stop(iocpf->ioc); 1058 bfa_ioc_lpu_stop(iocpf->ioc);
1060 bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); 1059 writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
1061 1060
1062 /** 1061 /*
1063 * Notify other functions on HB failure. 1062 * Notify other functions on HB failure.
1064 */ 1063 */
1065 bfa_ioc_notify_hbfail(iocpf->ioc); 1064 bfa_ioc_notify_hbfail(iocpf->ioc);
1066 1065
1067 /** 1066 /*
1068 * Flush any queued up mailbox requests. 1067 * Flush any queued up mailbox requests.
1069 */ 1068 */
1070 bfa_ioc_mbox_hbfail(iocpf->ioc); 1069 bfa_ioc_mbox_hbfail(iocpf->ioc);
@@ -1073,7 +1072,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1073 bfa_iocpf_recovery_timer_start(iocpf->ioc); 1072 bfa_iocpf_recovery_timer_start(iocpf->ioc);
1074} 1073}
1075 1074
1076/** 1075/*
1077 * IOC is in failed state. 1076 * IOC is in failed state.
1078 */ 1077 */
1079static void 1078static void
@@ -1101,7 +1100,7 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1101 1100
1102 1101
1103 1102
1104/** 1103/*
1105 * hal_ioc_pvt BFA IOC private functions 1104 * hal_ioc_pvt BFA IOC private functions
1106 */ 1105 */
1107 1106
@@ -1113,7 +1112,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1113 1112
1114 ioc->cbfn->disable_cbfn(ioc->bfa); 1113 ioc->cbfn->disable_cbfn(ioc->bfa);
1115 1114
1116 /** 1115 /*
1117 * Notify common modules registered for notification. 1116 * Notify common modules registered for notification.
1118 */ 1117 */
1119 list_for_each(qe, &ioc->hb_notify_q) { 1118 list_for_each(qe, &ioc->hb_notify_q) {
@@ -1123,18 +1122,18 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1123} 1122}
1124 1123
1125bfa_boolean_t 1124bfa_boolean_t
1126bfa_ioc_sem_get(bfa_os_addr_t sem_reg) 1125bfa_ioc_sem_get(void __iomem *sem_reg)
1127{ 1126{
1128 u32 r32; 1127 u32 r32;
1129 int cnt = 0; 1128 int cnt = 0;
1130#define BFA_SEM_SPINCNT 3000 1129#define BFA_SEM_SPINCNT 3000
1131 1130
1132 r32 = bfa_reg_read(sem_reg); 1131 r32 = readl(sem_reg);
1133 1132
1134 while (r32 && (cnt < BFA_SEM_SPINCNT)) { 1133 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
1135 cnt++; 1134 cnt++;
1136 bfa_os_udelay(2); 1135 udelay(2);
1137 r32 = bfa_reg_read(sem_reg); 1136 r32 = readl(sem_reg);
1138 } 1137 }
1139 1138
1140 if (r32 == 0) 1139 if (r32 == 0)
@@ -1145,9 +1144,9 @@ bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
1145} 1144}
1146 1145
1147void 1146void
1148bfa_ioc_sem_release(bfa_os_addr_t sem_reg) 1147bfa_ioc_sem_release(void __iomem *sem_reg)
1149{ 1148{
1150 bfa_reg_write(sem_reg, 1); 1149 writel(1, sem_reg);
1151} 1150}
1152 1151
1153static void 1152static void
@@ -1155,11 +1154,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1155{ 1154{
1156 u32 r32; 1155 u32 r32;
1157 1156
1158 /** 1157 /*
1159 * First read to the semaphore register will return 0, subsequent reads 1158 * First read to the semaphore register will return 0, subsequent reads
1160 * will return 1. Semaphore is released by writing 1 to the register 1159 * will return 1. Semaphore is released by writing 1 to the register
1161 */ 1160 */
1162 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 1161 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1163 if (r32 == 0) { 1162 if (r32 == 0) {
1164 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); 1163 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1165 return; 1164 return;
@@ -1171,7 +1170,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1171void 1170void
1172bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) 1171bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
1173{ 1172{
1174 bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); 1173 writel(1, ioc->ioc_regs.ioc_sem_reg);
1175} 1174}
1176 1175
1177static void 1176static void
@@ -1180,7 +1179,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
1180 bfa_sem_timer_stop(ioc); 1179 bfa_sem_timer_stop(ioc);
1181} 1180}
1182 1181
1183/** 1182/*
1184 * Initialize LPU local memory (aka secondary memory / SRAM) 1183 * Initialize LPU local memory (aka secondary memory / SRAM)
1185 */ 1184 */
1186static void 1185static void
@@ -1190,7 +1189,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1190 int i; 1189 int i;
1191#define PSS_LMEM_INIT_TIME 10000 1190#define PSS_LMEM_INIT_TIME 10000
1192 1191
1193 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1192 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1194 pss_ctl &= ~__PSS_LMEM_RESET; 1193 pss_ctl &= ~__PSS_LMEM_RESET;
1195 pss_ctl |= __PSS_LMEM_INIT_EN; 1194 pss_ctl |= __PSS_LMEM_INIT_EN;
1196 1195
@@ -1198,18 +1197,18 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1198 * i2c workaround 12.5khz clock 1197 * i2c workaround 12.5khz clock
1199 */ 1198 */
1200 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); 1199 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1201 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1200 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1202 1201
1203 /** 1202 /*
1204 * wait for memory initialization to be complete 1203 * wait for memory initialization to be complete
1205 */ 1204 */
1206 i = 0; 1205 i = 0;
1207 do { 1206 do {
1208 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1207 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1209 i++; 1208 i++;
1210 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); 1209 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1211 1210
1212 /** 1211 /*
1213 * If memory initialization is not successful, IOC timeout will catch 1212 * If memory initialization is not successful, IOC timeout will catch
1214 * such failures. 1213 * such failures.
1215 */ 1214 */
@@ -1217,7 +1216,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1217 bfa_trc(ioc, pss_ctl); 1216 bfa_trc(ioc, pss_ctl);
1218 1217
1219 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); 1218 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1220 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1219 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1221} 1220}
1222 1221
1223static void 1222static void
@@ -1225,13 +1224,13 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1225{ 1224{
1226 u32 pss_ctl; 1225 u32 pss_ctl;
1227 1226
1228 /** 1227 /*
1229 * Take processor out of reset. 1228 * Take processor out of reset.
1230 */ 1229 */
1231 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1230 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1232 pss_ctl &= ~__PSS_LPU0_RESET; 1231 pss_ctl &= ~__PSS_LPU0_RESET;
1233 1232
1234 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1233 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1235} 1234}
1236 1235
1237static void 1236static void
@@ -1239,16 +1238,16 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1239{ 1238{
1240 u32 pss_ctl; 1239 u32 pss_ctl;
1241 1240
1242 /** 1241 /*
1243 * Put processors in reset. 1242 * Put processors in reset.
1244 */ 1243 */
1245 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); 1244 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1246 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); 1245 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1247 1246
1248 bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); 1247 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1249} 1248}
1250 1249
1251/** 1250/*
1252 * Get driver and firmware versions. 1251 * Get driver and firmware versions.
1253 */ 1252 */
1254void 1253void
@@ -1261,7 +1260,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1261 1260
1262 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1261 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1263 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1262 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1264 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1263 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1265 1264
1266 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); 1265 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1267 i++) { 1266 i++) {
@@ -1271,7 +1270,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1271 } 1270 }
1272} 1271}
1273 1272
1274/** 1273/*
1275 * Returns TRUE if same. 1274 * Returns TRUE if same.
1276 */ 1275 */
1277bfa_boolean_t 1276bfa_boolean_t
@@ -1296,7 +1295,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1296 return BFA_TRUE; 1295 return BFA_TRUE;
1297} 1296}
1298 1297
1299/** 1298/*
1300 * Return true if current running version is valid. Firmware signature and 1299 * Return true if current running version is valid. Firmware signature and
1301 * execution context (driver/bios) must match. 1300 * execution context (driver/bios) must match.
1302 */ 1301 */
@@ -1305,7 +1304,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1305{ 1304{
1306 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; 1305 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1307 1306
1308 /** 1307 /*
1309 * If bios/efi boot (flash based) -- return true 1308 * If bios/efi boot (flash based) -- return true
1310 */ 1309 */
1311 if (bfa_ioc_is_bios_optrom(ioc)) 1310 if (bfa_ioc_is_bios_optrom(ioc))
@@ -1321,7 +1320,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1321 return BFA_FALSE; 1320 return BFA_FALSE;
1322 } 1321 }
1323 1322
1324 if (bfa_os_swap32(fwhdr.param) != boot_env) { 1323 if (swab32(fwhdr.param) != boot_env) {
1325 bfa_trc(ioc, fwhdr.param); 1324 bfa_trc(ioc, fwhdr.param);
1326 bfa_trc(ioc, boot_env); 1325 bfa_trc(ioc, boot_env);
1327 return BFA_FALSE; 1326 return BFA_FALSE;
@@ -1330,7 +1329,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1330 return bfa_ioc_fwver_cmp(ioc, &fwhdr); 1329 return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1331} 1330}
1332 1331
1333/** 1332/*
1334 * Conditionally flush any pending message from firmware at start. 1333 * Conditionally flush any pending message from firmware at start.
1335 */ 1334 */
1336static void 1335static void
@@ -1338,9 +1337,9 @@ bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1338{ 1337{
1339 u32 r32; 1338 u32 r32;
1340 1339
1341 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); 1340 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1342 if (r32) 1341 if (r32)
1343 bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1); 1342 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1344} 1343}
1345 1344
1346 1345
@@ -1352,7 +1351,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1352 u32 boot_type; 1351 u32 boot_type;
1353 u32 boot_env; 1352 u32 boot_env;
1354 1353
1355 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 1354 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1356 1355
1357 if (force) 1356 if (force)
1358 ioc_fwstate = BFI_IOC_UNINIT; 1357 ioc_fwstate = BFI_IOC_UNINIT;
@@ -1362,7 +1361,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1362 boot_type = BFI_BOOT_TYPE_NORMAL; 1361 boot_type = BFI_BOOT_TYPE_NORMAL;
1363 boot_env = BFI_BOOT_LOADER_OS; 1362 boot_env = BFI_BOOT_LOADER_OS;
1364 1363
1365 /** 1364 /*
1366 * Flash based firmware boot BIOS env. 1365 * Flash based firmware boot BIOS env.
1367 */ 1366 */
1368 if (bfa_ioc_is_bios_optrom(ioc)) { 1367 if (bfa_ioc_is_bios_optrom(ioc)) {
@@ -1370,7 +1369,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1370 boot_env = BFI_BOOT_LOADER_BIOS; 1369 boot_env = BFI_BOOT_LOADER_BIOS;
1371 } 1370 }
1372 1371
1373 /** 1372 /*
1374 * Flash based firmware boot UEFI env. 1373 * Flash based firmware boot UEFI env.
1375 */ 1374 */
1376 if (bfa_ioc_is_uefi(ioc)) { 1375 if (bfa_ioc_is_uefi(ioc)) {
@@ -1378,7 +1377,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1378 boot_env = BFI_BOOT_LOADER_UEFI; 1377 boot_env = BFI_BOOT_LOADER_UEFI;
1379 } 1378 }
1380 1379
1381 /** 1380 /*
1382 * check if firmware is valid 1381 * check if firmware is valid
1383 */ 1382 */
1384 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 1383 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
@@ -1389,7 +1388,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1389 return; 1388 return;
1390 } 1389 }
1391 1390
1392 /** 1391 /*
1393 * If hardware initialization is in progress (initialized by other IOC), 1392 * If hardware initialization is in progress (initialized by other IOC),
1394 * just wait for an initialization completion interrupt. 1393 * just wait for an initialization completion interrupt.
1395 */ 1394 */
@@ -1398,7 +1397,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1398 return; 1397 return;
1399 } 1398 }
1400 1399
1401 /** 1400 /*
1402 * If IOC function is disabled and firmware version is same, 1401 * If IOC function is disabled and firmware version is same,
1403 * just re-enable IOC. 1402 * just re-enable IOC.
1404 * 1403 *
@@ -1409,7 +1408,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1409 if (ioc_fwstate == BFI_IOC_DISABLED || 1408 if (ioc_fwstate == BFI_IOC_DISABLED ||
1410 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { 1409 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
1411 1410
1412 /** 1411 /*
1413 * When using MSI-X any pending firmware ready event should 1412 * When using MSI-X any pending firmware ready event should
1414 * be flushed. Otherwise MSI-X interrupts are not delivered. 1413 * be flushed. Otherwise MSI-X interrupts are not delivered.
1415 */ 1414 */
@@ -1419,7 +1418,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1419 return; 1418 return;
1420 } 1419 }
1421 1420
1422 /** 1421 /*
1423 * Initialize the h/w for any other states. 1422 * Initialize the h/w for any other states.
1424 */ 1423 */
1425 bfa_ioc_boot(ioc, boot_type, boot_env); 1424 bfa_ioc_boot(ioc, boot_type, boot_env);
@@ -1449,17 +1448,17 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1449 * first write msg to mailbox registers 1448 * first write msg to mailbox registers
1450 */ 1449 */
1451 for (i = 0; i < len / sizeof(u32); i++) 1450 for (i = 0; i < len / sizeof(u32); i++)
1452 bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 1451 writel(cpu_to_le32(msgp[i]),
1453 bfa_os_wtole(msgp[i])); 1452 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1454 1453
1455 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) 1454 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1456 bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0); 1455 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1457 1456
1458 /* 1457 /*
1459 * write 1 to mailbox CMD to trigger LPU event 1458 * write 1 to mailbox CMD to trigger LPU event
1460 */ 1459 */
1461 bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); 1460 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1462 (void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 1461 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1463} 1462}
1464 1463
1465static void 1464static void
@@ -1472,7 +1471,7 @@ bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1472 bfa_ioc_portid(ioc)); 1471 bfa_ioc_portid(ioc));
1473 enable_req.ioc_class = ioc->ioc_mc; 1472 enable_req.ioc_class = ioc->ioc_mc;
1474 bfa_os_gettimeofday(&tv); 1473 bfa_os_gettimeofday(&tv);
1475 enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec); 1474 enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
1476 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1475 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1477} 1476}
1478 1477
@@ -1503,7 +1502,7 @@ bfa_ioc_hb_check(void *cbarg)
1503 struct bfa_ioc_s *ioc = cbarg; 1502 struct bfa_ioc_s *ioc = cbarg;
1504 u32 hb_count; 1503 u32 hb_count;
1505 1504
1506 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1505 hb_count = readl(ioc->ioc_regs.heartbeat);
1507 if (ioc->hb_count == hb_count) { 1506 if (ioc->hb_count == hb_count) {
1508 printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count); 1507 printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
1509 bfa_ioc_recover(ioc); 1508 bfa_ioc_recover(ioc);
@@ -1519,7 +1518,7 @@ bfa_ioc_hb_check(void *cbarg)
1519static void 1518static void
1520bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) 1519bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1521{ 1520{
1522 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1521 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1523 bfa_hb_timer_start(ioc); 1522 bfa_hb_timer_start(ioc);
1524} 1523}
1525 1524
@@ -1530,7 +1529,7 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1530} 1529}
1531 1530
1532 1531
1533/** 1532/*
1534 * Initiate a full firmware download. 1533 * Initiate a full firmware download.
1535 */ 1534 */
1536static void 1535static void
@@ -1543,7 +1542,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1543 u32 chunkno = 0; 1542 u32 chunkno = 0;
1544 u32 i; 1543 u32 i;
1545 1544
1546 /** 1545 /*
1547 * Initialize LMEM first before code download 1546 * Initialize LMEM first before code download
1548 */ 1547 */
1549 bfa_ioc_lmem_init(ioc); 1548 bfa_ioc_lmem_init(ioc);
@@ -1554,7 +1553,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1554 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1553 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1555 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1554 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1556 1555
1557 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1556 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1558 1557
1559 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { 1558 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1560 1559
@@ -1564,7 +1563,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1564 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1563 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1565 } 1564 }
1566 1565
1567 /** 1566 /*
1568 * write smem 1567 * write smem
1569 */ 1568 */
1570 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 1569 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
@@ -1572,27 +1571,25 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1572 1571
1573 loff += sizeof(u32); 1572 loff += sizeof(u32);
1574 1573
1575 /** 1574 /*
1576 * handle page offset wrap around 1575 * handle page offset wrap around
1577 */ 1576 */
1578 loff = PSS_SMEM_PGOFF(loff); 1577 loff = PSS_SMEM_PGOFF(loff);
1579 if (loff == 0) { 1578 if (loff == 0) {
1580 pgnum++; 1579 pgnum++;
1581 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1580 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1582 pgnum);
1583 } 1581 }
1584 } 1582 }
1585 1583
1586 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1584 writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
1587 bfa_ioc_smem_pgnum(ioc, 0));
1588 1585
1589 /* 1586 /*
1590 * Set boot type and boot param at the end. 1587 * Set boot type and boot param at the end.
1591 */ 1588 */
1592 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, 1589 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
1593 bfa_os_swap32(boot_type)); 1590 swab32(boot_type));
1594 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF, 1591 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
1595 bfa_os_swap32(boot_env)); 1592 swab32(boot_env));
1596} 1593}
1597 1594
1598static void 1595static void
@@ -1601,7 +1598,7 @@ bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1601 bfa_ioc_hwinit(ioc, force); 1598 bfa_ioc_hwinit(ioc, force);
1602} 1599}
1603 1600
1604/** 1601/*
1605 * Update BFA configuration from firmware configuration. 1602 * Update BFA configuration from firmware configuration.
1606 */ 1603 */
1607static void 1604static void
@@ -1609,14 +1606,14 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1609{ 1606{
1610 struct bfi_ioc_attr_s *attr = ioc->attr; 1607 struct bfi_ioc_attr_s *attr = ioc->attr;
1611 1608
1612 attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); 1609 attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1613 attr->card_type = bfa_os_ntohl(attr->card_type); 1610 attr->card_type = be32_to_cpu(attr->card_type);
1614 attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); 1611 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
1615 1612
1616 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1613 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1617} 1614}
1618 1615
1619/** 1616/*
1620 * Attach time initialization of mbox logic. 1617 * Attach time initialization of mbox logic.
1621 */ 1618 */
1622static void 1619static void
@@ -1632,7 +1629,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1632 } 1629 }
1633} 1630}
1634 1631
1635/** 1632/*
1636 * Mbox poll timer -- restarts any pending mailbox requests. 1633 * Mbox poll timer -- restarts any pending mailbox requests.
1637 */ 1634 */
1638static void 1635static void
@@ -1642,27 +1639,27 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1642 struct bfa_mbox_cmd_s *cmd; 1639 struct bfa_mbox_cmd_s *cmd;
1643 u32 stat; 1640 u32 stat;
1644 1641
1645 /** 1642 /*
1646 * If no command pending, do nothing 1643 * If no command pending, do nothing
1647 */ 1644 */
1648 if (list_empty(&mod->cmd_q)) 1645 if (list_empty(&mod->cmd_q))
1649 return; 1646 return;
1650 1647
1651 /** 1648 /*
1652 * If previous command is not yet fetched by firmware, do nothing 1649 * If previous command is not yet fetched by firmware, do nothing
1653 */ 1650 */
1654 stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 1651 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1655 if (stat) 1652 if (stat)
1656 return; 1653 return;
1657 1654
1658 /** 1655 /*
1659 * Enqueue command to firmware. 1656 * Enqueue command to firmware.
1660 */ 1657 */
1661 bfa_q_deq(&mod->cmd_q, &cmd); 1658 bfa_q_deq(&mod->cmd_q, &cmd);
1662 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 1659 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1663} 1660}
1664 1661
1665/** 1662/*
1666 * Cleanup any pending requests. 1663 * Cleanup any pending requests.
1667 */ 1664 */
1668static void 1665static void
@@ -1675,7 +1672,7 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1675 bfa_q_deq(&mod->cmd_q, &cmd); 1672 bfa_q_deq(&mod->cmd_q, &cmd);
1676} 1673}
1677 1674
1678/** 1675/*
1679 * Read data from SMEM to host through PCI memmap 1676 * Read data from SMEM to host through PCI memmap
1680 * 1677 *
1681 * @param[in] ioc memory for IOC 1678 * @param[in] ioc memory for IOC
@@ -1704,26 +1701,25 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1704 return BFA_STATUS_FAILED; 1701 return BFA_STATUS_FAILED;
1705 } 1702 }
1706 1703
1707 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1704 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1708 1705
1709 len = sz/sizeof(u32); 1706 len = sz/sizeof(u32);
1710 bfa_trc(ioc, len); 1707 bfa_trc(ioc, len);
1711 for (i = 0; i < len; i++) { 1708 for (i = 0; i < len; i++) {
1712 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); 1709 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1713 buf[i] = bfa_os_ntohl(r32); 1710 buf[i] = be32_to_cpu(r32);
1714 loff += sizeof(u32); 1711 loff += sizeof(u32);
1715 1712
1716 /** 1713 /*
1717 * handle page offset wrap around 1714 * handle page offset wrap around
1718 */ 1715 */
1719 loff = PSS_SMEM_PGOFF(loff); 1716 loff = PSS_SMEM_PGOFF(loff);
1720 if (loff == 0) { 1717 if (loff == 0) {
1721 pgnum++; 1718 pgnum++;
1722 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1719 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1723 } 1720 }
1724 } 1721 }
1725 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1722 writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
1726 bfa_ioc_smem_pgnum(ioc, 0));
1727 /* 1723 /*
1728 * release semaphore. 1724 * release semaphore.
1729 */ 1725 */
@@ -1733,7 +1729,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1733 return BFA_STATUS_OK; 1729 return BFA_STATUS_OK;
1734} 1730}
1735 1731
1736/** 1732/*
1737 * Clear SMEM data from host through PCI memmap 1733 * Clear SMEM data from host through PCI memmap
1738 * 1734 *
1739 * @param[in] ioc memory for IOC 1735 * @param[in] ioc memory for IOC
@@ -1760,7 +1756,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1760 return BFA_STATUS_FAILED; 1756 return BFA_STATUS_FAILED;
1761 } 1757 }
1762 1758
1763 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1759 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1764 1760
1765 len = sz/sizeof(u32); /* len in words */ 1761 len = sz/sizeof(u32); /* len in words */
1766 bfa_trc(ioc, len); 1762 bfa_trc(ioc, len);
@@ -1768,17 +1764,16 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1768 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); 1764 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1769 loff += sizeof(u32); 1765 loff += sizeof(u32);
1770 1766
1771 /** 1767 /*
1772 * handle page offset wrap around 1768 * handle page offset wrap around
1773 */ 1769 */
1774 loff = PSS_SMEM_PGOFF(loff); 1770 loff = PSS_SMEM_PGOFF(loff);
1775 if (loff == 0) { 1771 if (loff == 0) {
1776 pgnum++; 1772 pgnum++;
1777 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 1773 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1778 } 1774 }
1779 } 1775 }
1780 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1776 writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
1781 bfa_ioc_smem_pgnum(ioc, 0));
1782 1777
1783 /* 1778 /*
1784 * release semaphore. 1779 * release semaphore.
@@ -1788,7 +1783,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1788 return BFA_STATUS_OK; 1783 return BFA_STATUS_OK;
1789} 1784}
1790 1785
1791/** 1786/*
1792 * hal iocpf to ioc interface 1787 * hal iocpf to ioc interface
1793 */ 1788 */
1794static void 1789static void
@@ -1813,7 +1808,7 @@ static void
1813bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) 1808bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1814{ 1809{
1815 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 1810 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1816 /** 1811 /*
1817 * Provide enable completion callback. 1812 * Provide enable completion callback.
1818 */ 1813 */
1819 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1814 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -1824,7 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1824 1819
1825 1820
1826 1821
1827/** 1822/*
1828 * hal_ioc_public 1823 * hal_ioc_public
1829 */ 1824 */
1830 1825
@@ -1848,43 +1843,43 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1848 return BFA_STATUS_OK; 1843 return BFA_STATUS_OK;
1849} 1844}
1850 1845
1851/** 1846/*
1852 * Interface used by diag module to do firmware boot with memory test 1847 * Interface used by diag module to do firmware boot with memory test
1853 * as the entry vector. 1848 * as the entry vector.
1854 */ 1849 */
1855void 1850void
1856bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) 1851bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1857{ 1852{
1858 bfa_os_addr_t rb; 1853 void __iomem *rb;
1859 1854
1860 bfa_ioc_stats(ioc, ioc_boots); 1855 bfa_ioc_stats(ioc, ioc_boots);
1861 1856
1862 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) 1857 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1863 return; 1858 return;
1864 1859
1865 /** 1860 /*
1866 * Initialize IOC state of all functions on a chip reset. 1861 * Initialize IOC state of all functions on a chip reset.
1867 */ 1862 */
1868 rb = ioc->pcidev.pci_bar_kva; 1863 rb = ioc->pcidev.pci_bar_kva;
1869 if (boot_type == BFI_BOOT_TYPE_MEMTEST) { 1864 if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
1870 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST); 1865 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1871 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST); 1866 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1872 } else { 1867 } else {
1873 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING); 1868 writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1874 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING); 1869 writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1875 } 1870 }
1876 1871
1877 bfa_ioc_msgflush(ioc); 1872 bfa_ioc_msgflush(ioc);
1878 bfa_ioc_download_fw(ioc, boot_type, boot_env); 1873 bfa_ioc_download_fw(ioc, boot_type, boot_env);
1879 1874
1880 /** 1875 /*
1881 * Enable interrupts just before starting LPU 1876 * Enable interrupts just before starting LPU
1882 */ 1877 */
1883 ioc->cbfn->reset_cbfn(ioc->bfa); 1878 ioc->cbfn->reset_cbfn(ioc->bfa);
1884 bfa_ioc_lpu_start(ioc); 1879 bfa_ioc_lpu_start(ioc);
1885} 1880}
1886 1881
1887/** 1882/*
1888 * Enable/disable IOC failure auto recovery. 1883 * Enable/disable IOC failure auto recovery.
1889 */ 1884 */
1890void 1885void
@@ -1904,7 +1899,7 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
1904bfa_boolean_t 1899bfa_boolean_t
1905bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) 1900bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
1906{ 1901{
1907 u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 1902 u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
1908 1903
1909 return ((r32 != BFI_IOC_UNINIT) && 1904 return ((r32 != BFI_IOC_UNINIT) &&
1910 (r32 != BFI_IOC_INITING) && 1905 (r32 != BFI_IOC_INITING) &&
@@ -1918,21 +1913,21 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1918 u32 r32; 1913 u32 r32;
1919 int i; 1914 int i;
1920 1915
1921 /** 1916 /*
1922 * read the MBOX msg 1917 * read the MBOX msg
1923 */ 1918 */
1924 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); 1919 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1925 i++) { 1920 i++) {
1926 r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox + 1921 r32 = readl(ioc->ioc_regs.lpu_mbox +
1927 i * sizeof(u32)); 1922 i * sizeof(u32));
1928 msgp[i] = bfa_os_htonl(r32); 1923 msgp[i] = cpu_to_be32(r32);
1929 } 1924 }
1930 1925
1931 /** 1926 /*
1932 * turn off mailbox interrupt by clearing mailbox status 1927 * turn off mailbox interrupt by clearing mailbox status
1933 */ 1928 */
1934 bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1); 1929 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1935 bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); 1930 readl(ioc->ioc_regs.lpu_mbox_cmd);
1936} 1931}
1937 1932
1938void 1933void
@@ -1971,7 +1966,7 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1971 } 1966 }
1972} 1967}
1973 1968
1974/** 1969/*
1975 * IOC attach time initialization and setup. 1970 * IOC attach time initialization and setup.
1976 * 1971 *
1977 * @param[in] ioc memory for IOC 1972 * @param[in] ioc memory for IOC
@@ -1996,7 +1991,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
1996 bfa_fsm_send_event(ioc, IOC_E_RESET); 1991 bfa_fsm_send_event(ioc, IOC_E_RESET);
1997} 1992}
1998 1993
1999/** 1994/*
2000 * Driver detach time IOC cleanup. 1995 * Driver detach time IOC cleanup.
2001 */ 1996 */
2002void 1997void
@@ -2005,7 +2000,7 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc)
2005 bfa_fsm_send_event(ioc, IOC_E_DETACH); 2000 bfa_fsm_send_event(ioc, IOC_E_DETACH);
2006} 2001}
2007 2002
2008/** 2003/*
2009 * Setup IOC PCI properties. 2004 * Setup IOC PCI properties.
2010 * 2005 *
2011 * @param[in] pcidev PCI device information for this IOC 2006 * @param[in] pcidev PCI device information for this IOC
@@ -2019,7 +2014,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2019 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); 2014 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
2020 ioc->cna = ioc->ctdev && !ioc->fcmode; 2015 ioc->cna = ioc->ctdev && !ioc->fcmode;
2021 2016
2022 /** 2017 /*
2023 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c 2018 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2024 */ 2019 */
2025 if (ioc->ctdev) 2020 if (ioc->ctdev)
@@ -2031,7 +2026,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2031 bfa_ioc_reg_init(ioc); 2026 bfa_ioc_reg_init(ioc);
2032} 2027}
2033 2028
2034/** 2029/*
2035 * Initialize IOC dma memory 2030 * Initialize IOC dma memory
2036 * 2031 *
2037 * @param[in] dm_kva kernel virtual address of IOC dma memory 2032 * @param[in] dm_kva kernel virtual address of IOC dma memory
@@ -2040,7 +2035,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2040void 2035void
2041bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) 2036bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2042{ 2037{
2043 /** 2038 /*
2044 * dma memory for firmware attribute 2039 * dma memory for firmware attribute
2045 */ 2040 */
2046 ioc->attr_dma.kva = dm_kva; 2041 ioc->attr_dma.kva = dm_kva;
@@ -2048,7 +2043,7 @@ bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2048 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; 2043 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2049} 2044}
2050 2045
2051/** 2046/*
2052 * Return size of dma memory required. 2047 * Return size of dma memory required.
2053 */ 2048 */
2054u32 2049u32
@@ -2073,7 +2068,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
2073 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2068 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2074} 2069}
2075 2070
2076/** 2071/*
2077 * Returns memory required for saving firmware trace in case of crash. 2072 * Returns memory required for saving firmware trace in case of crash.
2078 * Driver must call this interface to allocate memory required for 2073 * Driver must call this interface to allocate memory required for
2079 * automatic saving of firmware trace. Driver should call 2074 * automatic saving of firmware trace. Driver should call
@@ -2086,7 +2081,7 @@ bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
2086 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 2081 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2087} 2082}
2088 2083
2089/** 2084/*
2090 * Initialize memory for saving firmware trace. Driver must initialize 2085 * Initialize memory for saving firmware trace. Driver must initialize
2091 * trace memory before call bfa_ioc_enable(). 2086 * trace memory before call bfa_ioc_enable().
2092 */ 2087 */
@@ -2109,7 +2104,7 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
2109 return PSS_SMEM_PGOFF(fmaddr); 2104 return PSS_SMEM_PGOFF(fmaddr);
2110} 2105}
2111 2106
2112/** 2107/*
2113 * Register mailbox message handler functions 2108 * Register mailbox message handler functions
2114 * 2109 *
2115 * @param[in] ioc IOC instance 2110 * @param[in] ioc IOC instance
@@ -2125,7 +2120,7 @@ bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2125 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; 2120 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2126} 2121}
2127 2122
2128/** 2123/*
2129 * Register mailbox message handler function, to be called by common modules 2124 * Register mailbox message handler function, to be called by common modules
2130 */ 2125 */
2131void 2126void
@@ -2138,7 +2133,7 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2138 mod->mbhdlr[mc].cbarg = cbarg; 2133 mod->mbhdlr[mc].cbarg = cbarg;
2139} 2134}
2140 2135
2141/** 2136/*
2142 * Queue a mailbox command request to firmware. Waits if mailbox is busy. 2137 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2143 * Responsibility of caller to serialize 2138 * Responsibility of caller to serialize
2144 * 2139 *
@@ -2151,7 +2146,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2151 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2146 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2152 u32 stat; 2147 u32 stat;
2153 2148
2154 /** 2149 /*
2155 * If a previous command is pending, queue new command 2150 * If a previous command is pending, queue new command
2156 */ 2151 */
2157 if (!list_empty(&mod->cmd_q)) { 2152 if (!list_empty(&mod->cmd_q)) {
@@ -2159,22 +2154,22 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2159 return; 2154 return;
2160 } 2155 }
2161 2156
2162 /** 2157 /*
2163 * If mailbox is busy, queue command for poll timer 2158 * If mailbox is busy, queue command for poll timer
2164 */ 2159 */
2165 stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); 2160 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2166 if (stat) { 2161 if (stat) {
2167 list_add_tail(&cmd->qe, &mod->cmd_q); 2162 list_add_tail(&cmd->qe, &mod->cmd_q);
2168 return; 2163 return;
2169 } 2164 }
2170 2165
2171 /** 2166 /*
2172 * mailbox is free -- queue command to firmware 2167 * mailbox is free -- queue command to firmware
2173 */ 2168 */
2174 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2169 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2175} 2170}
2176 2171
2177/** 2172/*
2178 * Handle mailbox interrupts 2173 * Handle mailbox interrupts
2179 */ 2174 */
2180void 2175void
@@ -2186,7 +2181,7 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2186 2181
2187 bfa_ioc_msgget(ioc, &m); 2182 bfa_ioc_msgget(ioc, &m);
2188 2183
2189 /** 2184 /*
2190 * Treat IOC message class as special. 2185 * Treat IOC message class as special.
2191 */ 2186 */
2192 mc = m.mh.msg_class; 2187 mc = m.mh.msg_class;
@@ -2214,7 +2209,7 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
2214 ioc->port_id = bfa_ioc_pcifn(ioc); 2209 ioc->port_id = bfa_ioc_pcifn(ioc);
2215} 2210}
2216 2211
2217/** 2212/*
2218 * return true if IOC is disabled 2213 * return true if IOC is disabled
2219 */ 2214 */
2220bfa_boolean_t 2215bfa_boolean_t
@@ -2224,7 +2219,7 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2224 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2219 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2225} 2220}
2226 2221
2227/** 2222/*
2228 * return true if IOC firmware is different. 2223 * return true if IOC firmware is different.
2229 */ 2224 */
2230bfa_boolean_t 2225bfa_boolean_t
@@ -2243,7 +2238,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2243 ((__sm) == BFI_IOC_FAIL) || \ 2238 ((__sm) == BFI_IOC_FAIL) || \
2244 ((__sm) == BFI_IOC_CFG_DISABLED)) 2239 ((__sm) == BFI_IOC_CFG_DISABLED))
2245 2240
2246/** 2241/*
2247 * Check if adapter is disabled -- both IOCs should be in a disabled 2242 * Check if adapter is disabled -- both IOCs should be in a disabled
2248 * state. 2243 * state.
2249 */ 2244 */
@@ -2251,17 +2246,17 @@ bfa_boolean_t
2251bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) 2246bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2252{ 2247{
2253 u32 ioc_state; 2248 u32 ioc_state;
2254 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 2249 void __iomem *rb = ioc->pcidev.pci_bar_kva;
2255 2250
2256 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) 2251 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2257 return BFA_FALSE; 2252 return BFA_FALSE;
2258 2253
2259 ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG); 2254 ioc_state = readl(rb + BFA_IOC0_STATE_REG);
2260 if (!bfa_ioc_state_disabled(ioc_state)) 2255 if (!bfa_ioc_state_disabled(ioc_state))
2261 return BFA_FALSE; 2256 return BFA_FALSE;
2262 2257
2263 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { 2258 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2264 ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); 2259 ioc_state = readl(rb + BFA_IOC1_STATE_REG);
2265 if (!bfa_ioc_state_disabled(ioc_state)) 2260 if (!bfa_ioc_state_disabled(ioc_state))
2266 return BFA_FALSE; 2261 return BFA_FALSE;
2267 } 2262 }
@@ -2269,7 +2264,7 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2269 return BFA_TRUE; 2264 return BFA_TRUE;
2270} 2265}
2271 2266
2272/** 2267/*
2273 * Add to IOC heartbeat failure notification queue. To be used by common 2268 * Add to IOC heartbeat failure notification queue. To be used by common
2274 * modules such as cee, port, diag. 2269 * modules such as cee, port, diag.
2275 */ 2270 */
@@ -2293,7 +2288,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2293 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); 2288 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2294 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); 2289 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2295 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); 2290 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2296 bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd, 2291 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2297 sizeof(struct bfa_mfg_vpd_s)); 2292 sizeof(struct bfa_mfg_vpd_s));
2298 2293
2299 ad_attr->nports = bfa_ioc_get_nports(ioc); 2294 ad_attr->nports = bfa_ioc_get_nports(ioc);
@@ -2343,8 +2338,8 @@ bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2343void 2338void
2344bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) 2339bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2345{ 2340{
2346 bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); 2341 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2347 bfa_os_memcpy((void *)serial_num, 2342 memcpy((void *)serial_num,
2348 (void *)ioc->attr->brcd_serialnum, 2343 (void *)ioc->attr->brcd_serialnum,
2349 BFA_ADAPTER_SERIAL_NUM_LEN); 2344 BFA_ADAPTER_SERIAL_NUM_LEN);
2350} 2345}
@@ -2352,8 +2347,8 @@ bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2352void 2347void
2353bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) 2348bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2354{ 2349{
2355 bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN); 2350 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2356 bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); 2351 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2357} 2352}
2358 2353
2359void 2354void
@@ -2361,7 +2356,7 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2361{ 2356{
2362 bfa_assert(chip_rev); 2357 bfa_assert(chip_rev);
2363 2358
2364 bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); 2359 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2365 2360
2366 chip_rev[0] = 'R'; 2361 chip_rev[0] = 'R';
2367 chip_rev[1] = 'e'; 2362 chip_rev[1] = 'e';
@@ -2374,16 +2369,16 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2374void 2369void
2375bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) 2370bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2376{ 2371{
2377 bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); 2372 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2378 bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, 2373 memcpy(optrom_ver, ioc->attr->optrom_version,
2379 BFA_VERSION_LEN); 2374 BFA_VERSION_LEN);
2380} 2375}
2381 2376
2382void 2377void
2383bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) 2378bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2384{ 2379{
2385 bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); 2380 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2386 bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 2381 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2387} 2382}
2388 2383
2389void 2384void
@@ -2392,14 +2387,14 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2392 struct bfi_ioc_attr_s *ioc_attr; 2387 struct bfi_ioc_attr_s *ioc_attr;
2393 2388
2394 bfa_assert(model); 2389 bfa_assert(model);
2395 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); 2390 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2396 2391
2397 ioc_attr = ioc->attr; 2392 ioc_attr = ioc->attr;
2398 2393
2399 /** 2394 /*
2400 * model name 2395 * model name
2401 */ 2396 */
2402 bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", 2397 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2403 BFA_MFG_NAME, ioc_attr->card_type); 2398 BFA_MFG_NAME, ioc_attr->card_type);
2404} 2399}
2405 2400
@@ -2446,7 +2441,7 @@ bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2446void 2441void
2447bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) 2442bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2448{ 2443{
2449 bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); 2444 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2450 2445
2451 ioc_attr->state = bfa_ioc_get_state(ioc); 2446 ioc_attr->state = bfa_ioc_get_state(ioc);
2452 ioc_attr->port_id = ioc->port_id; 2447 ioc_attr->port_id = ioc->port_id;
@@ -2460,7 +2455,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2460 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2455 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2461} 2456}
2462 2457
2463/** 2458/*
2464 * hal_wwn_public 2459 * hal_wwn_public
2465 */ 2460 */
2466wwn_t 2461wwn_t
@@ -2526,7 +2521,7 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
2526 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id); 2521 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
2527} 2522}
2528 2523
2529/** 2524/*
2530 * Retrieve saved firmware trace from a prior IOC failure. 2525 * Retrieve saved firmware trace from a prior IOC failure.
2531 */ 2526 */
2532bfa_status_t 2527bfa_status_t
@@ -2541,12 +2536,12 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2541 if (tlen > ioc->dbg_fwsave_len) 2536 if (tlen > ioc->dbg_fwsave_len)
2542 tlen = ioc->dbg_fwsave_len; 2537 tlen = ioc->dbg_fwsave_len;
2543 2538
2544 bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen); 2539 memcpy(trcdata, ioc->dbg_fwsave, tlen);
2545 *trclen = tlen; 2540 *trclen = tlen;
2546 return BFA_STATUS_OK; 2541 return BFA_STATUS_OK;
2547} 2542}
2548 2543
2549/** 2544/*
2550 * Clear saved firmware trace 2545 * Clear saved firmware trace
2551 */ 2546 */
2552void 2547void
@@ -2555,7 +2550,7 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
2555 ioc->dbg_fwsave_once = BFA_TRUE; 2550 ioc->dbg_fwsave_once = BFA_TRUE;
2556} 2551}
2557 2552
2558/** 2553/*
2559 * Retrieve saved firmware trace from a prior IOC failure. 2554 * Retrieve saved firmware trace from a prior IOC failure.
2560 */ 2555 */
2561bfa_status_t 2556bfa_status_t
@@ -2595,7 +2590,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2595 2590
2596 bfa_ioc_send_fwsync(ioc); 2591 bfa_ioc_send_fwsync(ioc);
2597 2592
2598 /** 2593 /*
2599 * After sending a fw sync mbox command wait for it to 2594 * After sending a fw sync mbox command wait for it to
2600 * take effect. We will not wait for a response because 2595 * take effect. We will not wait for a response because
2601 * 1. fw_sync mbox cmd doesn't have a response. 2596 * 1. fw_sync mbox cmd doesn't have a response.
@@ -2610,7 +2605,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2610 fwsync_iter--; 2605 fwsync_iter--;
2611} 2606}
2612 2607
2613/** 2608/*
2614 * Dump firmware smem 2609 * Dump firmware smem
2615 */ 2610 */
2616bfa_status_t 2611bfa_status_t
@@ -2630,7 +2625,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2630 loff = *offset; 2625 loff = *offset;
2631 dlen = *buflen; 2626 dlen = *buflen;
2632 2627
2633 /** 2628 /*
2634 * First smem read, sync smem before proceeding 2629 * First smem read, sync smem before proceeding
2635 * No need to sync before reading every chunk. 2630 * No need to sync before reading every chunk.
2636 */ 2631 */
@@ -2657,7 +2652,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2657 return status; 2652 return status;
2658} 2653}
2659 2654
2660/** 2655/*
2661 * Firmware statistics 2656 * Firmware statistics
2662 */ 2657 */
2663bfa_status_t 2658bfa_status_t
@@ -2702,7 +2697,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2702 return status; 2697 return status;
2703} 2698}
2704 2699
2705/** 2700/*
2706 * Save firmware trace if configured. 2701 * Save firmware trace if configured.
2707 */ 2702 */
2708static void 2703static void
@@ -2716,7 +2711,7 @@ bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
2716 } 2711 }
2717} 2712}
2718 2713
2719/** 2714/*
2720 * Firmware failure detected. Start recovery actions. 2715 * Firmware failure detected. Start recovery actions.
2721 */ 2716 */
2722static void 2717static void
@@ -2738,7 +2733,7 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2738 return; 2733 return;
2739} 2734}
2740 2735
2741/** 2736/*
2742 * hal_iocpf_pvt BFA IOC PF private functions 2737 * hal_iocpf_pvt BFA IOC PF private functions
2743 */ 2738 */
2744 2739
@@ -2795,7 +2790,7 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
2795 bfa_ioc_hw_sem_get(ioc); 2790 bfa_ioc_hw_sem_get(ioc);
2796} 2791}
2797 2792
2798/** 2793/*
2799 * bfa timer function 2794 * bfa timer function
2800 */ 2795 */
2801void 2796void
@@ -2840,7 +2835,7 @@ bfa_timer_beat(struct bfa_timer_mod_s *mod)
2840 } 2835 }
2841} 2836}
2842 2837
2843/** 2838/*
2844 * Should be called with lock protection 2839 * Should be called with lock protection
2845 */ 2840 */
2846void 2841void
@@ -2858,7 +2853,7 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2858 list_add_tail(&timer->qe, &mod->timer_q); 2853 list_add_tail(&timer->qe, &mod->timer_q);
2859} 2854}
2860 2855
2861/** 2856/*
2862 * Should be called with lock protection 2857 * Should be called with lock protection
2863 */ 2858 */
2864void 2859void
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 288c5801aac..9c407a87a1a 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -22,29 +22,29 @@
22#include "bfa_cs.h" 22#include "bfa_cs.h"
23#include "bfi.h" 23#include "bfi.h"
24 24
25/** 25/*
26 * BFA timer declarations 26 * BFA timer declarations
27 */ 27 */
28typedef void (*bfa_timer_cbfn_t)(void *); 28typedef void (*bfa_timer_cbfn_t)(void *);
29 29
30/** 30/*
31 * BFA timer data structure 31 * BFA timer data structure
32 */ 32 */
33struct bfa_timer_s { 33struct bfa_timer_s {
34 struct list_head qe; 34 struct list_head qe;
35 bfa_timer_cbfn_t timercb; 35 bfa_timer_cbfn_t timercb;
36 void *arg; 36 void *arg;
37 int timeout; /**< in millisecs. */ 37 int timeout; /* in millisecs */
38}; 38};
39 39
40/** 40/*
41 * Timer module structure 41 * Timer module structure
42 */ 42 */
43struct bfa_timer_mod_s { 43struct bfa_timer_mod_s {
44 struct list_head timer_q; 44 struct list_head timer_q;
45}; 45};
46 46
47#define BFA_TIMER_FREQ 200 /**< specified in millisecs */ 47#define BFA_TIMER_FREQ 200 /* specified in millisecs */
48 48
49void bfa_timer_beat(struct bfa_timer_mod_s *mod); 49void bfa_timer_beat(struct bfa_timer_mod_s *mod);
50void bfa_timer_init(struct bfa_timer_mod_s *mod); 50void bfa_timer_init(struct bfa_timer_mod_s *mod);
@@ -53,7 +53,7 @@ void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
53 unsigned int timeout); 53 unsigned int timeout);
54void bfa_timer_stop(struct bfa_timer_s *timer); 54void bfa_timer_stop(struct bfa_timer_s *timer);
55 55
56/** 56/*
57 * Generic Scatter Gather Element used by driver 57 * Generic Scatter Gather Element used by driver
58 */ 58 */
59struct bfa_sge_s { 59struct bfa_sge_s {
@@ -62,9 +62,9 @@ struct bfa_sge_s {
62}; 62};
63 63
64#define bfa_sge_word_swap(__sge) do { \ 64#define bfa_sge_word_swap(__sge) do { \
65 ((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]); \ 65 ((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]); \
66 ((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]); \ 66 ((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]); \
67 ((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]); \ 67 ((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]); \
68} while (0) 68} while (0)
69 69
70#define bfa_swap_words(_x) ( \ 70#define bfa_swap_words(_x) ( \
@@ -80,17 +80,17 @@ struct bfa_sge_s {
80#define bfa_sgaddr_le(_x) (_x) 80#define bfa_sgaddr_le(_x) (_x)
81#endif 81#endif
82 82
83/** 83/*
84 * PCI device information required by IOC 84 * PCI device information required by IOC
85 */ 85 */
86struct bfa_pcidev_s { 86struct bfa_pcidev_s {
87 int pci_slot; 87 int pci_slot;
88 u8 pci_func; 88 u8 pci_func;
89 u16 device_id; 89 u16 device_id;
90 bfa_os_addr_t pci_bar_kva; 90 void __iomem *pci_bar_kva;
91}; 91};
92 92
93/** 93/*
94 * Structure used to remember the DMA-able memory block's KVA and Physical 94 * Structure used to remember the DMA-able memory block's KVA and Physical
95 * Address 95 * Address
96 */ 96 */
@@ -102,7 +102,7 @@ struct bfa_dma_s {
102#define BFA_DMA_ALIGN_SZ 256 102#define BFA_DMA_ALIGN_SZ 256
103#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) 103#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
104 104
105/** 105/*
106 * smem size for Crossbow and Catapult 106 * smem size for Crossbow and Catapult
107 */ 107 */
108#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ 108#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
@@ -125,40 +125,38 @@ __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
125static inline void 125static inline void
126__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) 126__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
127{ 127{
128 dma_addr->a32.addr_lo = (u32) bfa_os_htonl(pa); 128 dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa);
129 dma_addr->a32.addr_hi = (u32) bfa_os_htonl(bfa_os_u32(pa)); 129 dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa));
130} 130}
131 131
132struct bfa_ioc_regs_s { 132struct bfa_ioc_regs_s {
133 bfa_os_addr_t hfn_mbox_cmd; 133 void __iomem *hfn_mbox_cmd;
134 bfa_os_addr_t hfn_mbox; 134 void __iomem *hfn_mbox;
135 bfa_os_addr_t lpu_mbox_cmd; 135 void __iomem *lpu_mbox_cmd;
136 bfa_os_addr_t lpu_mbox; 136 void __iomem *lpu_mbox;
137 bfa_os_addr_t pss_ctl_reg; 137 void __iomem *pss_ctl_reg;
138 bfa_os_addr_t pss_err_status_reg; 138 void __iomem *pss_err_status_reg;
139 bfa_os_addr_t app_pll_fast_ctl_reg; 139 void __iomem *app_pll_fast_ctl_reg;
140 bfa_os_addr_t app_pll_slow_ctl_reg; 140 void __iomem *app_pll_slow_ctl_reg;
141 bfa_os_addr_t ioc_sem_reg; 141 void __iomem *ioc_sem_reg;
142 bfa_os_addr_t ioc_usage_sem_reg; 142 void __iomem *ioc_usage_sem_reg;
143 bfa_os_addr_t ioc_init_sem_reg; 143 void __iomem *ioc_init_sem_reg;
144 bfa_os_addr_t ioc_usage_reg; 144 void __iomem *ioc_usage_reg;
145 bfa_os_addr_t host_page_num_fn; 145 void __iomem *host_page_num_fn;
146 bfa_os_addr_t heartbeat; 146 void __iomem *heartbeat;
147 bfa_os_addr_t ioc_fwstate; 147 void __iomem *ioc_fwstate;
148 bfa_os_addr_t ll_halt; 148 void __iomem *ll_halt;
149 bfa_os_addr_t err_set; 149 void __iomem *err_set;
150 bfa_os_addr_t shirq_isr_next; 150 void __iomem *shirq_isr_next;
151 bfa_os_addr_t shirq_msk_next; 151 void __iomem *shirq_msk_next;
152 bfa_os_addr_t smem_page_start; 152 void __iomem *smem_page_start;
153 u32 smem_pg0; 153 u32 smem_pg0;
154}; 154};
155 155
156#define bfa_reg_read(_raddr) bfa_os_reg_read(_raddr) 156#define bfa_mem_read(_raddr, _off) swab32(readl(((_raddr) + (_off))))
157#define bfa_reg_write(_raddr, _val) bfa_os_reg_write(_raddr, _val)
158#define bfa_mem_read(_raddr, _off) bfa_os_mem_read(_raddr, _off)
159#define bfa_mem_write(_raddr, _off, _val) \ 157#define bfa_mem_write(_raddr, _off, _val) \
160 bfa_os_mem_write(_raddr, _off, _val) 158 writel(swab32((_val)), ((_raddr) + (_off)))
161/** 159/*
162 * IOC Mailbox structures 160 * IOC Mailbox structures
163 */ 161 */
164struct bfa_mbox_cmd_s { 162struct bfa_mbox_cmd_s {
@@ -166,7 +164,7 @@ struct bfa_mbox_cmd_s {
166 u32 msg[BFI_IOC_MSGSZ]; 164 u32 msg[BFI_IOC_MSGSZ];
167}; 165};
168 166
169/** 167/*
170 * IOC mailbox module 168 * IOC mailbox module
171 */ 169 */
172typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m); 170typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
@@ -179,7 +177,7 @@ struct bfa_ioc_mbox_mod_s {
179 } mbhdlr[BFI_MC_MAX]; 177 } mbhdlr[BFI_MC_MAX];
180}; 178};
181 179
182/** 180/*
183 * IOC callback function interfaces 181 * IOC callback function interfaces
184 */ 182 */
185typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); 183typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
@@ -193,7 +191,7 @@ struct bfa_ioc_cbfn_s {
193 bfa_ioc_reset_cbfn_t reset_cbfn; 191 bfa_ioc_reset_cbfn_t reset_cbfn;
194}; 192};
195 193
196/** 194/*
197 * Heartbeat failure notification queue element. 195 * Heartbeat failure notification queue element.
198 */ 196 */
199struct bfa_ioc_hbfail_notify_s { 197struct bfa_ioc_hbfail_notify_s {
@@ -202,7 +200,7 @@ struct bfa_ioc_hbfail_notify_s {
202 void *cbarg; 200 void *cbarg;
203}; 201};
204 202
205/** 203/*
206 * Initialize a heartbeat failure notification structure 204 * Initialize a heartbeat failure notification structure
207 */ 205 */
208#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \ 206#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
@@ -249,7 +247,7 @@ struct bfa_ioc_s {
249}; 247};
250 248
251struct bfa_ioc_hwif_s { 249struct bfa_ioc_hwif_s {
252 bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode); 250 bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode);
253 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); 251 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
254 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); 252 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
255 void (*ioc_reg_init) (struct bfa_ioc_s *ioc); 253 void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
@@ -267,7 +265,7 @@ struct bfa_ioc_hwif_s {
267#define bfa_ioc_fetch_stats(__ioc, __stats) \ 265#define bfa_ioc_fetch_stats(__ioc, __stats) \
268 (((__stats)->drv_stats) = (__ioc)->stats) 266 (((__stats)->drv_stats) = (__ioc)->stats)
269#define bfa_ioc_clr_stats(__ioc) \ 267#define bfa_ioc_clr_stats(__ioc) \
270 bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) 268 memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
271#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) 269#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
272#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) 270#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
273#define bfa_ioc_speed_sup(__ioc) \ 271#define bfa_ioc_speed_sup(__ioc) \
@@ -287,7 +285,7 @@ struct bfa_ioc_hwif_s {
287#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 285#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
288#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 286#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
289 287
290/** 288/*
291 * IOC mailbox interface 289 * IOC mailbox interface
292 */ 290 */
293void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd); 291void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
@@ -299,7 +297,7 @@ void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
299void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, 297void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
300 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); 298 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
301 299
302/** 300/*
303 * IOC interfaces 301 * IOC interfaces
304 */ 302 */
305 303
@@ -308,9 +306,9 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
308 (__ioc)->fcmode)) 306 (__ioc)->fcmode))
309 307
310bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc); 308bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
311bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode); 309bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
312bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb); 310bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb);
313bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode); 311bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
314 312
315#define bfa_ioc_isr_mode_set(__ioc, __msix) \ 313#define bfa_ioc_isr_mode_set(__ioc, __msix) \
316 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) 314 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
@@ -370,8 +368,8 @@ void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
370bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); 368bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
371void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, 369void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
372 struct bfa_ioc_hbfail_notify_s *notify); 370 struct bfa_ioc_hbfail_notify_s *notify);
373bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg); 371bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
374void bfa_ioc_sem_release(bfa_os_addr_t sem_reg); 372void bfa_ioc_sem_release(void __iomem *sem_reg);
375void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc); 373void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
376void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, 374void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
377 struct bfi_ioc_image_hdr_s *fwhdr); 375 struct bfi_ioc_image_hdr_s *fwhdr);
@@ -441,7 +439,7 @@ bfa_cb_image_get_size(int type)
441 } 439 }
442} 440}
443 441
444/** 442/*
445 * CNA TRCMOD declaration 443 * CNA TRCMOD declaration
446 */ 444 */
447/* 445/*
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index d7ac864d853..90994504385 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -34,7 +34,7 @@ static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
34 34
35struct bfa_ioc_hwif_s hwif_cb; 35struct bfa_ioc_hwif_s hwif_cb;
36 36
37/** 37/*
38 * Called from bfa_ioc_attach() to map asic specific calls. 38 * Called from bfa_ioc_attach() to map asic specific calls.
39 */ 39 */
40void 40void
@@ -52,7 +52,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
52 ioc->ioc_hwif = &hwif_cb; 52 ioc->ioc_hwif = &hwif_cb;
53} 53}
54 54
55/** 55/*
56 * Return true if firmware of current driver matches the running firmware. 56 * Return true if firmware of current driver matches the running firmware.
57 */ 57 */
58static bfa_boolean_t 58static bfa_boolean_t
@@ -66,17 +66,17 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
66{ 66{
67} 67}
68 68
69/** 69/*
70 * Notify other functions on HB failure. 70 * Notify other functions on HB failure.
71 */ 71 */
72static void 72static void
73bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc) 73bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
74{ 74{
75 bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); 75 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
76 bfa_reg_read(ioc->ioc_regs.err_set); 76 readl(ioc->ioc_regs.err_set);
77} 77}
78 78
79/** 79/*
80 * Host to LPU mailbox message addresses 80 * Host to LPU mailbox message addresses
81 */ 81 */
82static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { 82static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -84,7 +84,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
84 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } 84 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
85}; 85};
86 86
87/** 87/*
88 * Host <-> LPU mailbox command/status registers 88 * Host <-> LPU mailbox command/status registers
89 */ 89 */
90static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { 90static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
@@ -96,7 +96,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
96static void 96static void
97bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) 97bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
98{ 98{
99 bfa_os_addr_t rb; 99 void __iomem *rb;
100 int pcifn = bfa_ioc_pcifn(ioc); 100 int pcifn = bfa_ioc_pcifn(ioc);
101 101
102 rb = bfa_ioc_bar0(ioc); 102 rb = bfa_ioc_bar0(ioc);
@@ -113,7 +113,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
113 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 113 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
114 } 114 }
115 115
116 /** 116 /*
117 * Host <-> LPU mailbox command/status registers 117 * Host <-> LPU mailbox command/status registers
118 */ 118 */
119 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; 119 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
@@ -133,7 +133,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
133 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); 133 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
134 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 134 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
135 135
136 /** 136 /*
137 * sram memory access 137 * sram memory access
138 */ 138 */
139 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); 139 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -145,14 +145,14 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
145 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 145 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
146} 146}
147 147
148/** 148/*
149 * Initialize IOC to port mapping. 149 * Initialize IOC to port mapping.
150 */ 150 */
151 151
152static void 152static void
153bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) 153bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
154{ 154{
155 /** 155 /*
156 * For crossbow, port id is same as pci function. 156 * For crossbow, port id is same as pci function.
157 */ 157 */
158 ioc->port_id = bfa_ioc_pcifn(ioc); 158 ioc->port_id = bfa_ioc_pcifn(ioc);
@@ -160,7 +160,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
160 bfa_trc(ioc, ioc->port_id); 160 bfa_trc(ioc, ioc->port_id);
161} 161}
162 162
163/** 163/*
164 * Set interrupt mode for a function: INTX or MSIX 164 * Set interrupt mode for a function: INTX or MSIX
165 */ 165 */
166static void 166static void
@@ -168,7 +168,7 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
168{ 168{
169} 169}
170 170
171/** 171/*
172 * Cleanup hw semaphore and usecnt registers 172 * Cleanup hw semaphore and usecnt registers
173 */ 173 */
174static void 174static void
@@ -180,14 +180,14 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
180 * before we clear it. If it is not locked, writing 1 180 * before we clear it. If it is not locked, writing 1
181 * will lock it instead of clearing it. 181 * will lock it instead of clearing it.
182 */ 182 */
183 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 183 readl(ioc->ioc_regs.ioc_sem_reg);
184 bfa_ioc_hw_sem_release(ioc); 184 bfa_ioc_hw_sem_release(ioc);
185} 185}
186 186
187 187
188 188
189bfa_status_t 189bfa_status_t
190bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) 190bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
191{ 191{
192 u32 pll_sclk, pll_fclk; 192 u32 pll_sclk, pll_fclk;
193 193
@@ -199,38 +199,32 @@ bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
199 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | 199 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
200 __APP_PLL_400_JITLMT0_1(3U) | 200 __APP_PLL_400_JITLMT0_1(3U) |
201 __APP_PLL_400_CNTLMT0_1(3U); 201 __APP_PLL_400_CNTLMT0_1(3U);
202 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 202 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
203 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 203 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
204 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 204 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
205 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 205 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
206 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 206 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
207 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 207 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
208 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 208 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
209 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 209 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
210 bfa_reg_write(rb + APP_PLL_212_CTL_REG, 210 writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
211 __APP_PLL_212_LOGIC_SOFT_RESET); 211 writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET,
212 bfa_reg_write(rb + APP_PLL_212_CTL_REG, 212 rb + APP_PLL_212_CTL_REG);
213 __APP_PLL_212_BYPASS | 213 writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
214 __APP_PLL_212_LOGIC_SOFT_RESET); 214 writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET,
215 bfa_reg_write(rb + APP_PLL_400_CTL_REG, 215 rb + APP_PLL_400_CTL_REG);
216 __APP_PLL_400_LOGIC_SOFT_RESET); 216 udelay(2);
217 bfa_reg_write(rb + APP_PLL_400_CTL_REG, 217 writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
218 __APP_PLL_400_BYPASS | 218 writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
219 __APP_PLL_400_LOGIC_SOFT_RESET); 219 writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET,
220 bfa_os_udelay(2); 220 rb + APP_PLL_212_CTL_REG);
221 bfa_reg_write(rb + APP_PLL_212_CTL_REG, 221 writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET,
222 __APP_PLL_212_LOGIC_SOFT_RESET); 222 rb + APP_PLL_400_CTL_REG);
223 bfa_reg_write(rb + APP_PLL_400_CTL_REG, 223 udelay(2000);
224 __APP_PLL_400_LOGIC_SOFT_RESET); 224 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
225 bfa_reg_write(rb + APP_PLL_212_CTL_REG, 225 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
226 pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET); 226 writel(pll_sclk, (rb + APP_PLL_212_CTL_REG));
227 bfa_reg_write(rb + APP_PLL_400_CTL_REG, 227 writel(pll_fclk, (rb + APP_PLL_400_CTL_REG));
228 pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
229 bfa_os_udelay(2000);
230 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
231 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
232 bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk);
233 bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk);
234 228
235 return BFA_STATUS_OK; 229 return BFA_STATUS_OK;
236} 230}
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index f21b82c5f64..115730c0aa7 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -34,7 +34,7 @@ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
34 34
35struct bfa_ioc_hwif_s hwif_ct; 35struct bfa_ioc_hwif_s hwif_ct;
36 36
37/** 37/*
38 * Called from bfa_ioc_attach() to map asic specific calls. 38 * Called from bfa_ioc_attach() to map asic specific calls.
39 */ 39 */
40void 40void
@@ -52,7 +52,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
52 ioc->ioc_hwif = &hwif_ct; 52 ioc->ioc_hwif = &hwif_ct;
53} 53}
54 54
55/** 55/*
56 * Return true if firmware of current driver matches the running firmware. 56 * Return true if firmware of current driver matches the running firmware.
57 */ 57 */
58static bfa_boolean_t 58static bfa_boolean_t
@@ -62,13 +62,13 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
62 u32 usecnt; 62 u32 usecnt;
63 struct bfi_ioc_image_hdr_s fwhdr; 63 struct bfi_ioc_image_hdr_s fwhdr;
64 64
65 /** 65 /*
66 * Firmware match check is relevant only for CNA. 66 * Firmware match check is relevant only for CNA.
67 */ 67 */
68 if (!ioc->cna) 68 if (!ioc->cna)
69 return BFA_TRUE; 69 return BFA_TRUE;
70 70
71 /** 71 /*
72 * If bios boot (flash based) -- do not increment usage count 72 * If bios boot (flash based) -- do not increment usage count
73 */ 73 */
74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < 74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
@@ -76,27 +76,27 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
76 return BFA_TRUE; 76 return BFA_TRUE;
77 77
78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
79 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); 79 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
80 80
81 /** 81 /*
82 * If usage count is 0, always return TRUE. 82 * If usage count is 0, always return TRUE.
83 */ 83 */
84 if (usecnt == 0) { 84 if (usecnt == 0) {
85 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1); 85 writel(1, ioc->ioc_regs.ioc_usage_reg);
86 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 86 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
87 bfa_trc(ioc, usecnt); 87 bfa_trc(ioc, usecnt);
88 return BFA_TRUE; 88 return BFA_TRUE;
89 } 89 }
90 90
91 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); 91 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
92 bfa_trc(ioc, ioc_fwstate); 92 bfa_trc(ioc, ioc_fwstate);
93 93
94 /** 94 /*
95 * Use count cannot be non-zero and chip in uninitialized state. 95 * Use count cannot be non-zero and chip in uninitialized state.
96 */ 96 */
97 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); 97 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
98 98
99 /** 99 /*
100 * Check if another driver with a different firmware is active 100 * Check if another driver with a different firmware is active
101 */ 101 */
102 bfa_ioc_fwver_get(ioc, &fwhdr); 102 bfa_ioc_fwver_get(ioc, &fwhdr);
@@ -106,11 +106,11 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
106 return BFA_FALSE; 106 return BFA_FALSE;
107 } 107 }
108 108
109 /** 109 /*
110 * Same firmware version. Increment the reference count. 110 * Same firmware version. Increment the reference count.
111 */ 111 */
112 usecnt++; 112 usecnt++;
113 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); 113 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
114 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 114 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
115 bfa_trc(ioc, usecnt); 115 bfa_trc(ioc, usecnt);
116 return BFA_TRUE; 116 return BFA_TRUE;
@@ -121,50 +121,50 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
121{ 121{
122 u32 usecnt; 122 u32 usecnt;
123 123
124 /** 124 /*
125 * Firmware lock is relevant only for CNA. 125 * Firmware lock is relevant only for CNA.
126 */ 126 */
127 if (!ioc->cna) 127 if (!ioc->cna)
128 return; 128 return;
129 129
130 /** 130 /*
131 * If bios boot (flash based) -- do not decrement usage count 131 * If bios boot (flash based) -- do not decrement usage count
132 */ 132 */
133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < 133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
134 BFA_IOC_FWIMG_MINSZ) 134 BFA_IOC_FWIMG_MINSZ)
135 return; 135 return;
136 136
137 /** 137 /*
138 * decrement usage count 138 * decrement usage count
139 */ 139 */
140 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 140 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
141 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); 141 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
142 bfa_assert(usecnt > 0); 142 bfa_assert(usecnt > 0);
143 143
144 usecnt--; 144 usecnt--;
145 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); 145 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
146 bfa_trc(ioc, usecnt); 146 bfa_trc(ioc, usecnt);
147 147
148 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 148 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
149} 149}
150 150
151/** 151/*
152 * Notify other functions on HB failure. 152 * Notify other functions on HB failure.
153 */ 153 */
154static void 154static void
155bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc) 155bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
156{ 156{
157 if (ioc->cna) { 157 if (ioc->cna) {
158 bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); 158 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
159 /* Wait for halt to take effect */ 159 /* Wait for halt to take effect */
160 bfa_reg_read(ioc->ioc_regs.ll_halt); 160 readl(ioc->ioc_regs.ll_halt);
161 } else { 161 } else {
162 bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); 162 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
163 bfa_reg_read(ioc->ioc_regs.err_set); 163 readl(ioc->ioc_regs.err_set);
164 } 164 }
165} 165}
166 166
167/** 167/*
168 * Host to LPU mailbox message addresses 168 * Host to LPU mailbox message addresses
169 */ 169 */
170static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { 170static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -174,7 +174,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
174 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 174 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
175}; 175};
176 176
177/** 177/*
178 * Host <-> LPU mailbox command/status registers - port 0 178 * Host <-> LPU mailbox command/status registers - port 0
179 */ 179 */
180static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { 180static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
@@ -184,7 +184,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
184 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } 184 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
185}; 185};
186 186
187/** 187/*
188 * Host <-> LPU mailbox command/status registers - port 1 188 * Host <-> LPU mailbox command/status registers - port 1
189 */ 189 */
190static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { 190static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
@@ -197,7 +197,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
197static void 197static void
198bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) 198bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
199{ 199{
200 bfa_os_addr_t rb; 200 void __iomem *rb;
201 int pcifn = bfa_ioc_pcifn(ioc); 201 int pcifn = bfa_ioc_pcifn(ioc);
202 202
203 rb = bfa_ioc_bar0(ioc); 203 rb = bfa_ioc_bar0(ioc);
@@ -236,7 +236,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
236 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 236 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
237 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 237 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
238 238
239 /** 239 /*
240 * sram memory access 240 * sram memory access
241 */ 241 */
242 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); 242 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -248,7 +248,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
248 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 248 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
249} 249}
250 250
251/** 251/*
252 * Initialize IOC to port mapping. 252 * Initialize IOC to port mapping.
253 */ 253 */
254 254
@@ -256,13 +256,13 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
256static void 256static void
257bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) 257bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
258{ 258{
259 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 259 void __iomem *rb = ioc->pcidev.pci_bar_kva;
260 u32 r32; 260 u32 r32;
261 261
262 /** 262 /*
263 * For catapult, base port id on personality register and IOC type 263 * For catapult, base port id on personality register and IOC type
264 */ 264 */
265 r32 = bfa_reg_read(rb + FNC_PERS_REG); 265 r32 = readl(rb + FNC_PERS_REG);
266 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); 266 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
267 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; 267 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
268 268
@@ -270,22 +270,22 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
270 bfa_trc(ioc, ioc->port_id); 270 bfa_trc(ioc, ioc->port_id);
271} 271}
272 272
273/** 273/*
274 * Set interrupt mode for a function: INTX or MSIX 274 * Set interrupt mode for a function: INTX or MSIX
275 */ 275 */
276static void 276static void
277bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) 277bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
278{ 278{
279 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; 279 void __iomem *rb = ioc->pcidev.pci_bar_kva;
280 u32 r32, mode; 280 u32 r32, mode;
281 281
282 r32 = bfa_reg_read(rb + FNC_PERS_REG); 282 r32 = readl(rb + FNC_PERS_REG);
283 bfa_trc(ioc, r32); 283 bfa_trc(ioc, r32);
284 284
285 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & 285 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
286 __F0_INTX_STATUS; 286 __F0_INTX_STATUS;
287 287
288 /** 288 /*
289 * If already in desired mode, do not change anything 289 * If already in desired mode, do not change anything
290 */ 290 */
291 if (!msix && mode) 291 if (!msix && mode)
@@ -300,10 +300,10 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
300 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); 300 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
301 bfa_trc(ioc, r32); 301 bfa_trc(ioc, r32);
302 302
303 bfa_reg_write(rb + FNC_PERS_REG, r32); 303 writel(r32, rb + FNC_PERS_REG);
304} 304}
305 305
306/** 306/*
307 * Cleanup hw semaphore and usecnt registers 307 * Cleanup hw semaphore and usecnt registers
308 */ 308 */
309static void 309static void
@@ -312,7 +312,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
312 312
313 if (ioc->cna) { 313 if (ioc->cna) {
314 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 314 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
315 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0); 315 writel(0, ioc->ioc_regs.ioc_usage_reg);
316 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 316 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
317 } 317 }
318 318
@@ -321,7 +321,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
321 * before we clear it. If it is not locked, writing 1 321 * before we clear it. If it is not locked, writing 1
322 * will lock it instead of clearing it. 322 * will lock it instead of clearing it.
323 */ 323 */
324 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 324 readl(ioc->ioc_regs.ioc_sem_reg);
325 bfa_ioc_hw_sem_release(ioc); 325 bfa_ioc_hw_sem_release(ioc);
326} 326}
327 327
@@ -331,17 +331,17 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
331 * Check the firmware state to know if pll_init has been completed already 331 * Check the firmware state to know if pll_init has been completed already
332 */ 332 */
333bfa_boolean_t 333bfa_boolean_t
334bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb) 334bfa_ioc_ct_pll_init_complete(void __iomem *rb)
335{ 335{
336 if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) || 336 if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
337 (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP)) 337 (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
338 return BFA_TRUE; 338 return BFA_TRUE;
339 339
340 return BFA_FALSE; 340 return BFA_FALSE;
341} 341}
342 342
343bfa_status_t 343bfa_status_t
344bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) 344bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
345{ 345{
346 u32 pll_sclk, pll_fclk, r32; 346 u32 pll_sclk, pll_fclk, r32;
347 347
@@ -354,56 +354,51 @@ bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
354 __APP_PLL_425_JITLMT0_1(3U) | 354 __APP_PLL_425_JITLMT0_1(3U) |
355 __APP_PLL_425_CNTLMT0_1(1U); 355 __APP_PLL_425_CNTLMT0_1(1U);
356 if (fcmode) { 356 if (fcmode) {
357 bfa_reg_write((rb + OP_MODE), 0); 357 writel(0, (rb + OP_MODE));
358 bfa_reg_write((rb + ETH_MAC_SER_REG), 358 writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
359 __APP_EMS_CMLCKSEL | 359 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
360 __APP_EMS_REFCKBUFEN2 |
361 __APP_EMS_CHANNEL_SEL);
362 } else { 360 } else {
363 bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); 361 writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
364 bfa_reg_write((rb + ETH_MAC_SER_REG), 362 writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
365 __APP_EMS_REFCKBUFEN1);
366 } 363 }
367 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); 364 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
368 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); 365 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
369 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 366 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
370 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 367 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
371 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 368 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
372 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 369 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
373 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); 370 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
374 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); 371 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
375 bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | 372 writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
376 __APP_PLL_312_LOGIC_SOFT_RESET); 373 rb + APP_PLL_312_CTL_REG);
377 bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | 374 writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
378 __APP_PLL_425_LOGIC_SOFT_RESET); 375 rb + APP_PLL_425_CTL_REG);
379 bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | 376 writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
380 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE); 377 rb + APP_PLL_312_CTL_REG);
381 bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | 378 writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
382 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE); 379 rb + APP_PLL_425_CTL_REG);
383 bfa_reg_read(rb + HOSTFN0_INT_MSK); 380 readl(rb + HOSTFN0_INT_MSK);
384 bfa_os_udelay(2000); 381 udelay(2000);
385 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); 382 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
386 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); 383 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
387 bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | 384 writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
388 __APP_PLL_312_ENABLE); 385 writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
389 bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
390 __APP_PLL_425_ENABLE);
391 if (!fcmode) { 386 if (!fcmode) {
392 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P); 387 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
393 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P); 388 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
394 } 389 }
395 r32 = bfa_reg_read((rb + PSS_CTL_REG)); 390 r32 = readl((rb + PSS_CTL_REG));
396 r32 &= ~__PSS_LMEM_RESET; 391 r32 &= ~__PSS_LMEM_RESET;
397 bfa_reg_write((rb + PSS_CTL_REG), r32); 392 writel(r32, (rb + PSS_CTL_REG));
398 bfa_os_udelay(1000); 393 udelay(1000);
399 if (!fcmode) { 394 if (!fcmode) {
400 bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0); 395 writel(0, (rb + PMM_1T_RESET_REG_P0));
401 bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0); 396 writel(0, (rb + PMM_1T_RESET_REG_P1));
402 } 397 }
403 398
404 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); 399 writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
405 bfa_os_udelay(1000); 400 udelay(1000);
406 r32 = bfa_reg_read((rb + MBIST_STAT_REG)); 401 r32 = readl((rb + MBIST_STAT_REG));
407 bfa_reg_write((rb + MBIST_CTL_REG), 0); 402 writel(0, (rb + MBIST_CTL_REG));
408 return BFA_STATUS_OK; 403 return BFA_STATUS_OK;
409} 404}
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 2cd52733867..15407ab39e7 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfa_modules.h BFA modules 19 * bfa_modules.h BFA modules
20 */ 20 */
21 21
@@ -52,7 +52,7 @@ enum {
52}; 52};
53 53
54 54
55/** 55/*
56 * Macro to define a new BFA module 56 * Macro to define a new BFA module
57 */ 57 */
58#define BFA_MODULE(__mod) \ 58#define BFA_MODULE(__mod) \
@@ -80,7 +80,7 @@ enum {
80 80
81#define BFA_CACHELINE_SZ (256) 81#define BFA_CACHELINE_SZ (256)
82 82
83/** 83/*
84 * Structure used to interact between different BFA sub modules 84 * Structure used to interact between different BFA sub modules
85 * 85 *
86 * Each sub module needs to implement only the entry points relevant to it (and 86 * Each sub module needs to implement only the entry points relevant to it (and
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
index 788a250ffb8..65df62ef437 100644
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -15,10 +15,6 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/**
19 * Contains declarations all OS Specific files needed for BFA layer
20 */
21
22#ifndef __BFA_OS_INC_H__ 18#ifndef __BFA_OS_INC_H__
23#define __BFA_OS_INC_H__ 19#define __BFA_OS_INC_H__
24 20
@@ -44,11 +40,6 @@
44#define __BIGENDIAN 40#define __BIGENDIAN
45#endif 41#endif
46 42
47static inline u64 bfa_os_get_clock(void)
48{
49 return jiffies;
50}
51
52static inline u64 bfa_os_get_log_time(void) 43static inline u64 bfa_os_get_log_time(void)
53{ 44{
54 u64 system_time = 0; 45 u64 system_time = 0;
@@ -63,13 +54,6 @@ static inline u64 bfa_os_get_log_time(void)
63#define bfa_io_lat_clock_res_div HZ 54#define bfa_io_lat_clock_res_div HZ
64#define bfa_io_lat_clock_res_mul 1000 55#define bfa_io_lat_clock_res_mul 1000
65 56
66#define BFA_ASSERT(p) do { \
67 if (!(p)) { \
68 printk(KERN_ERR "assert(%s) failed at %s:%d\n", \
69 #p, __FILE__, __LINE__); \
70 } \
71} while (0)
72
73#define BFA_LOG(level, bfad, mask, fmt, arg...) \ 57#define BFA_LOG(level, bfad, mask, fmt, arg...) \
74do { \ 58do { \
75 if (((mask) == 4) || (level[1] <= '4')) \ 59 if (((mask) == 4) || (level[1] <= '4')) \
@@ -81,22 +65,6 @@ do { \
81 ((_x) & 0x00ff00) | \ 65 ((_x) & 0x00ff00) | \
82 (((_x) & 0xff0000) >> 16)) 66 (((_x) & 0xff0000) >> 16))
83 67
84#define bfa_swap_8b(_x) \
85 ((((_x) & 0xff00000000000000ull) >> 56) \
86 | (((_x) & 0x00ff000000000000ull) >> 40) \
87 | (((_x) & 0x0000ff0000000000ull) >> 24) \
88 | (((_x) & 0x000000ff00000000ull) >> 8) \
89 | (((_x) & 0x00000000ff000000ull) << 8) \
90 | (((_x) & 0x0000000000ff0000ull) << 24) \
91 | (((_x) & 0x000000000000ff00ull) << 40) \
92 | (((_x) & 0x00000000000000ffull) << 56))
93
94#define bfa_os_swap32(_x) \
95 ((((_x) & 0xff) << 24) | \
96 (((_x) & 0x0000ff00) << 8) | \
97 (((_x) & 0x00ff0000) >> 8) | \
98 (((_x) & 0xff000000) >> 24))
99
100#define bfa_os_swap_sgaddr(_x) ((u64)( \ 68#define bfa_os_swap_sgaddr(_x) ((u64)( \
101 (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \ 69 (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
102 (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \ 70 (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
@@ -108,59 +76,27 @@ do { \
108 (((u64)(_x) & (u64)0xff00000000000000ull) >> 32))) 76 (((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
109 77
110#ifndef __BIGENDIAN 78#ifndef __BIGENDIAN
111#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \ 79#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
112 (((_x) & 0x00ff) << 8)))
113#define bfa_os_htonl(_x) bfa_os_swap32(_x)
114#define bfa_os_htonll(_x) bfa_swap_8b(_x)
115#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
116#define bfa_os_wtole(_x) (_x)
117#define bfa_os_sgaddr(_x) (_x) 80#define bfa_os_sgaddr(_x) (_x)
118
119#else 81#else
120
121#define bfa_os_htons(_x) (_x)
122#define bfa_os_htonl(_x) (_x)
123#define bfa_os_hton3b(_x) (_x) 82#define bfa_os_hton3b(_x) (_x)
124#define bfa_os_htonll(_x) (_x)
125#define bfa_os_wtole(_x) bfa_os_swap32(_x)
126#define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x) 83#define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x)
127
128#endif 84#endif
129 85
130#define bfa_os_ntohs(_x) bfa_os_htons(_x)
131#define bfa_os_ntohl(_x) bfa_os_htonl(_x)
132#define bfa_os_ntohll(_x) bfa_os_htonll(_x)
133#define bfa_os_ntoh3b(_x) bfa_os_hton3b(_x) 86#define bfa_os_ntoh3b(_x) bfa_os_hton3b(_x)
134
135#define bfa_os_u32(__pa64) ((__pa64) >> 32) 87#define bfa_os_u32(__pa64) ((__pa64) >> 32)
136 88
137#define bfa_os_memset memset 89#define BFA_TRC_TS(_trcm) \
138#define bfa_os_memcpy memcpy 90 ({ \
139#define bfa_os_udelay udelay 91 struct timeval tv; \
140#define bfa_os_vsprintf vsprintf 92 \
141#define bfa_os_snprintf snprintf 93 do_gettimeofday(&tv); \
142 94 (tv.tv_sec*1000000+tv.tv_usec); \
143#define bfa_os_assign(__t, __s) __t = __s 95 })
144#define bfa_os_addr_t void __iomem *
145
146#define bfa_os_reg_read(_raddr) readl(_raddr)
147#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
148#define bfa_os_mem_read(_raddr, _off) \
149 bfa_os_swap32(readl(((_raddr) + (_off))))
150#define bfa_os_mem_write(_raddr, _off, _val) \
151 writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
152
153#define BFA_TRC_TS(_trcm) \
154 ({ \
155 struct timeval tv; \
156 \
157 do_gettimeofday(&tv); \
158 (tv.tv_sec*1000000+tv.tv_usec); \
159 })
160 96
161#define boolean_t int 97#define boolean_t int
162 98
163/** 99/*
164 * For current time stamp, OS API will fill-in 100 * For current time stamp, OS API will fill-in
165 */ 101 */
166struct bfa_timeval_s { 102struct bfa_timeval_s {
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index b6d170a13be..fff96226a38 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -37,16 +37,16 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
37 t0 = dip[i]; 37 t0 = dip[i];
38 t1 = dip[i + 1]; 38 t1 = dip[i + 1];
39#ifdef __BIGENDIAN 39#ifdef __BIGENDIAN
40 dip[i] = bfa_os_ntohl(t0); 40 dip[i] = be32_to_cpu(t0);
41 dip[i + 1] = bfa_os_ntohl(t1); 41 dip[i + 1] = be32_to_cpu(t1);
42#else 42#else
43 dip[i] = bfa_os_ntohl(t1); 43 dip[i] = be32_to_cpu(t1);
44 dip[i + 1] = bfa_os_ntohl(t0); 44 dip[i + 1] = be32_to_cpu(t0);
45#endif 45#endif
46 } 46 }
47} 47}
48 48
49/** 49/*
50 * bfa_port_enable_isr() 50 * bfa_port_enable_isr()
51 * 51 *
52 * 52 *
@@ -63,7 +63,7 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
63 port->endis_cbfn(port->endis_cbarg, status); 63 port->endis_cbfn(port->endis_cbarg, status);
64} 64}
65 65
66/** 66/*
67 * bfa_port_disable_isr() 67 * bfa_port_disable_isr()
68 * 68 *
69 * 69 *
@@ -80,7 +80,7 @@ bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
80 port->endis_cbfn(port->endis_cbarg, status); 80 port->endis_cbfn(port->endis_cbarg, status);
81} 81}
82 82
83/** 83/*
84 * bfa_port_get_stats_isr() 84 * bfa_port_get_stats_isr()
85 * 85 *
86 * 86 *
@@ -112,7 +112,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
112 } 112 }
113} 113}
114 114
115/** 115/*
116 * bfa_port_clear_stats_isr() 116 * bfa_port_clear_stats_isr()
117 * 117 *
118 * 118 *
@@ -129,7 +129,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
129 port->stats_status = status; 129 port->stats_status = status;
130 port->stats_busy = BFA_FALSE; 130 port->stats_busy = BFA_FALSE;
131 131
132 /** 132 /*
133 * re-initialize time stamp for stats reset 133 * re-initialize time stamp for stats reset
134 */ 134 */
135 bfa_os_gettimeofday(&tv); 135 bfa_os_gettimeofday(&tv);
@@ -141,7 +141,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
141 } 141 }
142} 142}
143 143
144/** 144/*
145 * bfa_port_isr() 145 * bfa_port_isr()
146 * 146 *
147 * 147 *
@@ -189,7 +189,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
189 } 189 }
190} 190}
191 191
192/** 192/*
193 * bfa_port_meminfo() 193 * bfa_port_meminfo()
194 * 194 *
195 * 195 *
@@ -203,7 +203,7 @@ bfa_port_meminfo(void)
203 return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ); 203 return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
204} 204}
205 205
206/** 206/*
207 * bfa_port_mem_claim() 207 * bfa_port_mem_claim()
208 * 208 *
209 * 209 *
@@ -220,7 +220,7 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
220 port->stats_dma.pa = dma_pa; 220 port->stats_dma.pa = dma_pa;
221} 221}
222 222
223/** 223/*
224 * bfa_port_enable() 224 * bfa_port_enable()
225 * 225 *
226 * Send the Port enable request to the f/w 226 * Send the Port enable request to the f/w
@@ -264,7 +264,7 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
264 return BFA_STATUS_OK; 264 return BFA_STATUS_OK;
265} 265}
266 266
267/** 267/*
268 * bfa_port_disable() 268 * bfa_port_disable()
269 * 269 *
270 * Send the Port disable request to the f/w 270 * Send the Port disable request to the f/w
@@ -308,7 +308,7 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
308 return BFA_STATUS_OK; 308 return BFA_STATUS_OK;
309} 309}
310 310
311/** 311/*
312 * bfa_port_get_stats() 312 * bfa_port_get_stats()
313 * 313 *
314 * Send the request to the f/w to fetch Port statistics. 314 * Send the request to the f/w to fetch Port statistics.
@@ -348,7 +348,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
348 return BFA_STATUS_OK; 348 return BFA_STATUS_OK;
349} 349}
350 350
351/** 351/*
352 * bfa_port_clear_stats() 352 * bfa_port_clear_stats()
353 * 353 *
354 * 354 *
@@ -385,7 +385,7 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
385 return BFA_STATUS_OK; 385 return BFA_STATUS_OK;
386} 386}
387 387
388/** 388/*
389 * bfa_port_hbfail() 389 * bfa_port_hbfail()
390 * 390 *
391 * 391 *
@@ -415,7 +415,7 @@ bfa_port_hbfail(void *arg)
415 } 415 }
416} 416}
417 417
418/** 418/*
419 * bfa_port_attach() 419 * bfa_port_attach()
420 * 420 *
421 * 421 *
@@ -449,7 +449,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
449 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); 449 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
450 bfa_ioc_hbfail_register(port->ioc, &port->hbfail); 450 bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
451 451
452 /** 452 /*
453 * initialize time stamp for stats reset 453 * initialize time stamp for stats reset
454 */ 454 */
455 bfa_os_gettimeofday(&tv); 455 bfa_os_gettimeofday(&tv);
@@ -458,7 +458,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
458 bfa_trc(port, 0); 458 bfa_trc(port, 0);
459} 459}
460 460
461/** 461/*
462 * bfa_port_detach() 462 * bfa_port_detach()
463 * 463 *
464 * 464 *
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index aa1dc749b28..c768143f480 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -29,7 +29,7 @@ BFA_MODULE(fcport);
29BFA_MODULE(rport); 29BFA_MODULE(rport);
30BFA_MODULE(uf); 30BFA_MODULE(uf);
31 31
32/** 32/*
33 * LPS related definitions 33 * LPS related definitions
34 */ 34 */
35#define BFA_LPS_MIN_LPORTS (1) 35#define BFA_LPS_MIN_LPORTS (1)
@@ -41,7 +41,7 @@ BFA_MODULE(uf);
41#define BFA_LPS_MAX_VPORTS_SUPP_CB 255 41#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42#define BFA_LPS_MAX_VPORTS_SUPP_CT 190 42#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
43 43
44/** 44/*
45 * lps_pvt BFA LPS private functions 45 * lps_pvt BFA LPS private functions
46 */ 46 */
47 47
@@ -55,7 +55,7 @@ enum bfa_lps_event {
55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ 55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
56}; 56};
57 57
58/** 58/*
59 * FC PORT related definitions 59 * FC PORT related definitions
60 */ 60 */
61/* 61/*
@@ -67,7 +67,7 @@ enum bfa_lps_event {
67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
68 68
69 69
70/** 70/*
71 * BFA port state machine events 71 * BFA port state machine events
72 */ 72 */
73enum bfa_fcport_sm_event { 73enum bfa_fcport_sm_event {
@@ -82,7 +82,7 @@ enum bfa_fcport_sm_event {
82 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 82 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
83}; 83};
84 84
85/** 85/*
86 * BFA port link notification state machine events 86 * BFA port link notification state machine events
87 */ 87 */
88 88
@@ -92,7 +92,7 @@ enum bfa_fcport_ln_sm_event {
92 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ 92 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
93}; 93};
94 94
95/** 95/*
96 * RPORT related definitions 96 * RPORT related definitions
97 */ 97 */
98#define bfa_rport_offline_cb(__rp) do { \ 98#define bfa_rport_offline_cb(__rp) do { \
@@ -126,7 +126,7 @@ enum bfa_rport_event {
126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ 126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
127}; 127};
128 128
129/** 129/*
130 * forward declarations FCXP related functions 130 * forward declarations FCXP related functions
131 */ 131 */
132static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); 132static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
@@ -138,7 +138,7 @@ static void bfa_fcxp_qresume(void *cbarg);
138static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, 138static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
139 struct bfi_fcxp_send_req_s *send_req); 139 struct bfi_fcxp_send_req_s *send_req);
140 140
141/** 141/*
142 * forward declarations for LPS functions 142 * forward declarations for LPS functions
143 */ 143 */
144static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 144static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
@@ -163,7 +163,7 @@ static void bfa_lps_login_comp(struct bfa_lps_s *lps);
163static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 163static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
164static void bfa_lps_cvl_event(struct bfa_lps_s *lps); 164static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
165 165
166/** 166/*
167 * forward declaration for LPS state machine 167 * forward declaration for LPS state machine
168 */ 168 */
169static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); 169static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
@@ -175,7 +175,7 @@ static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
175static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event 175static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
176 event); 176 event);
177 177
178/** 178/*
179 * forward declaration for FC Port functions 179 * forward declaration for FC Port functions
180 */ 180 */
181static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); 181static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
@@ -193,7 +193,7 @@ static void bfa_fcport_stats_get_timeout(void *cbarg);
193static void bfa_fcport_stats_clr_timeout(void *cbarg); 193static void bfa_fcport_stats_clr_timeout(void *cbarg);
194static void bfa_trunk_iocdisable(struct bfa_s *bfa); 194static void bfa_trunk_iocdisable(struct bfa_s *bfa);
195 195
196/** 196/*
197 * forward declaration for FC PORT state machine 197 * forward declaration for FC PORT state machine
198 */ 198 */
199static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, 199static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
@@ -252,7 +252,7 @@ static struct bfa_sm_table_s hal_port_sm_table[] = {
252}; 252};
253 253
254 254
255/** 255/*
256 * forward declaration for RPORT related functions 256 * forward declaration for RPORT related functions
257 */ 257 */
258static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); 258static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
@@ -265,7 +265,7 @@ static void __bfa_cb_rport_online(void *cbarg,
265static void __bfa_cb_rport_offline(void *cbarg, 265static void __bfa_cb_rport_offline(void *cbarg,
266 bfa_boolean_t complete); 266 bfa_boolean_t complete);
267 267
268/** 268/*
269 * forward declaration for RPORT state machine 269 * forward declaration for RPORT state machine
270 */ 270 */
271static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, 271static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
@@ -295,7 +295,7 @@ static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
295static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, 295static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
296 enum bfa_rport_event event); 296 enum bfa_rport_event event);
297 297
298/** 298/*
299 * PLOG related definitions 299 * PLOG related definitions
300 */ 300 */
301static int 301static int
@@ -330,7 +330,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
330 330
331 pl_recp = &(plog->plog_recs[tail]); 331 pl_recp = &(plog->plog_recs[tail]);
332 332
333 bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); 333 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
334 334
335 pl_recp->tv = bfa_os_get_log_time(); 335 pl_recp->tv = bfa_os_get_log_time();
336 BFA_PL_LOG_REC_INCR(plog->tail); 336 BFA_PL_LOG_REC_INCR(plog->tail);
@@ -342,9 +342,9 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
342void 342void
343bfa_plog_init(struct bfa_plog_s *plog) 343bfa_plog_init(struct bfa_plog_s *plog)
344{ 344{
345 bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s)); 345 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
346 346
347 bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); 347 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
348 plog->head = plog->tail = 0; 348 plog->head = plog->tail = 0;
349 plog->plog_enabled = 1; 349 plog->plog_enabled = 1;
350} 350}
@@ -357,7 +357,7 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
357 struct bfa_plog_rec_s lp; 357 struct bfa_plog_rec_s lp;
358 358
359 if (plog->plog_enabled) { 359 if (plog->plog_enabled) {
360 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 360 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
361 lp.mid = mid; 361 lp.mid = mid;
362 lp.eid = event; 362 lp.eid = event;
363 lp.log_type = BFA_PL_LOG_TYPE_STRING; 363 lp.log_type = BFA_PL_LOG_TYPE_STRING;
@@ -381,15 +381,14 @@ bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
381 num_ints = BFA_PL_INT_LOG_SZ; 381 num_ints = BFA_PL_INT_LOG_SZ;
382 382
383 if (plog->plog_enabled) { 383 if (plog->plog_enabled) {
384 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 384 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
385 lp.mid = mid; 385 lp.mid = mid;
386 lp.eid = event; 386 lp.eid = event;
387 lp.log_type = BFA_PL_LOG_TYPE_INT; 387 lp.log_type = BFA_PL_LOG_TYPE_INT;
388 lp.misc = misc; 388 lp.misc = misc;
389 389
390 for (i = 0; i < num_ints; i++) 390 for (i = 0; i < num_ints; i++)
391 bfa_os_assign(lp.log_entry.int_log[i], 391 lp.log_entry.int_log[i] = intarr[i];
392 intarr[i]);
393 392
394 lp.log_num_ints = (u8) num_ints; 393 lp.log_num_ints = (u8) num_ints;
395 394
@@ -407,7 +406,7 @@ bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
407 u32 ints[BFA_PL_INT_LOG_SZ]; 406 u32 ints[BFA_PL_INT_LOG_SZ];
408 407
409 if (plog->plog_enabled) { 408 if (plog->plog_enabled) {
410 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 409 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
411 410
412 ints[0] = tmp_int[0]; 411 ints[0] = tmp_int[0];
413 ints[1] = tmp_int[1]; 412 ints[1] = tmp_int[1];
@@ -427,7 +426,7 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
427 u32 ints[BFA_PL_INT_LOG_SZ]; 426 u32 ints[BFA_PL_INT_LOG_SZ];
428 427
429 if (plog->plog_enabled) { 428 if (plog->plog_enabled) {
430 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); 429 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
431 430
432 ints[0] = tmp_int[0]; 431 ints[0] = tmp_int[0];
433 ints[1] = tmp_int[1]; 432 ints[1] = tmp_int[1];
@@ -462,7 +461,7 @@ bfa_plog_get_setting(struct bfa_plog_s *plog)
462 return (bfa_boolean_t)plog->plog_enabled; 461 return (bfa_boolean_t)plog->plog_enabled;
463} 462}
464 463
465/** 464/*
466 * fcxp_pvt BFA FCXP private functions 465 * fcxp_pvt BFA FCXP private functions
467 */ 466 */
468 467
@@ -485,7 +484,7 @@ claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
485 mod->req_pld_list_pa = dm_pa; 484 mod->req_pld_list_pa = dm_pa;
486 dm_kva += buf_pool_sz; 485 dm_kva += buf_pool_sz;
487 dm_pa += buf_pool_sz; 486 dm_pa += buf_pool_sz;
488 bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz); 487 memset(mod->req_pld_list_kva, 0, buf_pool_sz);
489 488
490 /* 489 /*
491 * Initialize the fcxp rsp payload list 490 * Initialize the fcxp rsp payload list
@@ -495,7 +494,7 @@ claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
495 mod->rsp_pld_list_pa = dm_pa; 494 mod->rsp_pld_list_pa = dm_pa;
496 dm_kva += buf_pool_sz; 495 dm_kva += buf_pool_sz;
497 dm_pa += buf_pool_sz; 496 dm_pa += buf_pool_sz;
498 bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz); 497 memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
499 498
500 bfa_meminfo_dma_virt(mi) = dm_kva; 499 bfa_meminfo_dma_virt(mi) = dm_kva;
501 bfa_meminfo_dma_phys(mi) = dm_pa; 500 bfa_meminfo_dma_phys(mi) = dm_pa;
@@ -508,7 +507,7 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
508 struct bfa_fcxp_s *fcxp; 507 struct bfa_fcxp_s *fcxp;
509 508
510 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi); 509 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
511 bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); 510 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
512 511
513 INIT_LIST_HEAD(&mod->fcxp_free_q); 512 INIT_LIST_HEAD(&mod->fcxp_free_q);
514 INIT_LIST_HEAD(&mod->fcxp_active_q); 513 INIT_LIST_HEAD(&mod->fcxp_active_q);
@@ -559,11 +558,11 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
559{ 558{
560 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 559 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
561 560
562 bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s)); 561 memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
563 mod->bfa = bfa; 562 mod->bfa = bfa;
564 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; 563 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
565 564
566 /** 565 /*
567 * Initialize FCXP request and response payload sizes. 566 * Initialize FCXP request and response payload sizes.
568 */ 567 */
569 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; 568 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
@@ -741,20 +740,20 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
741{ 740{
742 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 741 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
743 struct bfa_fcxp_s *fcxp; 742 struct bfa_fcxp_s *fcxp;
744 u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag); 743 u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
745 744
746 bfa_trc(bfa, fcxp_tag); 745 bfa_trc(bfa, fcxp_tag);
747 746
748 fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len); 747 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
749 748
750 /** 749 /*
751 * @todo f/w should not set residue to non-0 when everything 750 * @todo f/w should not set residue to non-0 when everything
752 * is received. 751 * is received.
753 */ 752 */
754 if (fcxp_rsp->req_status == BFA_STATUS_OK) 753 if (fcxp_rsp->req_status == BFA_STATUS_OK)
755 fcxp_rsp->residue_len = 0; 754 fcxp_rsp->residue_len = 0;
756 else 755 else
757 fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len); 756 fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
758 757
759 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); 758 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
760 759
@@ -856,7 +855,7 @@ hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
856 } 855 }
857} 856}
858 857
859/** 858/*
860 * Handler to resume sending fcxp when space in available in cpe queue. 859 * Handler to resume sending fcxp when space in available in cpe queue.
861 */ 860 */
862static void 861static void
@@ -871,7 +870,7 @@ bfa_fcxp_qresume(void *cbarg)
871 bfa_fcxp_queue(fcxp, send_req); 870 bfa_fcxp_queue(fcxp, send_req);
872} 871}
873 872
874/** 873/*
875 * Queue fcxp send request to foimrware. 874 * Queue fcxp send request to foimrware.
876 */ 875 */
877static void 876static void
@@ -885,26 +884,26 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
885 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, 884 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
886 bfa_lpuid(bfa)); 885 bfa_lpuid(bfa));
887 886
888 send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag); 887 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
889 if (rport) { 888 if (rport) {
890 send_req->rport_fw_hndl = rport->fw_handle; 889 send_req->rport_fw_hndl = rport->fw_handle;
891 send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz); 890 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
892 if (send_req->max_frmsz == 0) 891 if (send_req->max_frmsz == 0)
893 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); 892 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
894 } else { 893 } else {
895 send_req->rport_fw_hndl = 0; 894 send_req->rport_fw_hndl = 0;
896 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); 895 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
897 } 896 }
898 897
899 send_req->vf_id = bfa_os_htons(reqi->vf_id); 898 send_req->vf_id = cpu_to_be16(reqi->vf_id);
900 send_req->lp_tag = reqi->lp_tag; 899 send_req->lp_tag = reqi->lp_tag;
901 send_req->class = reqi->class; 900 send_req->class = reqi->class;
902 send_req->rsp_timeout = rspi->rsp_timeout; 901 send_req->rsp_timeout = rspi->rsp_timeout;
903 send_req->cts = reqi->cts; 902 send_req->cts = reqi->cts;
904 send_req->fchs = reqi->fchs; 903 send_req->fchs = reqi->fchs;
905 904
906 send_req->req_len = bfa_os_htonl(reqi->req_tot_len); 905 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
907 send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen); 906 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
908 907
909 /* 908 /*
910 * setup req sgles 909 * setup req sgles
@@ -955,11 +954,11 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
955 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); 954 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
956} 955}
957 956
958/** 957/*
959 * hal_fcxp_api BFA FCXP API 958 * hal_fcxp_api BFA FCXP API
960 */ 959 */
961 960
962/** 961/*
963 * Allocate an FCXP instance to send a response or to send a request 962 * Allocate an FCXP instance to send a response or to send a request
964 * that has a response. Request/response buffers are allocated by caller. 963 * that has a response. Request/response buffers are allocated by caller.
965 * 964 *
@@ -1005,7 +1004,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
1005 return fcxp; 1004 return fcxp;
1006} 1005}
1007 1006
1008/** 1007/*
1009 * Get the internal request buffer pointer 1008 * Get the internal request buffer pointer
1010 * 1009 *
1011 * @param[in] fcxp BFA fcxp pointer 1010 * @param[in] fcxp BFA fcxp pointer
@@ -1032,7 +1031,7 @@ bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
1032 return mod->req_pld_sz; 1031 return mod->req_pld_sz;
1033} 1032}
1034 1033
1035/** 1034/*
1036 * Get the internal response buffer pointer 1035 * Get the internal response buffer pointer
1037 * 1036 *
1038 * @param[in] fcxp BFA fcxp pointer 1037 * @param[in] fcxp BFA fcxp pointer
@@ -1052,7 +1051,7 @@ bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1052 return rspbuf; 1051 return rspbuf;
1053} 1052}
1054 1053
1055/** 1054/*
1056 * Free the BFA FCXP 1055 * Free the BFA FCXP
1057 * 1056 *
1058 * @param[in] fcxp BFA fcxp pointer 1057 * @param[in] fcxp BFA fcxp pointer
@@ -1069,7 +1068,7 @@ bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1069 bfa_fcxp_put(fcxp); 1068 bfa_fcxp_put(fcxp);
1070} 1069}
1071 1070
1072/** 1071/*
1073 * Send a FCXP request 1072 * Send a FCXP request
1074 * 1073 *
1075 * @param[in] fcxp BFA fcxp pointer 1074 * @param[in] fcxp BFA fcxp pointer
@@ -1103,7 +1102,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1103 1102
1104 bfa_trc(bfa, fcxp->fcxp_tag); 1103 bfa_trc(bfa, fcxp->fcxp_tag);
1105 1104
1106 /** 1105 /*
1107 * setup request/response info 1106 * setup request/response info
1108 */ 1107 */
1109 reqi->bfa_rport = rport; 1108 reqi->bfa_rport = rport;
@@ -1118,7 +1117,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1118 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp; 1117 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1119 fcxp->send_cbarg = cbarg; 1118 fcxp->send_cbarg = cbarg;
1120 1119
1121 /** 1120 /*
1122 * If no room in CPE queue, wait for space in request queue 1121 * If no room in CPE queue, wait for space in request queue
1123 */ 1122 */
1124 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); 1123 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
@@ -1132,7 +1131,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1132 bfa_fcxp_queue(fcxp, send_req); 1131 bfa_fcxp_queue(fcxp, send_req);
1133} 1132}
1134 1133
1135/** 1134/*
1136 * Abort a BFA FCXP 1135 * Abort a BFA FCXP
1137 * 1136 *
1138 * @param[in] fcxp BFA fcxp pointer 1137 * @param[in] fcxp BFA fcxp pointer
@@ -1186,7 +1185,7 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1186void 1185void
1187bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) 1186bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1188{ 1187{
1189 /** 1188 /*
1190 * If waiting for room in request queue, cancel reqq wait 1189 * If waiting for room in request queue, cancel reqq wait
1191 * and free fcxp. 1190 * and free fcxp.
1192 */ 1191 */
@@ -1202,7 +1201,7 @@ bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1202 1201
1203 1202
1204 1203
1205/** 1204/*
1206 * hal_fcxp_public BFA FCXP public functions 1205 * hal_fcxp_public BFA FCXP public functions
1207 */ 1206 */
1208 1207
@@ -1229,11 +1228,11 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1229} 1228}
1230 1229
1231 1230
1232/** 1231/*
1233 * BFA LPS state machine functions 1232 * BFA LPS state machine functions
1234 */ 1233 */
1235 1234
1236/** 1235/*
1237 * Init state -- no login 1236 * Init state -- no login
1238 */ 1237 */
1239static void 1238static void
@@ -1285,7 +1284,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1285 } 1284 }
1286} 1285}
1287 1286
1288/** 1287/*
1289 * login is in progress -- awaiting response from firmware 1288 * login is in progress -- awaiting response from firmware
1290 */ 1289 */
1291static void 1290static void
@@ -1327,7 +1326,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1327 } 1326 }
1328} 1327}
1329 1328
1330/** 1329/*
1331 * login pending - awaiting space in request queue 1330 * login pending - awaiting space in request queue
1332 */ 1331 */
1333static void 1332static void
@@ -1359,7 +1358,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1359 } 1358 }
1360} 1359}
1361 1360
1362/** 1361/*
1363 * login complete 1362 * login complete
1364 */ 1363 */
1365static void 1364static void
@@ -1400,7 +1399,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1400 } 1399 }
1401} 1400}
1402 1401
1403/** 1402/*
1404 * logout in progress - awaiting firmware response 1403 * logout in progress - awaiting firmware response
1405 */ 1404 */
1406static void 1405static void
@@ -1424,7 +1423,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1424 } 1423 }
1425} 1424}
1426 1425
1427/** 1426/*
1428 * logout pending -- awaiting space in request queue 1427 * logout pending -- awaiting space in request queue
1429 */ 1428 */
1430static void 1429static void
@@ -1451,11 +1450,11 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1451 1450
1452 1451
1453 1452
1454/** 1453/*
1455 * lps_pvt BFA LPS private functions 1454 * lps_pvt BFA LPS private functions
1456 */ 1455 */
1457 1456
1458/** 1457/*
1459 * return memory requirement 1458 * return memory requirement
1460 */ 1459 */
1461static void 1460static void
@@ -1468,7 +1467,7 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1468 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; 1467 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1469} 1468}
1470 1469
1471/** 1470/*
1472 * bfa module attach at initialization time 1471 * bfa module attach at initialization time
1473 */ 1472 */
1474static void 1473static void
@@ -1479,7 +1478,7 @@ bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1479 struct bfa_lps_s *lps; 1478 struct bfa_lps_s *lps;
1480 int i; 1479 int i;
1481 1480
1482 bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s)); 1481 memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1483 mod->num_lps = BFA_LPS_MAX_LPORTS; 1482 mod->num_lps = BFA_LPS_MAX_LPORTS;
1484 if (cfg->drvcfg.min_cfg) 1483 if (cfg->drvcfg.min_cfg)
1485 mod->num_lps = BFA_LPS_MIN_LPORTS; 1484 mod->num_lps = BFA_LPS_MIN_LPORTS;
@@ -1516,7 +1515,7 @@ bfa_lps_stop(struct bfa_s *bfa)
1516{ 1515{
1517} 1516}
1518 1517
1519/** 1518/*
1520 * IOC in disabled state -- consider all lps offline 1519 * IOC in disabled state -- consider all lps offline
1521 */ 1520 */
1522static void 1521static void
@@ -1532,7 +1531,7 @@ bfa_lps_iocdisable(struct bfa_s *bfa)
1532 } 1531 }
1533} 1532}
1534 1533
1535/** 1534/*
1536 * Firmware login response 1535 * Firmware login response
1537 */ 1536 */
1538static void 1537static void
@@ -1550,7 +1549,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1550 lps->fport = rsp->f_port; 1549 lps->fport = rsp->f_port;
1551 lps->npiv_en = rsp->npiv_en; 1550 lps->npiv_en = rsp->npiv_en;
1552 lps->lp_pid = rsp->lp_pid; 1551 lps->lp_pid = rsp->lp_pid;
1553 lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit); 1552 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
1554 lps->pr_pwwn = rsp->port_name; 1553 lps->pr_pwwn = rsp->port_name;
1555 lps->pr_nwwn = rsp->node_name; 1554 lps->pr_nwwn = rsp->node_name;
1556 lps->auth_req = rsp->auth_req; 1555 lps->auth_req = rsp->auth_req;
@@ -1579,7 +1578,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1579 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1578 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1580} 1579}
1581 1580
1582/** 1581/*
1583 * Firmware logout response 1582 * Firmware logout response
1584 */ 1583 */
1585static void 1584static void
@@ -1594,7 +1593,7 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1594 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1593 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1595} 1594}
1596 1595
1597/** 1596/*
1598 * Firmware received a Clear virtual link request (for FCoE) 1597 * Firmware received a Clear virtual link request (for FCoE)
1599 */ 1598 */
1600static void 1599static void
@@ -1608,7 +1607,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1608 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); 1607 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1609} 1608}
1610 1609
1611/** 1610/*
1612 * Space is available in request queue, resume queueing request to firmware. 1611 * Space is available in request queue, resume queueing request to firmware.
1613 */ 1612 */
1614static void 1613static void
@@ -1619,7 +1618,7 @@ bfa_lps_reqq_resume(void *lps_arg)
1619 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); 1618 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1620} 1619}
1621 1620
1622/** 1621/*
1623 * lps is freed -- triggered by vport delete 1622 * lps is freed -- triggered by vport delete
1624 */ 1623 */
1625static void 1624static void
@@ -1632,7 +1631,7 @@ bfa_lps_free(struct bfa_lps_s *lps)
1632 list_add_tail(&lps->qe, &mod->lps_free_q); 1631 list_add_tail(&lps->qe, &mod->lps_free_q);
1633} 1632}
1634 1633
1635/** 1634/*
1636 * send login request to firmware 1635 * send login request to firmware
1637 */ 1636 */
1638static void 1637static void
@@ -1648,7 +1647,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps)
1648 1647
1649 m->lp_tag = lps->lp_tag; 1648 m->lp_tag = lps->lp_tag;
1650 m->alpa = lps->alpa; 1649 m->alpa = lps->alpa;
1651 m->pdu_size = bfa_os_htons(lps->pdusz); 1650 m->pdu_size = cpu_to_be16(lps->pdusz);
1652 m->pwwn = lps->pwwn; 1651 m->pwwn = lps->pwwn;
1653 m->nwwn = lps->nwwn; 1652 m->nwwn = lps->nwwn;
1654 m->fdisc = lps->fdisc; 1653 m->fdisc = lps->fdisc;
@@ -1657,7 +1656,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps)
1657 bfa_reqq_produce(lps->bfa, lps->reqq); 1656 bfa_reqq_produce(lps->bfa, lps->reqq);
1658} 1657}
1659 1658
1660/** 1659/*
1661 * send logout request to firmware 1660 * send logout request to firmware
1662 */ 1661 */
1663static void 1662static void
@@ -1676,7 +1675,7 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
1676 bfa_reqq_produce(lps->bfa, lps->reqq); 1675 bfa_reqq_produce(lps->bfa, lps->reqq);
1677} 1676}
1678 1677
1679/** 1678/*
1680 * Indirect login completion handler for non-fcs 1679 * Indirect login completion handler for non-fcs
1681 */ 1680 */
1682static void 1681static void
@@ -1693,7 +1692,7 @@ bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1693 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1692 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1694} 1693}
1695 1694
1696/** 1695/*
1697 * Login completion handler -- direct call for fcs, queue for others 1696 * Login completion handler -- direct call for fcs, queue for others
1698 */ 1697 */
1699static void 1698static void
@@ -1711,7 +1710,7 @@ bfa_lps_login_comp(struct bfa_lps_s *lps)
1711 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1710 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1712} 1711}
1713 1712
1714/** 1713/*
1715 * Indirect logout completion handler for non-fcs 1714 * Indirect logout completion handler for non-fcs
1716 */ 1715 */
1717static void 1716static void
@@ -1726,7 +1725,7 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1726 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1725 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1727} 1726}
1728 1727
1729/** 1728/*
1730 * Logout completion handler -- direct call for fcs, queue for others 1729 * Logout completion handler -- direct call for fcs, queue for others
1731 */ 1730 */
1732static void 1731static void
@@ -1741,7 +1740,7 @@ bfa_lps_logout_comp(struct bfa_lps_s *lps)
1741 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1740 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1742} 1741}
1743 1742
1744/** 1743/*
1745 * Clear virtual link completion handler for non-fcs 1744 * Clear virtual link completion handler for non-fcs
1746 */ 1745 */
1747static void 1746static void
@@ -1757,7 +1756,7 @@ bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1757 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); 1756 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1758} 1757}
1759 1758
1760/** 1759/*
1761 * Received Clear virtual link event --direct call for fcs, 1760 * Received Clear virtual link event --direct call for fcs,
1762 * queue for others 1761 * queue for others
1763 */ 1762 */
@@ -1777,7 +1776,7 @@ bfa_lps_cvl_event(struct bfa_lps_s *lps)
1777 1776
1778 1777
1779 1778
1780/** 1779/*
1781 * lps_public BFA LPS public functions 1780 * lps_public BFA LPS public functions
1782 */ 1781 */
1783 1782
@@ -1790,7 +1789,7 @@ bfa_lps_get_max_vport(struct bfa_s *bfa)
1790 return BFA_LPS_MAX_VPORTS_SUPP_CB; 1789 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1791} 1790}
1792 1791
1793/** 1792/*
1794 * Allocate a lport srvice tag. 1793 * Allocate a lport srvice tag.
1795 */ 1794 */
1796struct bfa_lps_s * 1795struct bfa_lps_s *
@@ -1810,7 +1809,7 @@ bfa_lps_alloc(struct bfa_s *bfa)
1810 return lps; 1809 return lps;
1811} 1810}
1812 1811
1813/** 1812/*
1814 * Free lport service tag. This can be called anytime after an alloc. 1813 * Free lport service tag. This can be called anytime after an alloc.
1815 * No need to wait for any pending login/logout completions. 1814 * No need to wait for any pending login/logout completions.
1816 */ 1815 */
@@ -1820,7 +1819,7 @@ bfa_lps_delete(struct bfa_lps_s *lps)
1820 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); 1819 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1821} 1820}
1822 1821
1823/** 1822/*
1824 * Initiate a lport login. 1823 * Initiate a lport login.
1825 */ 1824 */
1826void 1825void
@@ -1837,7 +1836,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1837 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1836 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1838} 1837}
1839 1838
1840/** 1839/*
1841 * Initiate a lport fdisc login. 1840 * Initiate a lport fdisc login.
1842 */ 1841 */
1843void 1842void
@@ -1854,7 +1853,7 @@ bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1854 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1853 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1855} 1854}
1856 1855
1857/** 1856/*
1858 * Initiate a lport logout (flogi). 1857 * Initiate a lport logout (flogi).
1859 */ 1858 */
1860void 1859void
@@ -1863,7 +1862,7 @@ bfa_lps_flogo(struct bfa_lps_s *lps)
1863 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1862 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1864} 1863}
1865 1864
1866/** 1865/*
1867 * Initiate a lport FDSIC logout. 1866 * Initiate a lport FDSIC logout.
1868 */ 1867 */
1869void 1868void
@@ -1872,7 +1871,7 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1872 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1871 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1873} 1872}
1874 1873
1875/** 1874/*
1876 * Discard a pending login request -- should be called only for 1875 * Discard a pending login request -- should be called only for
1877 * link down handling. 1876 * link down handling.
1878 */ 1877 */
@@ -1882,7 +1881,7 @@ bfa_lps_discard(struct bfa_lps_s *lps)
1882 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); 1881 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1883} 1882}
1884 1883
1885/** 1884/*
1886 * Return lport services tag 1885 * Return lport services tag
1887 */ 1886 */
1888u8 1887u8
@@ -1891,7 +1890,7 @@ bfa_lps_get_tag(struct bfa_lps_s *lps)
1891 return lps->lp_tag; 1890 return lps->lp_tag;
1892} 1891}
1893 1892
1894/** 1893/*
1895 * Return lport services tag given the pid 1894 * Return lport services tag given the pid
1896 */ 1895 */
1897u8 1896u8
@@ -1910,7 +1909,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1910 return 0; 1909 return 0;
1911} 1910}
1912 1911
1913/** 1912/*
1914 * return if fabric login indicates support for NPIV 1913 * return if fabric login indicates support for NPIV
1915 */ 1914 */
1916bfa_boolean_t 1915bfa_boolean_t
@@ -1919,7 +1918,7 @@ bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
1919 return lps->npiv_en; 1918 return lps->npiv_en;
1920} 1919}
1921 1920
1922/** 1921/*
1923 * Return TRUE if attached to F-Port, else return FALSE 1922 * Return TRUE if attached to F-Port, else return FALSE
1924 */ 1923 */
1925bfa_boolean_t 1924bfa_boolean_t
@@ -1928,7 +1927,7 @@ bfa_lps_is_fport(struct bfa_lps_s *lps)
1928 return lps->fport; 1927 return lps->fport;
1929} 1928}
1930 1929
1931/** 1930/*
1932 * Return TRUE if attached to a Brocade Fabric 1931 * Return TRUE if attached to a Brocade Fabric
1933 */ 1932 */
1934bfa_boolean_t 1933bfa_boolean_t
@@ -1936,7 +1935,7 @@ bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
1936{ 1935{
1937 return lps->brcd_switch; 1936 return lps->brcd_switch;
1938} 1937}
1939/** 1938/*
1940 * return TRUE if authentication is required 1939 * return TRUE if authentication is required
1941 */ 1940 */
1942bfa_boolean_t 1941bfa_boolean_t
@@ -1951,7 +1950,7 @@ bfa_lps_get_extstatus(struct bfa_lps_s *lps)
1951 return lps->ext_status; 1950 return lps->ext_status;
1952} 1951}
1953 1952
1954/** 1953/*
1955 * return port id assigned to the lport 1954 * return port id assigned to the lport
1956 */ 1955 */
1957u32 1956u32
@@ -1960,7 +1959,7 @@ bfa_lps_get_pid(struct bfa_lps_s *lps)
1960 return lps->lp_pid; 1959 return lps->lp_pid;
1961} 1960}
1962 1961
1963/** 1962/*
1964 * return port id assigned to the base lport 1963 * return port id assigned to the base lport
1965 */ 1964 */
1966u32 1965u32
@@ -1971,7 +1970,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa)
1971 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; 1970 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1972} 1971}
1973 1972
1974/** 1973/*
1975 * Return bb_credit assigned in FLOGI response 1974 * Return bb_credit assigned in FLOGI response
1976 */ 1975 */
1977u16 1976u16
@@ -1980,7 +1979,7 @@ bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
1980 return lps->pr_bbcred; 1979 return lps->pr_bbcred;
1981} 1980}
1982 1981
1983/** 1982/*
1984 * Return peer port name 1983 * Return peer port name
1985 */ 1984 */
1986wwn_t 1985wwn_t
@@ -1989,7 +1988,7 @@ bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
1989 return lps->pr_pwwn; 1988 return lps->pr_pwwn;
1990} 1989}
1991 1990
1992/** 1991/*
1993 * Return peer node name 1992 * Return peer node name
1994 */ 1993 */
1995wwn_t 1994wwn_t
@@ -1998,7 +1997,7 @@ bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
1998 return lps->pr_nwwn; 1997 return lps->pr_nwwn;
1999} 1998}
2000 1999
2001/** 2000/*
2002 * return reason code if login request is rejected 2001 * return reason code if login request is rejected
2003 */ 2002 */
2004u8 2003u8
@@ -2007,7 +2006,7 @@ bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
2007 return lps->lsrjt_rsn; 2006 return lps->lsrjt_rsn;
2008} 2007}
2009 2008
2010/** 2009/*
2011 * return explanation code if login request is rejected 2010 * return explanation code if login request is rejected
2012 */ 2011 */
2013u8 2012u8
@@ -2016,7 +2015,7 @@ bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
2016 return lps->lsrjt_expl; 2015 return lps->lsrjt_expl;
2017} 2016}
2018 2017
2019/** 2018/*
2020 * Return fpma/spma MAC for lport 2019 * Return fpma/spma MAC for lport
2021 */ 2020 */
2022mac_t 2021mac_t
@@ -2025,7 +2024,7 @@ bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
2025 return lps->lp_mac; 2024 return lps->lp_mac;
2026} 2025}
2027 2026
2028/** 2027/*
2029 * LPS firmware message class handler. 2028 * LPS firmware message class handler.
2030 */ 2029 */
2031void 2030void
@@ -2055,7 +2054,7 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2055 } 2054 }
2056} 2055}
2057 2056
2058/** 2057/*
2059 * FC PORT state machine functions 2058 * FC PORT state machine functions
2060 */ 2059 */
2061static void 2060static void
@@ -2066,7 +2065,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2066 2065
2067 switch (event) { 2066 switch (event) {
2068 case BFA_FCPORT_SM_START: 2067 case BFA_FCPORT_SM_START:
2069 /** 2068 /*
2070 * Start event after IOC is configured and BFA is started. 2069 * Start event after IOC is configured and BFA is started.
2071 */ 2070 */
2072 if (bfa_fcport_send_enable(fcport)) { 2071 if (bfa_fcport_send_enable(fcport)) {
@@ -2080,7 +2079,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2080 break; 2079 break;
2081 2080
2082 case BFA_FCPORT_SM_ENABLE: 2081 case BFA_FCPORT_SM_ENABLE:
2083 /** 2082 /*
2084 * Port is persistently configured to be in enabled state. Do 2083 * Port is persistently configured to be in enabled state. Do
2085 * not change state. Port enabling is done when START event is 2084 * not change state. Port enabling is done when START event is
2086 * received. 2085 * received.
@@ -2088,7 +2087,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2088 break; 2087 break;
2089 2088
2090 case BFA_FCPORT_SM_DISABLE: 2089 case BFA_FCPORT_SM_DISABLE:
2091 /** 2090 /*
2092 * If a port is persistently configured to be disabled, the 2091 * If a port is persistently configured to be disabled, the
2093 * first event will a port disable request. 2092 * first event will a port disable request.
2094 */ 2093 */
@@ -2124,13 +2123,13 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2124 break; 2123 break;
2125 2124
2126 case BFA_FCPORT_SM_ENABLE: 2125 case BFA_FCPORT_SM_ENABLE:
2127 /** 2126 /*
2128 * Already enable is in progress. 2127 * Already enable is in progress.
2129 */ 2128 */
2130 break; 2129 break;
2131 2130
2132 case BFA_FCPORT_SM_DISABLE: 2131 case BFA_FCPORT_SM_DISABLE:
2133 /** 2132 /*
2134 * Just send disable request to firmware when room becomes 2133 * Just send disable request to firmware when room becomes
2135 * available in request queue. 2134 * available in request queue.
2136 */ 2135 */
@@ -2145,7 +2144,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2145 2144
2146 case BFA_FCPORT_SM_LINKUP: 2145 case BFA_FCPORT_SM_LINKUP:
2147 case BFA_FCPORT_SM_LINKDOWN: 2146 case BFA_FCPORT_SM_LINKDOWN:
2148 /** 2147 /*
2149 * Possible to get link events when doing back-to-back 2148 * Possible to get link events when doing back-to-back
2150 * enable/disables. 2149 * enable/disables.
2151 */ 2150 */
@@ -2184,7 +2183,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2184 break; 2183 break;
2185 2184
2186 case BFA_FCPORT_SM_ENABLE: 2185 case BFA_FCPORT_SM_ENABLE:
2187 /** 2186 /*
2188 * Already being enabled. 2187 * Already being enabled.
2189 */ 2188 */
2190 break; 2189 break;
@@ -2257,13 +2256,13 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2257 break; 2256 break;
2258 2257
2259 case BFA_FCPORT_SM_LINKDOWN: 2258 case BFA_FCPORT_SM_LINKDOWN:
2260 /** 2259 /*
2261 * Possible to get link down event. 2260 * Possible to get link down event.
2262 */ 2261 */
2263 break; 2262 break;
2264 2263
2265 case BFA_FCPORT_SM_ENABLE: 2264 case BFA_FCPORT_SM_ENABLE:
2266 /** 2265 /*
2267 * Already enabled. 2266 * Already enabled.
2268 */ 2267 */
2269 break; 2268 break;
@@ -2306,7 +2305,7 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2306 2305
2307 switch (event) { 2306 switch (event) {
2308 case BFA_FCPORT_SM_ENABLE: 2307 case BFA_FCPORT_SM_ENABLE:
2309 /** 2308 /*
2310 * Already enabled. 2309 * Already enabled.
2311 */ 2310 */
2312 break; 2311 break;
@@ -2399,14 +2398,14 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2399 break; 2398 break;
2400 2399
2401 case BFA_FCPORT_SM_DISABLE: 2400 case BFA_FCPORT_SM_DISABLE:
2402 /** 2401 /*
2403 * Already being disabled. 2402 * Already being disabled.
2404 */ 2403 */
2405 break; 2404 break;
2406 2405
2407 case BFA_FCPORT_SM_LINKUP: 2406 case BFA_FCPORT_SM_LINKUP:
2408 case BFA_FCPORT_SM_LINKDOWN: 2407 case BFA_FCPORT_SM_LINKDOWN:
2409 /** 2408 /*
2410 * Possible to get link events when doing back-to-back 2409 * Possible to get link events when doing back-to-back
2411 * enable/disables. 2410 * enable/disables.
2412 */ 2411 */
@@ -2453,7 +2452,7 @@ bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2453 2452
2454 case BFA_FCPORT_SM_LINKUP: 2453 case BFA_FCPORT_SM_LINKUP:
2455 case BFA_FCPORT_SM_LINKDOWN: 2454 case BFA_FCPORT_SM_LINKDOWN:
2456 /** 2455 /*
2457 * Possible to get link events when doing back-to-back 2456 * Possible to get link events when doing back-to-back
2458 * enable/disables. 2457 * enable/disables.
2459 */ 2458 */
@@ -2483,7 +2482,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2483 break; 2482 break;
2484 2483
2485 case BFA_FCPORT_SM_DISABLE: 2484 case BFA_FCPORT_SM_DISABLE:
2486 /** 2485 /*
2487 * Already being disabled. 2486 * Already being disabled.
2488 */ 2487 */
2489 break; 2488 break;
@@ -2508,7 +2507,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2508 2507
2509 case BFA_FCPORT_SM_LINKUP: 2508 case BFA_FCPORT_SM_LINKUP:
2510 case BFA_FCPORT_SM_LINKDOWN: 2509 case BFA_FCPORT_SM_LINKDOWN:
2511 /** 2510 /*
2512 * Possible to get link events when doing back-to-back 2511 * Possible to get link events when doing back-to-back
2513 * enable/disables. 2512 * enable/disables.
2514 */ 2513 */
@@ -2533,7 +2532,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2533 2532
2534 switch (event) { 2533 switch (event) {
2535 case BFA_FCPORT_SM_START: 2534 case BFA_FCPORT_SM_START:
2536 /** 2535 /*
2537 * Ignore start event for a port that is disabled. 2536 * Ignore start event for a port that is disabled.
2538 */ 2537 */
2539 break; 2538 break;
@@ -2557,7 +2556,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2557 break; 2556 break;
2558 2557
2559 case BFA_FCPORT_SM_DISABLE: 2558 case BFA_FCPORT_SM_DISABLE:
2560 /** 2559 /*
2561 * Already disabled. 2560 * Already disabled.
2562 */ 2561 */
2563 break; 2562 break;
@@ -2587,14 +2586,14 @@ bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2587 break; 2586 break;
2588 2587
2589 default: 2588 default:
2590 /** 2589 /*
2591 * Ignore all other events. 2590 * Ignore all other events.
2592 */ 2591 */
2593 ; 2592 ;
2594 } 2593 }
2595} 2594}
2596 2595
2597/** 2596/*
2598 * Port is enabled. IOC is down/failed. 2597 * Port is enabled. IOC is down/failed.
2599 */ 2598 */
2600static void 2599static void
@@ -2613,14 +2612,14 @@ bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2613 break; 2612 break;
2614 2613
2615 default: 2614 default:
2616 /** 2615 /*
2617 * Ignore all events. 2616 * Ignore all events.
2618 */ 2617 */
2619 ; 2618 ;
2620 } 2619 }
2621} 2620}
2622 2621
2623/** 2622/*
2624 * Port is disabled. IOC is down/failed. 2623 * Port is disabled. IOC is down/failed.
2625 */ 2624 */
2626static void 2625static void
@@ -2639,14 +2638,14 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2639 break; 2638 break;
2640 2639
2641 default: 2640 default:
2642 /** 2641 /*
2643 * Ignore all events. 2642 * Ignore all events.
2644 */ 2643 */
2645 ; 2644 ;
2646 } 2645 }
2647} 2646}
2648 2647
2649/** 2648/*
2650 * Link state is down 2649 * Link state is down
2651 */ 2650 */
2652static void 2651static void
@@ -2666,7 +2665,7 @@ bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2666 } 2665 }
2667} 2666}
2668 2667
2669/** 2668/*
2670 * Link state is waiting for down notification 2669 * Link state is waiting for down notification
2671 */ 2670 */
2672static void 2671static void
@@ -2689,7 +2688,7 @@ bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2689 } 2688 }
2690} 2689}
2691 2690
2692/** 2691/*
2693 * Link state is waiting for down notification and there is a pending up 2692 * Link state is waiting for down notification and there is a pending up
2694 */ 2693 */
2695static void 2694static void
@@ -2713,7 +2712,7 @@ bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2713 } 2712 }
2714} 2713}
2715 2714
2716/** 2715/*
2717 * Link state is up 2716 * Link state is up
2718 */ 2717 */
2719static void 2718static void
@@ -2733,7 +2732,7 @@ bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2733 } 2732 }
2734} 2733}
2735 2734
2736/** 2735/*
2737 * Link state is waiting for up notification 2736 * Link state is waiting for up notification
2738 */ 2737 */
2739static void 2738static void
@@ -2756,7 +2755,7 @@ bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2756 } 2755 }
2757} 2756}
2758 2757
2759/** 2758/*
2760 * Link state is waiting for up notification and there is a pending down 2759 * Link state is waiting for up notification and there is a pending down
2761 */ 2760 */
2762static void 2761static void
@@ -2780,7 +2779,7 @@ bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2780 } 2779 }
2781} 2780}
2782 2781
2783/** 2782/*
2784 * Link state is waiting for up notification and there are pending down and up 2783 * Link state is waiting for up notification and there are pending down and up
2785 */ 2784 */
2786static void 2785static void
@@ -2806,7 +2805,7 @@ bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2806 2805
2807 2806
2808 2807
2809/** 2808/*
2810 * hal_port_private 2809 * hal_port_private
2811 */ 2810 */
2812 2811
@@ -2821,7 +2820,7 @@ __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2821 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); 2820 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2822} 2821}
2823 2822
2824/** 2823/*
2825 * Send SCN notification to upper layers. 2824 * Send SCN notification to upper layers.
2826 * trunk - false if caller is fcport to ignore fcport event in trunked mode 2825 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2827 */ 2826 */
@@ -2897,7 +2896,7 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2897 bfa_meminfo_dma_phys(meminfo) = dm_pa; 2896 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2898} 2897}
2899 2898
2900/** 2899/*
2901 * Memory initialization. 2900 * Memory initialization.
2902 */ 2901 */
2903static void 2902static void
@@ -2909,7 +2908,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2909 struct bfa_fcport_ln_s *ln = &fcport->ln; 2908 struct bfa_fcport_ln_s *ln = &fcport->ln;
2910 struct bfa_timeval_s tv; 2909 struct bfa_timeval_s tv;
2911 2910
2912 bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s)); 2911 memset(fcport, 0, sizeof(struct bfa_fcport_s));
2913 fcport->bfa = bfa; 2912 fcport->bfa = bfa;
2914 ln->fcport = fcport; 2913 ln->fcport = fcport;
2915 2914
@@ -2918,13 +2917,13 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2918 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); 2917 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2919 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 2918 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2920 2919
2921 /** 2920 /*
2922 * initialize time stamp for stats reset 2921 * initialize time stamp for stats reset
2923 */ 2922 */
2924 bfa_os_gettimeofday(&tv); 2923 bfa_os_gettimeofday(&tv);
2925 fcport->stats_reset_time = tv.tv_sec; 2924 fcport->stats_reset_time = tv.tv_sec;
2926 2925
2927 /** 2926 /*
2928 * initialize and set default configuration 2927 * initialize and set default configuration
2929 */ 2928 */
2930 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; 2929 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
@@ -2942,7 +2941,7 @@ bfa_fcport_detach(struct bfa_s *bfa)
2942{ 2941{
2943} 2942}
2944 2943
2945/** 2944/*
2946 * Called when IOC is ready. 2945 * Called when IOC is ready.
2947 */ 2946 */
2948static void 2947static void
@@ -2951,7 +2950,7 @@ bfa_fcport_start(struct bfa_s *bfa)
2951 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); 2950 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2952} 2951}
2953 2952
2954/** 2953/*
2955 * Called before IOC is stopped. 2954 * Called before IOC is stopped.
2956 */ 2955 */
2957static void 2956static void
@@ -2961,7 +2960,7 @@ bfa_fcport_stop(struct bfa_s *bfa)
2961 bfa_trunk_iocdisable(bfa); 2960 bfa_trunk_iocdisable(bfa);
2962} 2961}
2963 2962
2964/** 2963/*
2965 * Called when IOC failure is detected. 2964 * Called when IOC failure is detected.
2966 */ 2965 */
2967static void 2966static void
@@ -2986,18 +2985,17 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2986 fcport->myalpa = 0; 2985 fcport->myalpa = 0;
2987 2986
2988 /* QoS Details */ 2987 /* QoS Details */
2989 bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr); 2988 fcport->qos_attr = pevent->link_state.qos_attr;
2990 bfa_os_assign(fcport->qos_vc_attr, 2989 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
2991 pevent->link_state.vc_fcf.qos_vc_attr);
2992 2990
2993 /** 2991 /*
2994 * update trunk state if applicable 2992 * update trunk state if applicable
2995 */ 2993 */
2996 if (!fcport->cfg.trunked) 2994 if (!fcport->cfg.trunked)
2997 trunk->attr.state = BFA_TRUNK_DISABLED; 2995 trunk->attr.state = BFA_TRUNK_DISABLED;
2998 2996
2999 /* update FCoE specific */ 2997 /* update FCoE specific */
3000 fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan); 2998 fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
3001 2999
3002 bfa_trc(fcport->bfa, fcport->speed); 3000 bfa_trc(fcport->bfa, fcport->speed);
3003 bfa_trc(fcport->bfa, fcport->topology); 3001 bfa_trc(fcport->bfa, fcport->topology);
@@ -3010,7 +3008,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3010 fcport->topology = BFA_PORT_TOPOLOGY_NONE; 3008 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3011} 3009}
3012 3010
3013/** 3011/*
3014 * Send port enable message to firmware. 3012 * Send port enable message to firmware.
3015 */ 3013 */
3016static bfa_boolean_t 3014static bfa_boolean_t
@@ -3018,13 +3016,13 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3018{ 3016{
3019 struct bfi_fcport_enable_req_s *m; 3017 struct bfi_fcport_enable_req_s *m;
3020 3018
3021 /** 3019 /*
3022 * Increment message tag before queue check, so that responses to old 3020 * Increment message tag before queue check, so that responses to old
3023 * requests are discarded. 3021 * requests are discarded.
3024 */ 3022 */
3025 fcport->msgtag++; 3023 fcport->msgtag++;
3026 3024
3027 /** 3025 /*
3028 * check for room in queue to send request now 3026 * check for room in queue to send request now
3029 */ 3027 */
3030 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 3028 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3040,19 +3038,19 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3040 m->pwwn = fcport->pwwn; 3038 m->pwwn = fcport->pwwn;
3041 m->port_cfg = fcport->cfg; 3039 m->port_cfg = fcport->cfg;
3042 m->msgtag = fcport->msgtag; 3040 m->msgtag = fcport->msgtag;
3043 m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize); 3041 m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3044 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); 3042 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3045 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); 3043 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3046 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); 3044 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3047 3045
3048 /** 3046 /*
3049 * queue I/O message to firmware 3047 * queue I/O message to firmware
3050 */ 3048 */
3051 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3049 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3052 return BFA_TRUE; 3050 return BFA_TRUE;
3053} 3051}
3054 3052
3055/** 3053/*
3056 * Send port disable message to firmware. 3054 * Send port disable message to firmware.
3057 */ 3055 */
3058static bfa_boolean_t 3056static bfa_boolean_t
@@ -3060,13 +3058,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3060{ 3058{
3061 struct bfi_fcport_req_s *m; 3059 struct bfi_fcport_req_s *m;
3062 3060
3063 /** 3061 /*
3064 * Increment message tag before queue check, so that responses to old 3062 * Increment message tag before queue check, so that responses to old
3065 * requests are discarded. 3063 * requests are discarded.
3066 */ 3064 */
3067 fcport->msgtag++; 3065 fcport->msgtag++;
3068 3066
3069 /** 3067 /*
3070 * check for room in queue to send request now 3068 * check for room in queue to send request now
3071 */ 3069 */
3072 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 3070 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3080,7 +3078,7 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3080 bfa_lpuid(fcport->bfa)); 3078 bfa_lpuid(fcport->bfa));
3081 m->msgtag = fcport->msgtag; 3079 m->msgtag = fcport->msgtag;
3082 3080
3083 /** 3081 /*
3084 * queue I/O message to firmware 3082 * queue I/O message to firmware
3085 */ 3083 */
3086 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3084 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3105,7 +3103,7 @@ bfa_fcport_send_txcredit(void *port_cbarg)
3105 struct bfa_fcport_s *fcport = port_cbarg; 3103 struct bfa_fcport_s *fcport = port_cbarg;
3106 struct bfi_fcport_set_svc_params_req_s *m; 3104 struct bfi_fcport_set_svc_params_req_s *m;
3107 3105
3108 /** 3106 /*
3109 * check for room in queue to send request now 3107 * check for room in queue to send request now
3110 */ 3108 */
3111 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 3109 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3116,9 +3114,9 @@ bfa_fcport_send_txcredit(void *port_cbarg)
3116 3114
3117 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, 3115 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
3118 bfa_lpuid(fcport->bfa)); 3116 bfa_lpuid(fcport->bfa));
3119 m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit); 3117 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
3120 3118
3121 /** 3119 /*
3122 * queue I/O message to firmware 3120 * queue I/O message to firmware
3123 */ 3121 */
3124 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3122 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3134,7 +3132,7 @@ bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3134 3132
3135 /* Now swap the 32 bit fields */ 3133 /* Now swap the 32 bit fields */
3136 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) 3134 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3137 dip[i] = bfa_os_ntohl(sip[i]); 3135 dip[i] = be32_to_cpu(sip[i]);
3138} 3136}
3139 3137
3140static void 3138static void
@@ -3148,11 +3146,11 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3148 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); 3146 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3149 i = i + 2) { 3147 i = i + 2) {
3150#ifdef __BIGENDIAN 3148#ifdef __BIGENDIAN
3151 dip[i] = bfa_os_ntohl(sip[i]); 3149 dip[i] = be32_to_cpu(sip[i]);
3152 dip[i + 1] = bfa_os_ntohl(sip[i + 1]); 3150 dip[i + 1] = be32_to_cpu(sip[i + 1]);
3153#else 3151#else
3154 dip[i] = bfa_os_ntohl(sip[i + 1]); 3152 dip[i] = be32_to_cpu(sip[i + 1]);
3155 dip[i + 1] = bfa_os_ntohl(sip[i]); 3153 dip[i + 1] = be32_to_cpu(sip[i]);
3156#endif 3154#endif
3157 } 3155 }
3158} 3156}
@@ -3223,7 +3221,7 @@ bfa_fcport_send_stats_get(void *cbarg)
3223 } 3221 }
3224 fcport->stats_qfull = BFA_FALSE; 3222 fcport->stats_qfull = BFA_FALSE;
3225 3223
3226 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 3224 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3227 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, 3225 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3228 bfa_lpuid(fcport->bfa)); 3226 bfa_lpuid(fcport->bfa));
3229 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3227 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3237,7 +3235,7 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3237 if (complete) { 3235 if (complete) {
3238 struct bfa_timeval_s tv; 3236 struct bfa_timeval_s tv;
3239 3237
3240 /** 3238 /*
3241 * re-initialize time stamp for stats reset 3239 * re-initialize time stamp for stats reset
3242 */ 3240 */
3243 bfa_os_gettimeofday(&tv); 3241 bfa_os_gettimeofday(&tv);
@@ -3285,13 +3283,13 @@ bfa_fcport_send_stats_clear(void *cbarg)
3285 } 3283 }
3286 fcport->stats_qfull = BFA_FALSE; 3284 fcport->stats_qfull = BFA_FALSE;
3287 3285
3288 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 3286 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3289 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, 3287 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3290 bfa_lpuid(fcport->bfa)); 3288 bfa_lpuid(fcport->bfa));
3291 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3289 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3292} 3290}
3293 3291
3294/** 3292/*
3295 * Handle trunk SCN event from firmware. 3293 * Handle trunk SCN event from firmware.
3296 */ 3294 */
3297static void 3295static void
@@ -3312,7 +3310,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3312 bfa_trc(fcport->bfa, scn->trunk_state); 3310 bfa_trc(fcport->bfa, scn->trunk_state);
3313 bfa_trc(fcport->bfa, scn->trunk_speed); 3311 bfa_trc(fcport->bfa, scn->trunk_speed);
3314 3312
3315 /** 3313 /*
3316 * Save off new state for trunk attribute query 3314 * Save off new state for trunk attribute query
3317 */ 3315 */
3318 state_prev = trunk->attr.state; 3316 state_prev = trunk->attr.state;
@@ -3327,7 +3325,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3327 lattr->trunk_wwn = tlink->trunk_wwn; 3325 lattr->trunk_wwn = tlink->trunk_wwn;
3328 lattr->fctl = tlink->fctl; 3326 lattr->fctl = tlink->fctl;
3329 lattr->speed = tlink->speed; 3327 lattr->speed = tlink->speed;
3330 lattr->deskew = bfa_os_ntohl(tlink->deskew); 3328 lattr->deskew = be32_to_cpu(tlink->deskew);
3331 3329
3332 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) { 3330 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3333 fcport->speed = tlink->speed; 3331 fcport->speed = tlink->speed;
@@ -3360,7 +3358,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3360 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); 3358 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3361 } 3359 }
3362 3360
3363 /** 3361 /*
3364 * Notify upper layers if trunk state changed. 3362 * Notify upper layers if trunk state changed.
3365 */ 3363 */
3366 if ((state_prev != trunk->attr.state) || 3364 if ((state_prev != trunk->attr.state) ||
@@ -3376,7 +3374,7 @@ bfa_trunk_iocdisable(struct bfa_s *bfa)
3376 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3374 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3377 int i = 0; 3375 int i = 0;
3378 3376
3379 /** 3377 /*
3380 * In trunked mode, notify upper layers that link is down 3378 * In trunked mode, notify upper layers that link is down
3381 */ 3379 */
3382 if (fcport->cfg.trunked) { 3380 if (fcport->cfg.trunked) {
@@ -3400,11 +3398,11 @@ bfa_trunk_iocdisable(struct bfa_s *bfa)
3400 3398
3401 3399
3402 3400
3403/** 3401/*
3404 * hal_port_public 3402 * hal_port_public
3405 */ 3403 */
3406 3404
3407/** 3405/*
3408 * Called to initialize port attributes 3406 * Called to initialize port attributes
3409 */ 3407 */
3410void 3408void
@@ -3412,7 +3410,7 @@ bfa_fcport_init(struct bfa_s *bfa)
3412{ 3410{
3413 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3411 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3414 3412
3415 /** 3413 /*
3416 * Initialize port attributes from IOC hardware data. 3414 * Initialize port attributes from IOC hardware data.
3417 */ 3415 */
3418 bfa_fcport_set_wwns(fcport); 3416 bfa_fcport_set_wwns(fcport);
@@ -3426,7 +3424,7 @@ bfa_fcport_init(struct bfa_s *bfa)
3426 bfa_assert(fcport->speed_sup); 3424 bfa_assert(fcport->speed_sup);
3427} 3425}
3428 3426
3429/** 3427/*
3430 * Firmware message handler. 3428 * Firmware message handler.
3431 */ 3429 */
3432void 3430void
@@ -3507,11 +3505,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3507 3505
3508 3506
3509 3507
3510/** 3508/*
3511 * hal_port_api 3509 * hal_port_api
3512 */ 3510 */
3513 3511
3514/** 3512/*
3515 * Registered callback for port events. 3513 * Registered callback for port events.
3516 */ 3514 */
3517void 3515void
@@ -3552,7 +3550,7 @@ bfa_fcport_disable(struct bfa_s *bfa)
3552 return BFA_STATUS_OK; 3550 return BFA_STATUS_OK;
3553} 3551}
3554 3552
3555/** 3553/*
3556 * Configure port speed. 3554 * Configure port speed.
3557 */ 3555 */
3558bfa_status_t 3556bfa_status_t
@@ -3574,7 +3572,7 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3574 return BFA_STATUS_OK; 3572 return BFA_STATUS_OK;
3575} 3573}
3576 3574
3577/** 3575/*
3578 * Get current speed. 3576 * Get current speed.
3579 */ 3577 */
3580enum bfa_port_speed 3578enum bfa_port_speed
@@ -3585,7 +3583,7 @@ bfa_fcport_get_speed(struct bfa_s *bfa)
3585 return fcport->speed; 3583 return fcport->speed;
3586} 3584}
3587 3585
3588/** 3586/*
3589 * Configure port topology. 3587 * Configure port topology.
3590 */ 3588 */
3591bfa_status_t 3589bfa_status_t
@@ -3610,7 +3608,7 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3610 return BFA_STATUS_OK; 3608 return BFA_STATUS_OK;
3611} 3609}
3612 3610
3613/** 3611/*
3614 * Get current topology. 3612 * Get current topology.
3615 */ 3613 */
3616enum bfa_port_topology 3614enum bfa_port_topology
@@ -3710,7 +3708,7 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3710 bfa_fcport_send_txcredit(fcport); 3708 bfa_fcport_send_txcredit(fcport);
3711} 3709}
3712 3710
3713/** 3711/*
3714 * Get port attributes. 3712 * Get port attributes.
3715 */ 3713 */
3716 3714
@@ -3729,7 +3727,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3729{ 3727{
3730 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3728 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3731 3729
3732 bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s)); 3730 memset(attr, 0, sizeof(struct bfa_port_attr_s));
3733 3731
3734 attr->nwwn = fcport->nwwn; 3732 attr->nwwn = fcport->nwwn;
3735 attr->pwwn = fcport->pwwn; 3733 attr->pwwn = fcport->pwwn;
@@ -3737,7 +3735,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3737 attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc); 3735 attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
3738 attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc); 3736 attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);
3739 3737
3740 bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg, 3738 memcpy(&attr->pport_cfg, &fcport->cfg,
3741 sizeof(struct bfa_port_cfg_s)); 3739 sizeof(struct bfa_port_cfg_s));
3742 /* speed attributes */ 3740 /* speed attributes */
3743 attr->pport_cfg.speed = fcport->cfg.speed; 3741 attr->pport_cfg.speed = fcport->cfg.speed;
@@ -3770,7 +3768,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3770 3768
3771#define BFA_FCPORT_STATS_TOV 1000 3769#define BFA_FCPORT_STATS_TOV 1000
3772 3770
3773/** 3771/*
3774 * Fetch port statistics (FCQoS or FCoE). 3772 * Fetch port statistics (FCQoS or FCoE).
3775 */ 3773 */
3776bfa_status_t 3774bfa_status_t
@@ -3796,7 +3794,7 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3796 return BFA_STATUS_OK; 3794 return BFA_STATUS_OK;
3797} 3795}
3798 3796
3799/** 3797/*
3800 * Reset port statistics (FCQoS or FCoE). 3798 * Reset port statistics (FCQoS or FCoE).
3801 */ 3799 */
3802bfa_status_t 3800bfa_status_t
@@ -3820,7 +3818,7 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3820 return BFA_STATUS_OK; 3818 return BFA_STATUS_OK;
3821} 3819}
3822 3820
3823/** 3821/*
3824 * Fetch FCQoS port statistics 3822 * Fetch FCQoS port statistics
3825 */ 3823 */
3826bfa_status_t 3824bfa_status_t
@@ -3833,7 +3831,7 @@ bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3833 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3831 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3834} 3832}
3835 3833
3836/** 3834/*
3837 * Reset FCoE port statistics 3835 * Reset FCoE port statistics
3838 */ 3836 */
3839bfa_status_t 3837bfa_status_t
@@ -3845,7 +3843,7 @@ bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3845 return bfa_fcport_clear_stats(bfa, cbfn, cbarg); 3843 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3846} 3844}
3847 3845
3848/** 3846/*
3849 * Fetch FCQoS port statistics 3847 * Fetch FCQoS port statistics
3850 */ 3848 */
3851bfa_status_t 3849bfa_status_t
@@ -3858,7 +3856,7 @@ bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3858 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3856 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3859} 3857}
3860 3858
3861/** 3859/*
3862 * Reset FCoE port statistics 3860 * Reset FCoE port statistics
3863 */ 3861 */
3864bfa_status_t 3862bfa_status_t
@@ -3876,7 +3874,7 @@ bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
3876 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3874 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3877 3875
3878 qos_attr->state = fcport->qos_attr.state; 3876 qos_attr->state = fcport->qos_attr.state;
3879 qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr); 3877 qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
3880} 3878}
3881 3879
3882void 3880void
@@ -3887,10 +3885,10 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
3887 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; 3885 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
3888 u32 i = 0; 3886 u32 i = 0;
3889 3887
3890 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count); 3888 qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
3891 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit); 3889 qos_vc_attr->shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
3892 qos_vc_attr->elp_opmode_flags = 3890 qos_vc_attr->elp_opmode_flags =
3893 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags); 3891 be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
3894 3892
3895 /* Individual VC info */ 3893 /* Individual VC info */
3896 while (i < qos_vc_attr->total_vc_count) { 3894 while (i < qos_vc_attr->total_vc_count) {
@@ -3904,7 +3902,7 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
3904 } 3902 }
3905} 3903}
3906 3904
3907/** 3905/*
3908 * Fetch port attributes. 3906 * Fetch port attributes.
3909 */ 3907 */
3910bfa_boolean_t 3908bfa_boolean_t
@@ -3939,7 +3937,7 @@ bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
3939 3937
3940 if (ioc_type == BFA_IOC_TYPE_FC) { 3938 if (ioc_type == BFA_IOC_TYPE_FC) {
3941 fcport->cfg.qos_enabled = on_off; 3939 fcport->cfg.qos_enabled = on_off;
3942 /** 3940 /*
3943 * Notify fcpim of the change in QoS state 3941 * Notify fcpim of the change in QoS state
3944 */ 3942 */
3945 bfa_fcpim_update_ioredirect(bfa); 3943 bfa_fcpim_update_ioredirect(bfa);
@@ -3959,7 +3957,7 @@ bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
3959 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; 3957 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
3960} 3958}
3961 3959
3962/** 3960/*
3963 * Configure default minimum ratelim speed 3961 * Configure default minimum ratelim speed
3964 */ 3962 */
3965bfa_status_t 3963bfa_status_t
@@ -3980,7 +3978,7 @@ bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3980 return BFA_STATUS_OK; 3978 return BFA_STATUS_OK;
3981} 3979}
3982 3980
3983/** 3981/*
3984 * Get default minimum ratelim speed 3982 * Get default minimum ratelim speed
3985 */ 3983 */
3986enum bfa_port_speed 3984enum bfa_port_speed
@@ -4095,10 +4093,10 @@ bfa_trunk_disable(struct bfa_s *bfa)
4095} 4093}
4096 4094
4097 4095
4098/** 4096/*
4099 * Rport State machine functions 4097 * Rport State machine functions
4100 */ 4098 */
4101/** 4099/*
4102 * Beginning state, only online event expected. 4100 * Beginning state, only online event expected.
4103 */ 4101 */
4104static void 4102static void
@@ -4151,7 +4149,7 @@ bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4151 } 4149 }
4152} 4150}
4153 4151
4154/** 4152/*
4155 * Waiting for rport create response from firmware. 4153 * Waiting for rport create response from firmware.
4156 */ 4154 */
4157static void 4155static void
@@ -4188,7 +4186,7 @@ bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4188 } 4186 }
4189} 4187}
4190 4188
4191/** 4189/*
4192 * Request queue is full, awaiting queue resume to send create request. 4190 * Request queue is full, awaiting queue resume to send create request.
4193 */ 4191 */
4194static void 4192static void
@@ -4229,7 +4227,7 @@ bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4229 } 4227 }
4230} 4228}
4231 4229
4232/** 4230/*
4233 * Online state - normal parking state. 4231 * Online state - normal parking state.
4234 */ 4232 */
4235static void 4233static void
@@ -4275,9 +4273,9 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4275 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority); 4273 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4276 4274
4277 qos_scn->old_qos_attr.qos_flow_id = 4275 qos_scn->old_qos_attr.qos_flow_id =
4278 bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id); 4276 be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
4279 qos_scn->new_qos_attr.qos_flow_id = 4277 qos_scn->new_qos_attr.qos_flow_id =
4280 bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id); 4278 be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
4281 4279
4282 if (qos_scn->old_qos_attr.qos_flow_id != 4280 if (qos_scn->old_qos_attr.qos_flow_id !=
4283 qos_scn->new_qos_attr.qos_flow_id) 4281 qos_scn->new_qos_attr.qos_flow_id)
@@ -4297,7 +4295,7 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4297 } 4295 }
4298} 4296}
4299 4297
4300/** 4298/*
4301 * Firmware rport is being deleted - awaiting f/w response. 4299 * Firmware rport is being deleted - awaiting f/w response.
4302 */ 4300 */
4303static void 4301static void
@@ -4360,7 +4358,7 @@ bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4360 } 4358 }
4361} 4359}
4362 4360
4363/** 4361/*
4364 * Offline state. 4362 * Offline state.
4365 */ 4363 */
4366static void 4364static void
@@ -4395,7 +4393,7 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4395 } 4393 }
4396} 4394}
4397 4395
4398/** 4396/*
4399 * Rport is deleted, waiting for firmware response to delete. 4397 * Rport is deleted, waiting for firmware response to delete.
4400 */ 4398 */
4401static void 4399static void
@@ -4447,7 +4445,7 @@ bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4447 } 4445 }
4448} 4446}
4449 4447
4450/** 4448/*
4451 * Waiting for rport create response from firmware. A delete is pending. 4449 * Waiting for rport create response from firmware. A delete is pending.
4452 */ 4450 */
4453static void 4451static void
@@ -4478,7 +4476,7 @@ bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4478 } 4476 }
4479} 4477}
4480 4478
4481/** 4479/*
4482 * Waiting for rport create response from firmware. Rport offline is pending. 4480 * Waiting for rport create response from firmware. Rport offline is pending.
4483 */ 4481 */
4484static void 4482static void
@@ -4513,7 +4511,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4513 } 4511 }
4514} 4512}
4515 4513
4516/** 4514/*
4517 * IOC h/w failed. 4515 * IOC h/w failed.
4518 */ 4516 */
4519static void 4517static void
@@ -4553,7 +4551,7 @@ bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4553 4551
4554 4552
4555 4553
4556/** 4554/*
4557 * bfa_rport_private BFA rport private functions 4555 * bfa_rport_private BFA rport private functions
4558 */ 4556 */
4559 4557
@@ -4612,12 +4610,12 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4612 !(mod->num_rports & (mod->num_rports - 1))); 4610 !(mod->num_rports & (mod->num_rports - 1)));
4613 4611
4614 for (i = 0; i < mod->num_rports; i++, rp++) { 4612 for (i = 0; i < mod->num_rports; i++, rp++) {
4615 bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s)); 4613 memset(rp, 0, sizeof(struct bfa_rport_s));
4616 rp->bfa = bfa; 4614 rp->bfa = bfa;
4617 rp->rport_tag = i; 4615 rp->rport_tag = i;
4618 bfa_sm_set_state(rp, bfa_rport_sm_uninit); 4616 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4619 4617
4620 /** 4618 /*
4621 * - is unused 4619 * - is unused
4622 */ 4620 */
4623 if (i) 4621 if (i)
@@ -4626,7 +4624,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4626 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); 4624 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
4627 } 4625 }
4628 4626
4629 /** 4627 /*
4630 * consume memory 4628 * consume memory
4631 */ 4629 */
4632 bfa_meminfo_kva(meminfo) = (u8 *) rp; 4630 bfa_meminfo_kva(meminfo) = (u8 *) rp;
@@ -4687,7 +4685,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4687{ 4685{
4688 struct bfi_rport_create_req_s *m; 4686 struct bfi_rport_create_req_s *m;
4689 4687
4690 /** 4688 /*
4691 * check for room in queue to send request now 4689 * check for room in queue to send request now
4692 */ 4690 */
4693 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); 4691 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4699,7 +4697,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4699 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ, 4697 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4700 bfa_lpuid(rp->bfa)); 4698 bfa_lpuid(rp->bfa));
4701 m->bfa_handle = rp->rport_tag; 4699 m->bfa_handle = rp->rport_tag;
4702 m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz); 4700 m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4703 m->pid = rp->rport_info.pid; 4701 m->pid = rp->rport_info.pid;
4704 m->lp_tag = rp->rport_info.lp_tag; 4702 m->lp_tag = rp->rport_info.lp_tag;
4705 m->local_pid = rp->rport_info.local_pid; 4703 m->local_pid = rp->rport_info.local_pid;
@@ -4708,7 +4706,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4708 m->vf_id = rp->rport_info.vf_id; 4706 m->vf_id = rp->rport_info.vf_id;
4709 m->cisc = rp->rport_info.cisc; 4707 m->cisc = rp->rport_info.cisc;
4710 4708
4711 /** 4709 /*
4712 * queue I/O message to firmware 4710 * queue I/O message to firmware
4713 */ 4711 */
4714 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); 4712 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4720,7 +4718,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4720{ 4718{
4721 struct bfi_rport_delete_req_s *m; 4719 struct bfi_rport_delete_req_s *m;
4722 4720
4723 /** 4721 /*
4724 * check for room in queue to send request now 4722 * check for room in queue to send request now
4725 */ 4723 */
4726 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); 4724 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4733,7 +4731,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4733 bfa_lpuid(rp->bfa)); 4731 bfa_lpuid(rp->bfa));
4734 m->fw_handle = rp->fw_handle; 4732 m->fw_handle = rp->fw_handle;
4735 4733
4736 /** 4734 /*
4737 * queue I/O message to firmware 4735 * queue I/O message to firmware
4738 */ 4736 */
4739 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); 4737 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4745,7 +4743,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4745{ 4743{
4746 struct bfa_rport_speed_req_s *m; 4744 struct bfa_rport_speed_req_s *m;
4747 4745
4748 /** 4746 /*
4749 * check for room in queue to send request now 4747 * check for room in queue to send request now
4750 */ 4748 */
4751 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); 4749 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4759,7 +4757,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4759 m->fw_handle = rp->fw_handle; 4757 m->fw_handle = rp->fw_handle;
4760 m->speed = (u8)rp->rport_info.speed; 4758 m->speed = (u8)rp->rport_info.speed;
4761 4759
4762 /** 4760 /*
4763 * queue I/O message to firmware 4761 * queue I/O message to firmware
4764 */ 4762 */
4765 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); 4763 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4768,11 +4766,11 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4768 4766
4769 4767
4770 4768
4771/** 4769/*
4772 * bfa_rport_public 4770 * bfa_rport_public
4773 */ 4771 */
4774 4772
4775/** 4773/*
4776 * Rport interrupt processing. 4774 * Rport interrupt processing.
4777 */ 4775 */
4778void 4776void
@@ -4814,7 +4812,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4814 4812
4815 4813
4816 4814
4817/** 4815/*
4818 * bfa_rport_api 4816 * bfa_rport_api
4819 */ 4817 */
4820 4818
@@ -4849,7 +4847,7 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4849{ 4847{
4850 bfa_assert(rport_info->max_frmsz != 0); 4848 bfa_assert(rport_info->max_frmsz != 0);
4851 4849
4852 /** 4850 /*
4853 * Some JBODs are seen to be not setting PDU size correctly in PLOGI 4851 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4854 * responses. Default to minimum size. 4852 * responses. Default to minimum size.
4855 */ 4853 */
@@ -4858,7 +4856,7 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4858 rport_info->max_frmsz = FC_MIN_PDUSZ; 4856 rport_info->max_frmsz = FC_MIN_PDUSZ;
4859 } 4857 }
4860 4858
4861 bfa_os_assign(rport->rport_info, *rport_info); 4859 rport->rport_info = *rport_info;
4862 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE); 4860 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
4863} 4861}
4864 4862
@@ -4890,22 +4888,22 @@ bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
4890 struct bfa_rport_qos_attr_s *qos_attr) 4888 struct bfa_rport_qos_attr_s *qos_attr)
4891{ 4889{
4892 qos_attr->qos_priority = rport->qos_attr.qos_priority; 4890 qos_attr->qos_priority = rport->qos_attr.qos_priority;
4893 qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id); 4891 qos_attr->qos_flow_id = be32_to_cpu(rport->qos_attr.qos_flow_id);
4894 4892
4895} 4893}
4896 4894
4897void 4895void
4898bfa_rport_clear_stats(struct bfa_rport_s *rport) 4896bfa_rport_clear_stats(struct bfa_rport_s *rport)
4899{ 4897{
4900 bfa_os_memset(&rport->stats, 0, sizeof(rport->stats)); 4898 memset(&rport->stats, 0, sizeof(rport->stats));
4901} 4899}
4902 4900
4903 4901
4904/** 4902/*
4905 * SGPG related functions 4903 * SGPG related functions
4906 */ 4904 */
4907 4905
4908/** 4906/*
4909 * Compute and return memory needed by FCP(im) module. 4907 * Compute and return memory needed by FCP(im) module.
4910 */ 4908 */
4911static void 4909static void
@@ -4957,8 +4955,8 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4957 bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1))); 4955 bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
4958 4956
4959 for (i = 0; i < mod->num_sgpgs; i++) { 4957 for (i = 0; i < mod->num_sgpgs; i++) {
4960 bfa_os_memset(hsgpg, 0, sizeof(*hsgpg)); 4958 memset(hsgpg, 0, sizeof(*hsgpg));
4961 bfa_os_memset(sgpg, 0, sizeof(*sgpg)); 4959 memset(sgpg, 0, sizeof(*sgpg));
4962 4960
4963 hsgpg->sgpg = sgpg; 4961 hsgpg->sgpg = sgpg;
4964 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); 4962 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
@@ -4997,7 +4995,7 @@ bfa_sgpg_iocdisable(struct bfa_s *bfa)
4997 4995
4998 4996
4999 4997
5000/** 4998/*
5001 * hal_sgpg_public BFA SGPG public functions 4999 * hal_sgpg_public BFA SGPG public functions
5002 */ 5000 */
5003 5001
@@ -5039,7 +5037,7 @@ bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
5039 if (list_empty(&mod->sgpg_wait_q)) 5037 if (list_empty(&mod->sgpg_wait_q))
5040 return; 5038 return;
5041 5039
5042 /** 5040 /*
5043 * satisfy as many waiting requests as possible 5041 * satisfy as many waiting requests as possible
5044 */ 5042 */
5045 do { 5043 do {
@@ -5067,11 +5065,11 @@ bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
5067 5065
5068 wqe->nsgpg_total = wqe->nsgpg = nsgpg; 5066 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
5069 5067
5070 /** 5068 /*
5071 * allocate any left to this one first 5069 * allocate any left to this one first
5072 */ 5070 */
5073 if (mod->free_sgpgs) { 5071 if (mod->free_sgpgs) {
5074 /** 5072 /*
5075 * no one else is waiting for SGPG 5073 * no one else is waiting for SGPG
5076 */ 5074 */
5077 bfa_assert(list_empty(&mod->sgpg_wait_q)); 5075 bfa_assert(list_empty(&mod->sgpg_wait_q));
@@ -5105,7 +5103,7 @@ bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5105 wqe->cbarg = cbarg; 5103 wqe->cbarg = cbarg;
5106} 5104}
5107 5105
5108/** 5106/*
5109 * UF related functions 5107 * UF related functions
5110 */ 5108 */
5111/* 5109/*
@@ -5136,7 +5134,7 @@ claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5136 bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz; 5134 bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
5137 bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz; 5135 bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
5138 5136
5139 bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz); 5137 memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
5140} 5138}
5141 5139
5142static void 5140static void
@@ -5153,11 +5151,11 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5153 5151
5154 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; 5152 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5155 i++, uf_bp_msg++) { 5153 i++, uf_bp_msg++) {
5156 bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); 5154 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5157 5155
5158 uf_bp_msg->buf_tag = i; 5156 uf_bp_msg->buf_tag = i;
5159 buf_len = sizeof(struct bfa_uf_buf_s); 5157 buf_len = sizeof(struct bfa_uf_buf_s);
5160 uf_bp_msg->buf_len = bfa_os_htons(buf_len); 5158 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5161 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, 5159 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5162 bfa_lpuid(ufm->bfa)); 5160 bfa_lpuid(ufm->bfa));
5163 5161
@@ -5173,7 +5171,7 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5173 bfa_sge_to_be(&sge[1]); 5171 bfa_sge_to_be(&sge[1]);
5174 } 5172 }
5175 5173
5176 /** 5174 /*
5177 * advance pointer beyond consumed memory 5175 * advance pointer beyond consumed memory
5178 */ 5176 */
5179 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; 5177 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
@@ -5194,7 +5192,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5194 * Initialize UFs and queue it in UF free queue 5192 * Initialize UFs and queue it in UF free queue
5195 */ 5193 */
5196 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) { 5194 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5197 bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s)); 5195 memset(uf, 0, sizeof(struct bfa_uf_s));
5198 uf->bfa = ufm->bfa; 5196 uf->bfa = ufm->bfa;
5199 uf->uf_tag = i; 5197 uf->uf_tag = i;
5200 uf->pb_len = sizeof(struct bfa_uf_buf_s); 5198 uf->pb_len = sizeof(struct bfa_uf_buf_s);
@@ -5203,7 +5201,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5203 list_add_tail(&uf->qe, &ufm->uf_free_q); 5201 list_add_tail(&uf->qe, &ufm->uf_free_q);
5204 } 5202 }
5205 5203
5206 /** 5204 /*
5207 * advance memory pointer 5205 * advance memory pointer
5208 */ 5206 */
5209 bfa_meminfo_kva(mi) = (u8 *) uf; 5207 bfa_meminfo_kva(mi) = (u8 *) uf;
@@ -5241,7 +5239,7 @@ bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5241{ 5239{
5242 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); 5240 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5243 5241
5244 bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s)); 5242 memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
5245 ufm->bfa = bfa; 5243 ufm->bfa = bfa;
5246 ufm->num_ufs = cfg->fwcfg.num_uf_bufs; 5244 ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5247 INIT_LIST_HEAD(&ufm->uf_free_q); 5245 INIT_LIST_HEAD(&ufm->uf_free_q);
@@ -5279,7 +5277,7 @@ bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5279 if (!uf_post_msg) 5277 if (!uf_post_msg)
5280 return BFA_STATUS_FAILED; 5278 return BFA_STATUS_FAILED;
5281 5279
5282 bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], 5280 memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5283 sizeof(struct bfi_uf_buf_post_s)); 5281 sizeof(struct bfi_uf_buf_post_s));
5284 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP); 5282 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
5285 5283
@@ -5310,8 +5308,8 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5310 u8 *buf = &uf_buf->d[0]; 5308 u8 *buf = &uf_buf->d[0];
5311 struct fchs_s *fchs; 5309 struct fchs_s *fchs;
5312 5310
5313 m->frm_len = bfa_os_ntohs(m->frm_len); 5311 m->frm_len = be16_to_cpu(m->frm_len);
5314 m->xfr_len = bfa_os_ntohs(m->xfr_len); 5312 m->xfr_len = be16_to_cpu(m->xfr_len);
5315 5313
5316 fchs = (struct fchs_s *)uf_buf; 5314 fchs = (struct fchs_s *)uf_buf;
5317 5315
@@ -5365,11 +5363,11 @@ bfa_uf_start(struct bfa_s *bfa)
5365 5363
5366 5364
5367 5365
5368/** 5366/*
5369 * hal_uf_api 5367 * hal_uf_api
5370 */ 5368 */
5371 5369
5372/** 5370/*
5373 * Register handler for all unsolicted recieve frames. 5371 * Register handler for all unsolicted recieve frames.
5374 * 5372 *
5375 * @param[in] bfa BFA instance 5373 * @param[in] bfa BFA instance
@@ -5385,7 +5383,7 @@ bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5385 ufm->cbarg = cbarg; 5383 ufm->cbarg = cbarg;
5386} 5384}
5387 5385
5388/** 5386/*
5389 * Free an unsolicited frame back to BFA. 5387 * Free an unsolicited frame back to BFA.
5390 * 5388 *
5391 * @param[in] uf unsolicited frame to be freed 5389 * @param[in] uf unsolicited frame to be freed
@@ -5401,7 +5399,7 @@ bfa_uf_free(struct bfa_uf_s *uf)
5401 5399
5402 5400
5403 5401
5404/** 5402/*
5405 * uf_pub BFA uf module public functions 5403 * uf_pub BFA uf module public functions
5406 */ 5404 */
5407void 5405void
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 9921dad0d03..e2349d5cdb9 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -22,12 +22,12 @@
22#include "bfi_ms.h" 22#include "bfi_ms.h"
23 23
24 24
25/** 25/*
26 * Scatter-gather DMA related defines 26 * Scatter-gather DMA related defines
27 */ 27 */
28#define BFA_SGPG_MIN (16) 28#define BFA_SGPG_MIN (16)
29 29
30/** 30/*
31 * Alignment macro for SG page allocation 31 * Alignment macro for SG page allocation
32 */ 32 */
33#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \ 33#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
@@ -48,7 +48,7 @@ struct bfa_sgpg_s {
48 union bfi_addr_u sgpg_pa; /* pa of SG page */ 48 union bfi_addr_u sgpg_pa; /* pa of SG page */
49}; 49};
50 50
51/** 51/*
52 * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of 52 * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
53 * SG pages required. 53 * SG pages required.
54 */ 54 */
@@ -75,7 +75,7 @@ void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
75void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe); 75void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
76 76
77 77
78/** 78/*
79 * FCXP related defines 79 * FCXP related defines
80 */ 80 */
81#define BFA_FCXP_MIN (1) 81#define BFA_FCXP_MIN (1)
@@ -115,12 +115,12 @@ typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
115 115
116 116
117 117
118/** 118/*
119 * Information needed for a FCXP request 119 * Information needed for a FCXP request
120 */ 120 */
121struct bfa_fcxp_req_info_s { 121struct bfa_fcxp_req_info_s {
122 struct bfa_rport_s *bfa_rport; 122 struct bfa_rport_s *bfa_rport;
123 /** Pointer to the bfa rport that was 123 /* Pointer to the bfa rport that was
124 * returned from bfa_rport_create(). 124 * returned from bfa_rport_create().
125 * This could be left NULL for WKA or 125 * This could be left NULL for WKA or
126 * for FCXP interactions before the 126 * for FCXP interactions before the
@@ -137,11 +137,10 @@ struct bfa_fcxp_req_info_s {
137 137
138struct bfa_fcxp_rsp_info_s { 138struct bfa_fcxp_rsp_info_s {
139 struct fchs_s rsp_fchs; 139 struct fchs_s rsp_fchs;
140 /** !< Response frame's FC header will 140 /* Response frame's FC header will
141 * be sent back in this field */ 141 * be sent back in this field */
142 u8 rsp_timeout; 142 u8 rsp_timeout;
143 /** !< timeout in seconds, 0-no response 143 /* timeout in seconds, 0-no response */
144 */
145 u8 rsvd2[3]; 144 u8 rsvd2[3];
146 u32 rsp_maxlen; /* max response length expected */ 145 u32 rsp_maxlen; /* max response length expected */
147}; 146};
@@ -218,7 +217,7 @@ struct bfa_fcxp_wqe_s {
218void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 217void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
219 218
220 219
221/** 220/*
222 * RPORT related defines 221 * RPORT related defines
223 */ 222 */
224#define BFA_RPORT_MIN 4 223#define BFA_RPORT_MIN 4
@@ -232,7 +231,7 @@ struct bfa_rport_mod_s {
232 231
233#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod) 232#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
234 233
235/** 234/*
236 * Convert rport tag to RPORT 235 * Convert rport tag to RPORT
237 */ 236 */
238#define BFA_RPORT_FROM_TAG(__bfa, _tag) \ 237#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
@@ -244,7 +243,7 @@ struct bfa_rport_mod_s {
244 */ 243 */
245void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 244void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
246 245
247/** 246/*
248 * BFA rport information. 247 * BFA rport information.
249 */ 248 */
250struct bfa_rport_info_s { 249struct bfa_rport_info_s {
@@ -259,7 +258,7 @@ struct bfa_rport_info_s {
259 enum bfa_port_speed speed; /* Rport's current speed */ 258 enum bfa_port_speed speed; /* Rport's current speed */
260}; 259};
261 260
262/** 261/*
263 * BFA rport data structure 262 * BFA rport data structure
264 */ 263 */
265struct bfa_rport_s { 264struct bfa_rport_s {
@@ -282,7 +281,7 @@ struct bfa_rport_s {
282#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class) 281#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
283 282
284 283
285/** 284/*
286 * UF - unsolicited receive related defines 285 * UF - unsolicited receive related defines
287 */ 286 */
288 287
@@ -305,7 +304,7 @@ struct bfa_uf_s {
305 struct bfa_sge_s sges[BFI_SGE_INLINE_MAX]; 304 struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
306}; 305};
307 306
308/** 307/*
309 * Callback prototype for unsolicited frame receive handler. 308 * Callback prototype for unsolicited frame receive handler.
310 * 309 *
311 * @param[in] cbarg callback arg for receive handler 310 * @param[in] cbarg callback arg for receive handler
@@ -338,7 +337,7 @@ void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
338 337
339#define BFA_UF_BUFSZ (2 * 1024 + 256) 338#define BFA_UF_BUFSZ (2 * 1024 + 256)
340 339
341/** 340/*
342 * @todo private 341 * @todo private
343 */ 342 */
344struct bfa_uf_buf_s { 343struct bfa_uf_buf_s {
@@ -346,7 +345,7 @@ struct bfa_uf_buf_s {
346}; 345};
347 346
348 347
349/** 348/*
350 * LPS - bfa lport login/logout service interface 349 * LPS - bfa lport login/logout service interface
351 */ 350 */
352struct bfa_lps_s { 351struct bfa_lps_s {
@@ -397,14 +396,14 @@ struct bfa_lps_mod_s {
397void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 396void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
398 397
399 398
400/** 399/*
401 * FCPORT related defines 400 * FCPORT related defines
402 */ 401 */
403 402
404#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) 403#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
405typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status); 404typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
406 405
407/** 406/*
408 * Link notification data structure 407 * Link notification data structure
409 */ 408 */
410struct bfa_fcport_ln_s { 409struct bfa_fcport_ln_s {
@@ -418,7 +417,7 @@ struct bfa_fcport_trunk_s {
418 struct bfa_trunk_attr_s attr; 417 struct bfa_trunk_attr_s attr;
419}; 418};
420 419
421/** 420/*
422 * BFA FC port data structure 421 * BFA FC port data structure
423 */ 422 */
424struct bfa_fcport_s { 423struct bfa_fcport_s {
@@ -613,7 +612,7 @@ void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
613 void *cbarg); 612 void *cbarg);
614void bfa_uf_free(struct bfa_uf_s *uf); 613void bfa_uf_free(struct bfa_uf_s *uf);
615 614
616/** 615/*
617 * bfa lport service api 616 * bfa lport service api
618 */ 617 */
619 618
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 4d8784e06e1..1f938974b84 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfad.c Linux driver PCI interface module. 19 * bfad.c Linux driver PCI interface module.
20 */ 20 */
21#include <linux/module.h> 21#include <linux/module.h>
@@ -151,7 +151,7 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
151static void 151static void
152bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); 152bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
153 153
154/** 154/*
155 * Beginning state for the driver instance, awaiting the pci_probe event 155 * Beginning state for the driver instance, awaiting the pci_probe event
156 */ 156 */
157static void 157static void
@@ -181,7 +181,7 @@ bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
181 } 181 }
182} 182}
183 183
184/** 184/*
185 * Driver Instance is created, awaiting event INIT to initialize the bfad 185 * Driver Instance is created, awaiting event INIT to initialize the bfad
186 */ 186 */
187static void 187static void
@@ -364,7 +364,7 @@ bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
364 } 364 }
365} 365}
366 366
367/** 367/*
368 * BFA callbacks 368 * BFA callbacks
369 */ 369 */
370void 370void
@@ -376,7 +376,7 @@ bfad_hcb_comp(void *arg, bfa_status_t status)
376 complete(&fcomp->comp); 376 complete(&fcomp->comp);
377} 377}
378 378
379/** 379/*
380 * bfa_init callback 380 * bfa_init callback
381 */ 381 */
382void 382void
@@ -401,7 +401,7 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
401 complete(&bfad->comp); 401 complete(&bfad->comp);
402} 402}
403 403
404/** 404/*
405 * BFA_FCS callbacks 405 * BFA_FCS callbacks
406 */ 406 */
407struct bfad_port_s * 407struct bfad_port_s *
@@ -457,7 +457,7 @@ bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
457 } 457 }
458} 458}
459 459
460/** 460/*
461 * FCS RPORT alloc callback, after successful PLOGI by FCS 461 * FCS RPORT alloc callback, after successful PLOGI by FCS
462 */ 462 */
463bfa_status_t 463bfa_status_t
@@ -478,7 +478,7 @@ ext:
478 return rc; 478 return rc;
479} 479}
480 480
481/** 481/*
482 * FCS PBC VPORT Create 482 * FCS PBC VPORT Create
483 */ 483 */
484void 484void
@@ -663,7 +663,7 @@ ext:
663 return rc; 663 return rc;
664} 664}
665 665
666/** 666/*
667 * Create a vport under a vf. 667 * Create a vport under a vf.
668 */ 668 */
669bfa_status_t 669bfa_status_t
@@ -716,30 +716,6 @@ ext:
716 return rc; 716 return rc;
717} 717}
718 718
719/**
720 * Create a vf and its base vport implicitely.
721 */
722bfa_status_t
723bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
724 struct bfa_lport_cfg_s *port_cfg)
725{
726 struct bfad_vf_s *vf;
727 int rc = BFA_STATUS_OK;
728
729 vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
730 if (!vf) {
731 rc = BFA_STATUS_FAILED;
732 goto ext;
733 }
734
735 rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg,
736 vf);
737 if (rc != BFA_STATUS_OK)
738 kfree(vf);
739ext:
740 return rc;
741}
742
743void 719void
744bfad_bfa_tmo(unsigned long data) 720bfad_bfa_tmo(unsigned long data)
745{ 721{
@@ -885,20 +861,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
885 pci_set_drvdata(pdev, NULL); 861 pci_set_drvdata(pdev, NULL);
886} 862}
887 863
888void
889bfad_fcs_port_cfg(struct bfad_s *bfad)
890{
891 struct bfa_lport_cfg_s port_cfg;
892 struct bfa_port_attr_s attr;
893 char symname[BFA_SYMNAME_MAXLEN];
894
895 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
896 memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
897 bfa_fcport_get_attr(&bfad->bfa, &attr);
898 port_cfg.nwwn = attr.nwwn;
899 port_cfg.pwwn = attr.pwwn;
900}
901
902bfa_status_t 864bfa_status_t
903bfad_drv_init(struct bfad_s *bfad) 865bfad_drv_init(struct bfad_s *bfad)
904{ 866{
@@ -1089,9 +1051,6 @@ bfad_start_ops(struct bfad_s *bfad) {
1089 bfa_fcs_init(&bfad->bfa_fcs); 1051 bfa_fcs_init(&bfad->bfa_fcs);
1090 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1052 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1091 1053
1092 /* PPORT FCS config */
1093 bfad_fcs_port_cfg(bfad);
1094
1095 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); 1054 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
1096 if (retval != BFA_STATUS_OK) { 1055 if (retval != BFA_STATUS_OK) {
1097 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) 1056 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
@@ -1181,7 +1140,7 @@ bfad_worker(void *ptr)
1181 return 0; 1140 return 0;
1182} 1141}
1183 1142
1184/** 1143/*
1185 * BFA driver interrupt functions 1144 * BFA driver interrupt functions
1186 */ 1145 */
1187irqreturn_t 1146irqreturn_t
@@ -1240,7 +1199,7 @@ bfad_msix(int irq, void *dev_id)
1240 return IRQ_HANDLED; 1199 return IRQ_HANDLED;
1241} 1200}
1242 1201
1243/** 1202/*
1244 * Initialize the MSIX entry table. 1203 * Initialize the MSIX entry table.
1245 */ 1204 */
1246static void 1205static void
@@ -1293,7 +1252,7 @@ bfad_install_msix_handler(struct bfad_s *bfad)
1293 return 0; 1252 return 0;
1294} 1253}
1295 1254
1296/** 1255/*
1297 * Setup MSIX based interrupt. 1256 * Setup MSIX based interrupt.
1298 */ 1257 */
1299int 1258int
@@ -1374,7 +1333,7 @@ bfad_remove_intr(struct bfad_s *bfad)
1374 } 1333 }
1375} 1334}
1376 1335
1377/** 1336/*
1378 * PCI probe entry. 1337 * PCI probe entry.
1379 */ 1338 */
1380int 1339int
@@ -1460,7 +1419,7 @@ out:
1460 return error; 1419 return error;
1461} 1420}
1462 1421
1463/** 1422/*
1464 * PCI remove entry. 1423 * PCI remove entry.
1465 */ 1424 */
1466void 1425void
@@ -1541,7 +1500,7 @@ static struct pci_driver bfad_pci_driver = {
1541 .remove = __devexit_p(bfad_pci_remove), 1500 .remove = __devexit_p(bfad_pci_remove),
1542}; 1501};
1543 1502
1544/** 1503/*
1545 * Driver module init. 1504 * Driver module init.
1546 */ 1505 */
1547static int __init 1506static int __init
@@ -1581,7 +1540,7 @@ ext:
1581 return error; 1540 return error;
1582} 1541}
1583 1542
1584/** 1543/*
1585 * Driver module exit. 1544 * Driver module exit.
1586 */ 1545 */
1587static void __exit 1546static void __exit
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index d8843720eac..ed9fff440b5 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -15,14 +15,14 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfa_attr.c Linux driver configuration interface module. 19 * bfa_attr.c Linux driver configuration interface module.
20 */ 20 */
21 21
22#include "bfad_drv.h" 22#include "bfad_drv.h"
23#include "bfad_im.h" 23#include "bfad_im.h"
24 24
25/** 25/*
26 * FC transport template entry, get SCSI target port ID. 26 * FC transport template entry, get SCSI target port ID.
27 */ 27 */
28void 28void
@@ -48,7 +48,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
48 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 48 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
49} 49}
50 50
51/** 51/*
52 * FC transport template entry, get SCSI target nwwn. 52 * FC transport template entry, get SCSI target nwwn.
53 */ 53 */
54void 54void
@@ -70,11 +70,11 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
70 if (itnim) 70 if (itnim)
71 node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); 71 node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
72 72
73 fc_starget_node_name(starget) = bfa_os_htonll(node_name); 73 fc_starget_node_name(starget) = cpu_to_be64(node_name);
74 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 74 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
75} 75}
76 76
77/** 77/*
78 * FC transport template entry, get SCSI target pwwn. 78 * FC transport template entry, get SCSI target pwwn.
79 */ 79 */
80void 80void
@@ -96,11 +96,11 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
96 if (itnim) 96 if (itnim)
97 port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); 97 port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
98 98
99 fc_starget_port_name(starget) = bfa_os_htonll(port_name); 99 fc_starget_port_name(starget) = cpu_to_be64(port_name);
100 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 100 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
101} 101}
102 102
103/** 103/*
104 * FC transport template entry, get SCSI host port ID. 104 * FC transport template entry, get SCSI host port ID.
105 */ 105 */
106void 106void
@@ -114,7 +114,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); 114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
115} 115}
116 116
117/** 117/*
118 * FC transport template entry, get SCSI host port type. 118 * FC transport template entry, get SCSI host port type.
119 */ 119 */
120static void 120static void
@@ -146,7 +146,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
146 } 146 }
147} 147}
148 148
149/** 149/*
150 * FC transport template entry, get SCSI host port state. 150 * FC transport template entry, get SCSI host port state.
151 */ 151 */
152static void 152static void
@@ -183,7 +183,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
183 } 183 }
184} 184}
185 185
186/** 186/*
187 * FC transport template entry, get SCSI host active fc4s. 187 * FC transport template entry, get SCSI host active fc4s.
188 */ 188 */
189static void 189static void
@@ -202,7 +202,7 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
202 fc_host_active_fc4s(shost)[7] = 1; 202 fc_host_active_fc4s(shost)[7] = 1;
203} 203}
204 204
205/** 205/*
206 * FC transport template entry, get SCSI host link speed. 206 * FC transport template entry, get SCSI host link speed.
207 */ 207 */
208static void 208static void
@@ -236,7 +236,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
236 } 236 }
237} 237}
238 238
239/** 239/*
240 * FC transport template entry, get SCSI host port type. 240 * FC transport template entry, get SCSI host port type.
241 */ 241 */
242static void 242static void
@@ -249,11 +249,11 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
249 249
250 fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port); 250 fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
251 251
252 fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn); 252 fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn);
253 253
254} 254}
255 255
256/** 256/*
257 * FC transport template entry, get BFAD statistics. 257 * FC transport template entry, get BFAD statistics.
258 */ 258 */
259static struct fc_host_statistics * 259static struct fc_host_statistics *
@@ -304,7 +304,7 @@ bfad_im_get_stats(struct Scsi_Host *shost)
304 return hstats; 304 return hstats;
305} 305}
306 306
307/** 307/*
308 * FC transport template entry, reset BFAD statistics. 308 * FC transport template entry, reset BFAD statistics.
309 */ 309 */
310static void 310static void
@@ -331,7 +331,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
331 return; 331 return;
332} 332}
333 333
334/** 334/*
335 * FC transport template entry, get rport loss timeout. 335 * FC transport template entry, get rport loss timeout.
336 */ 336 */
337static void 337static void
@@ -347,7 +347,7 @@ bfad_im_get_rport_loss_tmo(struct fc_rport *rport)
347 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 347 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
348} 348}
349 349
350/** 350/*
351 * FC transport template entry, set rport loss timeout. 351 * FC transport template entry, set rport loss timeout.
352 */ 352 */
353static void 353static void
@@ -633,7 +633,7 @@ struct fc_function_template bfad_im_vport_fc_function_template = {
633 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, 633 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
634}; 634};
635 635
636/** 636/*
637 * Scsi_Host_attrs SCSI host attributes 637 * Scsi_Host_attrs SCSI host attributes
638 */ 638 */
639static ssize_t 639static ssize_t
@@ -733,7 +733,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
733 u64 nwwn; 733 u64 nwwn;
734 734
735 nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); 735 nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
736 return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn)); 736 return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
737} 737}
738 738
739static ssize_t 739static ssize_t
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 69ed1c4a903..1fedeeb4ac1 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -318,7 +318,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
318 regbuf = (u32 *)bfad->regdata; 318 regbuf = (u32 *)bfad->regdata;
319 spin_lock_irqsave(&bfad->bfad_lock, flags); 319 spin_lock_irqsave(&bfad->bfad_lock, flags);
320 for (i = 0; i < len; i++) { 320 for (i = 0; i < len; i++) {
321 *regbuf = bfa_reg_read(reg_addr); 321 *regbuf = readl(reg_addr);
322 regbuf++; 322 regbuf++;
323 reg_addr += sizeof(u32); 323 reg_addr += sizeof(u32);
324 } 324 }
@@ -361,7 +361,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
361 361
362 reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr); 362 reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
363 spin_lock_irqsave(&bfad->bfad_lock, flags); 363 spin_lock_irqsave(&bfad->bfad_lock, flags);
364 bfa_reg_write(reg_addr, val); 364 writel(val, reg_addr);
365 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 365 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
366 366
367 return nbytes; 367 return nbytes;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 98420bbb4f3..97f9b6c0937 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -15,11 +15,11 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * Contains base driver definitions. 19 * Contains base driver definitions.
20 */ 20 */
21 21
22/** 22/*
23 * bfa_drv.h Linux driver data structures. 23 * bfa_drv.h Linux driver data structures.
24 */ 24 */
25 25
@@ -309,7 +309,6 @@ void bfad_bfa_tmo(unsigned long data);
309void bfad_init_timer(struct bfad_s *bfad); 309void bfad_init_timer(struct bfad_s *bfad);
310int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad); 310int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
311void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); 311void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
312void bfad_fcs_port_cfg(struct bfad_s *bfad);
313void bfad_drv_uninit(struct bfad_s *bfad); 312void bfad_drv_uninit(struct bfad_s *bfad);
314int bfad_worker(void *ptr); 313int bfad_worker(void *ptr);
315void bfad_debugfs_init(struct bfad_port_s *port); 314void bfad_debugfs_init(struct bfad_port_s *port);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index d950ee44016..8daa716739d 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfad_im.c Linux driver IM module. 19 * bfad_im.c Linux driver IM module.
20 */ 20 */
21 21
@@ -164,10 +164,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
164 wake_up(wq); 164 wake_up(wq);
165} 165}
166 166
167/** 167/*
168 * Scsi_Host_template SCSI host template 168 * Scsi_Host_template SCSI host template
169 */ 169 */
170/** 170/*
171 * Scsi_Host template entry, returns BFAD PCI info. 171 * Scsi_Host template entry, returns BFAD PCI info.
172 */ 172 */
173static const char * 173static const char *
@@ -196,7 +196,7 @@ bfad_im_info(struct Scsi_Host *shost)
196 return bfa_buf; 196 return bfa_buf;
197} 197}
198 198
199/** 199/*
200 * Scsi_Host template entry, aborts the specified SCSI command. 200 * Scsi_Host template entry, aborts the specified SCSI command.
201 * 201 *
202 * Returns: SUCCESS or FAILED. 202 * Returns: SUCCESS or FAILED.
@@ -280,7 +280,7 @@ out:
280 return rc; 280 return rc;
281} 281}
282 282
283/** 283/*
284 * Scsi_Host template entry, resets a LUN and abort its all commands. 284 * Scsi_Host template entry, resets a LUN and abort its all commands.
285 * 285 *
286 * Returns: SUCCESS or FAILED. 286 * Returns: SUCCESS or FAILED.
@@ -319,7 +319,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
319 goto out; 319 goto out;
320 } 320 }
321 321
322 /** 322 /*
323 * Set host_scribble to NULL to avoid aborting a task command 323 * Set host_scribble to NULL to avoid aborting a task command
324 * if happens. 324 * if happens.
325 */ 325 */
@@ -346,7 +346,7 @@ out:
346 return rc; 346 return rc;
347} 347}
348 348
349/** 349/*
350 * Scsi_Host template entry, resets the bus and abort all commands. 350 * Scsi_Host template entry, resets the bus and abort all commands.
351 */ 351 */
352static int 352static int
@@ -396,7 +396,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
396 return SUCCESS; 396 return SUCCESS;
397} 397}
398 398
399/** 399/*
400 * Scsi_Host template entry slave_destroy. 400 * Scsi_Host template entry slave_destroy.
401 */ 401 */
402static void 402static void
@@ -406,11 +406,11 @@ bfad_im_slave_destroy(struct scsi_device *sdev)
406 return; 406 return;
407} 407}
408 408
409/** 409/*
410 * BFA FCS itnim callbacks 410 * BFA FCS itnim callbacks
411 */ 411 */
412 412
413/** 413/*
414 * BFA FCS itnim alloc callback, after successful PRLI 414 * BFA FCS itnim alloc callback, after successful PRLI
415 * Context: Interrupt 415 * Context: Interrupt
416 */ 416 */
@@ -433,7 +433,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
433 bfad->bfad_flags |= BFAD_RPORT_ONLINE; 433 bfad->bfad_flags |= BFAD_RPORT_ONLINE;
434} 434}
435 435
436/** 436/*
437 * BFA FCS itnim free callback. 437 * BFA FCS itnim free callback.
438 * Context: Interrupt. bfad_lock is held 438 * Context: Interrupt. bfad_lock is held
439 */ 439 */
@@ -471,7 +471,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
471 queue_work(im->drv_workq, &itnim_drv->itnim_work); 471 queue_work(im->drv_workq, &itnim_drv->itnim_work);
472} 472}
473 473
474/** 474/*
475 * BFA FCS itnim online callback. 475 * BFA FCS itnim online callback.
476 * Context: Interrupt. bfad_lock is held 476 * Context: Interrupt. bfad_lock is held
477 */ 477 */
@@ -492,7 +492,7 @@ bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
492 queue_work(im->drv_workq, &itnim_drv->itnim_work); 492 queue_work(im->drv_workq, &itnim_drv->itnim_work);
493} 493}
494 494
495/** 495/*
496 * BFA FCS itnim offline callback. 496 * BFA FCS itnim offline callback.
497 * Context: Interrupt. bfad_lock is held 497 * Context: Interrupt. bfad_lock is held
498 */ 498 */
@@ -519,7 +519,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
519 queue_work(im->drv_workq, &itnim_drv->itnim_work); 519 queue_work(im->drv_workq, &itnim_drv->itnim_work);
520} 520}
521 521
522/** 522/*
523 * Allocate a Scsi_Host for a port. 523 * Allocate a Scsi_Host for a port.
524 */ 524 */
525int 525int
@@ -751,7 +751,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
751 return BFA_STATUS_OK; 751 return BFA_STATUS_OK;
752} 752}
753 753
754/** 754/*
755 * Scsi_Host template entry. 755 * Scsi_Host template entry.
756 * 756 *
757 * Description: 757 * Description:
@@ -896,7 +896,7 @@ bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
896 return NULL; 896 return NULL;
897} 897}
898 898
899/** 899/*
900 * Scsi_Host template entry slave_alloc 900 * Scsi_Host template entry slave_alloc
901 */ 901 */
902static int 902static int
@@ -915,12 +915,16 @@ bfad_im_slave_alloc(struct scsi_device *sdev)
915static u32 915static u32
916bfad_im_supported_speeds(struct bfa_s *bfa) 916bfad_im_supported_speeds(struct bfa_s *bfa)
917{ 917{
918 struct bfa_ioc_attr_s ioc_attr; 918 struct bfa_ioc_attr_s *ioc_attr;
919 u32 supported_speed = 0; 919 u32 supported_speed = 0;
920 920
921 bfa_get_attr(bfa, &ioc_attr); 921 ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
922 if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { 922 if (!ioc_attr)
923 if (ioc_attr.adapter_attr.is_mezz) { 923 return 0;
924
925 bfa_get_attr(bfa, ioc_attr);
926 if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
927 if (ioc_attr->adapter_attr.is_mezz) {
924 supported_speed |= FC_PORTSPEED_8GBIT | 928 supported_speed |= FC_PORTSPEED_8GBIT |
925 FC_PORTSPEED_4GBIT | 929 FC_PORTSPEED_4GBIT |
926 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 930 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
@@ -929,12 +933,13 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
929 FC_PORTSPEED_4GBIT | 933 FC_PORTSPEED_4GBIT |
930 FC_PORTSPEED_2GBIT; 934 FC_PORTSPEED_2GBIT;
931 } 935 }
932 } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) { 936 } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
933 supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | 937 supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
934 FC_PORTSPEED_1GBIT; 938 FC_PORTSPEED_1GBIT;
935 } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) { 939 } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
936 supported_speed |= FC_PORTSPEED_10GBIT; 940 supported_speed |= FC_PORTSPEED_10GBIT;
937 } 941 }
942 kfree(ioc_attr);
938 return supported_speed; 943 return supported_speed;
939} 944}
940 945
@@ -944,14 +949,13 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
944 struct Scsi_Host *host = im_port->shost; 949 struct Scsi_Host *host = im_port->shost;
945 struct bfad_s *bfad = im_port->bfad; 950 struct bfad_s *bfad = im_port->bfad;
946 struct bfad_port_s *port = im_port->port; 951 struct bfad_port_s *port = im_port->port;
947 struct bfa_port_attr_s pattr;
948 struct bfa_lport_attr_s port_attr;
949 char symname[BFA_SYMNAME_MAXLEN]; 952 char symname[BFA_SYMNAME_MAXLEN];
953 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
950 954
951 fc_host_node_name(host) = 955 fc_host_node_name(host) =
952 bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port))); 956 cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
953 fc_host_port_name(host) = 957 fc_host_port_name(host) =
954 bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port))); 958 cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
955 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); 959 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
956 960
957 fc_host_supported_classes(host) = FC_COS_CLASS3; 961 fc_host_supported_classes(host) = FC_COS_CLASS3;
@@ -964,15 +968,12 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
964 /* For fibre channel services type 0x20 */ 968 /* For fibre channel services type 0x20 */
965 fc_host_supported_fc4s(host)[7] = 1; 969 fc_host_supported_fc4s(host)[7] = 1;
966 970
967 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); 971 strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
968 strncpy(symname, port_attr.port_cfg.sym_name.symname,
969 BFA_SYMNAME_MAXLEN); 972 BFA_SYMNAME_MAXLEN);
970 sprintf(fc_host_symbolic_name(host), "%s", symname); 973 sprintf(fc_host_symbolic_name(host), "%s", symname);
971 974
972 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); 975 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
973 976 fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
974 bfa_fcport_get_attr(&bfad->bfa, &pattr);
975 fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
976} 977}
977 978
978static void 979static void
@@ -983,9 +984,9 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
983 struct bfad_itnim_data_s *itnim_data; 984 struct bfad_itnim_data_s *itnim_data;
984 985
985 rport_ids.node_name = 986 rport_ids.node_name =
986 bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim)); 987 cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
987 rport_ids.port_name = 988 rport_ids.port_name =
988 bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); 989 cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
989 rport_ids.port_id = 990 rport_ids.port_id =
990 bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim)); 991 bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
991 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 992 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
@@ -1015,7 +1016,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
1015 return; 1016 return;
1016} 1017}
1017 1018
1018/** 1019/*
1019 * Work queue handler using FC transport service 1020 * Work queue handler using FC transport service
1020* Context: kernel 1021* Context: kernel
1021 */ 1022 */
@@ -1115,7 +1116,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
1115 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1116 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1116} 1117}
1117 1118
1118/** 1119/*
1119 * Scsi_Host template entry, queue a SCSI command to the BFAD. 1120 * Scsi_Host template entry, queue a SCSI command to the BFAD.
1120 */ 1121 */
1121static int 1122static int
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 85f2224a573..58796d1284b 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -23,7 +23,7 @@
23 23
24#pragma pack(1) 24#pragma pack(1)
25 25
26/** 26/*
27 * BFI FW image type 27 * BFI FW image type
28 */ 28 */
29#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ 29#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
@@ -35,7 +35,7 @@ enum {
35 BFI_IMAGE_MAX, 35 BFI_IMAGE_MAX,
36}; 36};
37 37
38/** 38/*
39 * Msg header common to all msgs 39 * Msg header common to all msgs
40 */ 40 */
41struct bfi_mhdr_s { 41struct bfi_mhdr_s {
@@ -68,7 +68,7 @@ struct bfi_mhdr_s {
68#define BFI_I2H_OPCODE_BASE 128 68#define BFI_I2H_OPCODE_BASE 128
69#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) 69#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
70 70
71/** 71/*
72 **************************************************************************** 72 ****************************************************************************
73 * 73 *
74 * Scatter Gather Element and Page definition 74 * Scatter Gather Element and Page definition
@@ -79,7 +79,7 @@ struct bfi_mhdr_s {
79#define BFI_SGE_INLINE 1 79#define BFI_SGE_INLINE 1
80#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1) 80#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
81 81
82/** 82/*
83 * SG Flags 83 * SG Flags
84 */ 84 */
85enum { 85enum {
@@ -90,7 +90,7 @@ enum {
90 BFI_SGE_PGDLEN = 2, /* cumulative data length for page */ 90 BFI_SGE_PGDLEN = 2, /* cumulative data length for page */
91}; 91};
92 92
93/** 93/*
94 * DMA addresses 94 * DMA addresses
95 */ 95 */
96union bfi_addr_u { 96union bfi_addr_u {
@@ -100,7 +100,7 @@ union bfi_addr_u {
100 } a32; 100 } a32;
101}; 101};
102 102
103/** 103/*
104 * Scatter Gather Element 104 * Scatter Gather Element
105 */ 105 */
106struct bfi_sge_s { 106struct bfi_sge_s {
@@ -116,7 +116,7 @@ struct bfi_sge_s {
116 union bfi_addr_u sga; 116 union bfi_addr_u sga;
117}; 117};
118 118
119/** 119/*
120 * Scatter Gather Page 120 * Scatter Gather Page
121 */ 121 */
122#define BFI_SGPG_DATA_SGES 7 122#define BFI_SGPG_DATA_SGES 7
@@ -139,7 +139,7 @@ struct bfi_msg_s {
139 u32 pl[BFI_LMSG_PL_WSZ]; 139 u32 pl[BFI_LMSG_PL_WSZ];
140}; 140};
141 141
142/** 142/*
143 * Mailbox message structure 143 * Mailbox message structure
144 */ 144 */
145#define BFI_MBMSG_SZ 7 145#define BFI_MBMSG_SZ 7
@@ -148,7 +148,7 @@ struct bfi_mbmsg_s {
148 u32 pl[BFI_MBMSG_SZ]; 148 u32 pl[BFI_MBMSG_SZ];
149}; 149};
150 150
151/** 151/*
152 * Message Classes 152 * Message Classes
153 */ 153 */
154enum bfi_mclass { 154enum bfi_mclass {
@@ -186,7 +186,7 @@ enum bfi_mclass {
186#define BFI_BOOT_LOADER_BIOS 1 186#define BFI_BOOT_LOADER_BIOS 1
187#define BFI_BOOT_LOADER_UEFI 2 187#define BFI_BOOT_LOADER_UEFI 2
188 188
189/** 189/*
190 *---------------------------------------------------------------------- 190 *----------------------------------------------------------------------
191 * IOC 191 * IOC
192 *---------------------------------------------------------------------- 192 *----------------------------------------------------------------------
@@ -208,7 +208,7 @@ enum bfi_ioc_i2h_msgs {
208 BFI_IOC_I2H_HBEAT = BFA_I2HM(5), 208 BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
209}; 209};
210 210
211/** 211/*
212 * BFI_IOC_H2I_GETATTR_REQ message 212 * BFI_IOC_H2I_GETATTR_REQ message
213 */ 213 */
214struct bfi_ioc_getattr_req_s { 214struct bfi_ioc_getattr_req_s {
@@ -242,7 +242,7 @@ struct bfi_ioc_attr_s {
242 u32 card_type; /* card type */ 242 u32 card_type; /* card type */
243}; 243};
244 244
245/** 245/*
246 * BFI_IOC_I2H_GETATTR_REPLY message 246 * BFI_IOC_I2H_GETATTR_REPLY message
247 */ 247 */
248struct bfi_ioc_getattr_reply_s { 248struct bfi_ioc_getattr_reply_s {
@@ -251,19 +251,19 @@ struct bfi_ioc_getattr_reply_s {
251 u8 rsvd[3]; 251 u8 rsvd[3];
252}; 252};
253 253
254/** 254/*
255 * Firmware memory page offsets 255 * Firmware memory page offsets
256 */ 256 */
257#define BFI_IOC_SMEM_PG0_CB (0x40) 257#define BFI_IOC_SMEM_PG0_CB (0x40)
258#define BFI_IOC_SMEM_PG0_CT (0x180) 258#define BFI_IOC_SMEM_PG0_CT (0x180)
259 259
260/** 260/*
261 * Firmware statistic offset 261 * Firmware statistic offset
262 */ 262 */
263#define BFI_IOC_FWSTATS_OFF (0x6B40) 263#define BFI_IOC_FWSTATS_OFF (0x6B40)
264#define BFI_IOC_FWSTATS_SZ (4096) 264#define BFI_IOC_FWSTATS_SZ (4096)
265 265
266/** 266/*
267 * Firmware trace offset 267 * Firmware trace offset
268 */ 268 */
269#define BFI_IOC_TRC_OFF (0x4b00) 269#define BFI_IOC_TRC_OFF (0x4b00)
@@ -280,7 +280,7 @@ struct bfi_ioc_image_hdr_s {
280 u32 md5sum[BFI_IOC_MD5SUM_SZ]; 280 u32 md5sum[BFI_IOC_MD5SUM_SZ];
281}; 281};
282 282
283/** 283/*
284 * BFI_IOC_I2H_READY_EVENT message 284 * BFI_IOC_I2H_READY_EVENT message
285 */ 285 */
286struct bfi_ioc_rdy_event_s { 286struct bfi_ioc_rdy_event_s {
@@ -294,7 +294,7 @@ struct bfi_ioc_hbeat_s {
294 u32 hb_count; /* current heart beat count */ 294 u32 hb_count; /* current heart beat count */
295}; 295};
296 296
297/** 297/*
298 * IOC hardware/firmware state 298 * IOC hardware/firmware state
299 */ 299 */
300enum bfi_ioc_state { 300enum bfi_ioc_state {
@@ -340,7 +340,7 @@ enum {
340 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ 340 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
341 BFI_ADAPTER_UNSUPP)) 341 BFI_ADAPTER_UNSUPP))
342 342
343/** 343/*
344 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages 344 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
345 */ 345 */
346struct bfi_ioc_ctrl_req_s { 346struct bfi_ioc_ctrl_req_s {
@@ -352,7 +352,7 @@ struct bfi_ioc_ctrl_req_s {
352#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s; 352#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
353#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s; 353#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s;
354 354
355/** 355/*
356 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages 356 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
357 */ 357 */
358struct bfi_ioc_ctrl_reply_s { 358struct bfi_ioc_ctrl_reply_s {
@@ -364,7 +364,7 @@ struct bfi_ioc_ctrl_reply_s {
364#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s; 364#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
365 365
366#define BFI_IOC_MSGSZ 8 366#define BFI_IOC_MSGSZ 8
367/** 367/*
368 * H2I Messages 368 * H2I Messages
369 */ 369 */
370union bfi_ioc_h2i_msg_u { 370union bfi_ioc_h2i_msg_u {
@@ -375,7 +375,7 @@ union bfi_ioc_h2i_msg_u {
375 u32 mboxmsg[BFI_IOC_MSGSZ]; 375 u32 mboxmsg[BFI_IOC_MSGSZ];
376}; 376};
377 377
378/** 378/*
379 * I2H Messages 379 * I2H Messages
380 */ 380 */
381union bfi_ioc_i2h_msg_u { 381union bfi_ioc_i2h_msg_u {
@@ -385,7 +385,7 @@ union bfi_ioc_i2h_msg_u {
385}; 385};
386 386
387 387
388/** 388/*
389 *---------------------------------------------------------------------- 389 *----------------------------------------------------------------------
390 * PBC 390 * PBC
391 *---------------------------------------------------------------------- 391 *----------------------------------------------------------------------
@@ -394,7 +394,7 @@ union bfi_ioc_i2h_msg_u {
394#define BFI_PBC_MAX_BLUNS 8 394#define BFI_PBC_MAX_BLUNS 8
395#define BFI_PBC_MAX_VPORTS 16 395#define BFI_PBC_MAX_VPORTS 16
396 396
397/** 397/*
398 * PBC boot lun configuration 398 * PBC boot lun configuration
399 */ 399 */
400struct bfi_pbc_blun_s { 400struct bfi_pbc_blun_s {
@@ -402,7 +402,7 @@ struct bfi_pbc_blun_s {
402 lun_t tgt_lun; 402 lun_t tgt_lun;
403}; 403};
404 404
405/** 405/*
406 * PBC virtual port configuration 406 * PBC virtual port configuration
407 */ 407 */
408struct bfi_pbc_vport_s { 408struct bfi_pbc_vport_s {
@@ -410,7 +410,7 @@ struct bfi_pbc_vport_s {
410 wwn_t vp_nwwn; 410 wwn_t vp_nwwn;
411}; 411};
412 412
413/** 413/*
414 * BFI pre-boot configuration information 414 * BFI pre-boot configuration information
415 */ 415 */
416struct bfi_pbc_s { 416struct bfi_pbc_s {
@@ -427,7 +427,7 @@ struct bfi_pbc_s {
427 struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS]; 427 struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
428}; 428};
429 429
430/** 430/*
431 *---------------------------------------------------------------------- 431 *----------------------------------------------------------------------
432 * MSGQ 432 * MSGQ
433 *---------------------------------------------------------------------- 433 *----------------------------------------------------------------------
@@ -531,7 +531,7 @@ enum bfi_port_i2h {
531 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), 531 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
532}; 532};
533 533
534/** 534/*
535 * Generic REQ type 535 * Generic REQ type
536 */ 536 */
537struct bfi_port_generic_req_s { 537struct bfi_port_generic_req_s {
@@ -540,7 +540,7 @@ struct bfi_port_generic_req_s {
540 u32 rsvd; 540 u32 rsvd;
541}; 541};
542 542
543/** 543/*
544 * Generic RSP type 544 * Generic RSP type
545 */ 545 */
546struct bfi_port_generic_rsp_s { 546struct bfi_port_generic_rsp_s {
@@ -550,7 +550,7 @@ struct bfi_port_generic_rsp_s {
550 u32 msgtag; /* msgtag for reply */ 550 u32 msgtag; /* msgtag for reply */
551}; 551};
552 552
553/** 553/*
554 * BFI_PORT_H2I_GET_STATS_REQ 554 * BFI_PORT_H2I_GET_STATS_REQ
555 */ 555 */
556struct bfi_port_get_stats_req_s { 556struct bfi_port_get_stats_req_s {
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index 69ac85f9e93..fa9f6fb9d45 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -41,7 +41,7 @@ struct bfi_iocfc_cfg_s {
41 u16 rsvd_1; 41 u16 rsvd_1;
42 u32 endian_sig; /* endian signature of host */ 42 u32 endian_sig; /* endian signature of host */
43 43
44 /** 44 /*
45 * Request and response circular queue base addresses, size and 45 * Request and response circular queue base addresses, size and
46 * shadow index pointers. 46 * shadow index pointers.
47 */ 47 */
@@ -58,7 +58,7 @@ struct bfi_iocfc_cfg_s {
58 struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */ 58 struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
59}; 59};
60 60
61/** 61/*
62 * Boot target wwn information for this port. This contains either the stored 62 * Boot target wwn information for this port. This contains either the stored
63 * or discovered boot target port wwns for the port. 63 * or discovered boot target port wwns for the port.
64 */ 64 */
@@ -75,7 +75,7 @@ struct bfi_iocfc_cfgrsp_s {
75 struct bfi_pbc_s pbc_cfg; 75 struct bfi_pbc_s pbc_cfg;
76}; 76};
77 77
78/** 78/*
79 * BFI_IOCFC_H2I_CFG_REQ message 79 * BFI_IOCFC_H2I_CFG_REQ message
80 */ 80 */
81struct bfi_iocfc_cfg_req_s { 81struct bfi_iocfc_cfg_req_s {
@@ -84,7 +84,7 @@ struct bfi_iocfc_cfg_req_s {
84}; 84};
85 85
86 86
87/** 87/*
88 * BFI_IOCFC_I2H_CFG_REPLY message 88 * BFI_IOCFC_I2H_CFG_REPLY message
89 */ 89 */
90struct bfi_iocfc_cfg_reply_s { 90struct bfi_iocfc_cfg_reply_s {
@@ -95,7 +95,7 @@ struct bfi_iocfc_cfg_reply_s {
95}; 95};
96 96
97 97
98/** 98/*
99 * BFI_IOCFC_H2I_SET_INTR_REQ message 99 * BFI_IOCFC_H2I_SET_INTR_REQ message
100 */ 100 */
101struct bfi_iocfc_set_intr_req_s { 101struct bfi_iocfc_set_intr_req_s {
@@ -107,7 +107,7 @@ struct bfi_iocfc_set_intr_req_s {
107}; 107};
108 108
109 109
110/** 110/*
111 * BFI_IOCFC_H2I_UPDATEQ_REQ message 111 * BFI_IOCFC_H2I_UPDATEQ_REQ message
112 */ 112 */
113struct bfi_iocfc_updateq_req_s { 113struct bfi_iocfc_updateq_req_s {
@@ -119,7 +119,7 @@ struct bfi_iocfc_updateq_req_s {
119}; 119};
120 120
121 121
122/** 122/*
123 * BFI_IOCFC_I2H_UPDATEQ_RSP message 123 * BFI_IOCFC_I2H_UPDATEQ_RSP message
124 */ 124 */
125struct bfi_iocfc_updateq_rsp_s { 125struct bfi_iocfc_updateq_rsp_s {
@@ -129,7 +129,7 @@ struct bfi_iocfc_updateq_rsp_s {
129}; 129};
130 130
131 131
132/** 132/*
133 * H2I Messages 133 * H2I Messages
134 */ 134 */
135union bfi_iocfc_h2i_msg_u { 135union bfi_iocfc_h2i_msg_u {
@@ -140,7 +140,7 @@ union bfi_iocfc_h2i_msg_u {
140}; 140};
141 141
142 142
143/** 143/*
144 * I2H Messages 144 * I2H Messages
145 */ 145 */
146union bfi_iocfc_i2h_msg_u { 146union bfi_iocfc_i2h_msg_u {
@@ -173,7 +173,7 @@ enum bfi_fcport_i2h {
173}; 173};
174 174
175 175
176/** 176/*
177 * Generic REQ type 177 * Generic REQ type
178 */ 178 */
179struct bfi_fcport_req_s { 179struct bfi_fcport_req_s {
@@ -181,7 +181,7 @@ struct bfi_fcport_req_s {
181 u32 msgtag; /* msgtag for reply */ 181 u32 msgtag; /* msgtag for reply */
182}; 182};
183 183
184/** 184/*
185 * Generic RSP type 185 * Generic RSP type
186 */ 186 */
187struct bfi_fcport_rsp_s { 187struct bfi_fcport_rsp_s {
@@ -191,7 +191,7 @@ struct bfi_fcport_rsp_s {
191 u32 msgtag; /* msgtag for reply */ 191 u32 msgtag; /* msgtag for reply */
192}; 192};
193 193
194/** 194/*
195 * BFI_FCPORT_H2I_ENABLE_REQ 195 * BFI_FCPORT_H2I_ENABLE_REQ
196 */ 196 */
197struct bfi_fcport_enable_req_s { 197struct bfi_fcport_enable_req_s {
@@ -205,7 +205,7 @@ struct bfi_fcport_enable_req_s {
205 u32 rsvd2; 205 u32 rsvd2;
206}; 206};
207 207
208/** 208/*
209 * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ 209 * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
210 */ 210 */
211struct bfi_fcport_set_svc_params_req_s { 211struct bfi_fcport_set_svc_params_req_s {
@@ -214,7 +214,7 @@ struct bfi_fcport_set_svc_params_req_s {
214 u16 rsvd; 214 u16 rsvd;
215}; 215};
216 216
217/** 217/*
218 * BFI_FCPORT_I2H_EVENT 218 * BFI_FCPORT_I2H_EVENT
219 */ 219 */
220struct bfi_fcport_event_s { 220struct bfi_fcport_event_s {
@@ -222,7 +222,7 @@ struct bfi_fcport_event_s {
222 struct bfa_port_link_s link_state; 222 struct bfa_port_link_s link_state;
223}; 223};
224 224
225/** 225/*
226 * BFI_FCPORT_I2H_TRUNK_SCN 226 * BFI_FCPORT_I2H_TRUNK_SCN
227 */ 227 */
228struct bfi_fcport_trunk_link_s { 228struct bfi_fcport_trunk_link_s {
@@ -243,7 +243,7 @@ struct bfi_fcport_trunk_scn_s {
243 struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS]; 243 struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS];
244}; 244};
245 245
246/** 246/*
247 * fcport H2I message 247 * fcport H2I message
248 */ 248 */
249union bfi_fcport_h2i_msg_u { 249union bfi_fcport_h2i_msg_u {
@@ -255,7 +255,7 @@ union bfi_fcport_h2i_msg_u {
255 struct bfi_fcport_req_s *pstatsclear; 255 struct bfi_fcport_req_s *pstatsclear;
256}; 256};
257 257
258/** 258/*
259 * fcport I2H message 259 * fcport I2H message
260 */ 260 */
261union bfi_fcport_i2h_msg_u { 261union bfi_fcport_i2h_msg_u {
@@ -279,7 +279,7 @@ enum bfi_fcxp_i2h {
279 279
280#define BFA_FCXP_MAX_SGES 2 280#define BFA_FCXP_MAX_SGES 2
281 281
282/** 282/*
283 * FCXP send request structure 283 * FCXP send request structure
284 */ 284 */
285struct bfi_fcxp_send_req_s { 285struct bfi_fcxp_send_req_s {
@@ -299,7 +299,7 @@ struct bfi_fcxp_send_req_s {
299 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ 299 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */
300}; 300};
301 301
302/** 302/*
303 * FCXP send response structure 303 * FCXP send response structure
304 */ 304 */
305struct bfi_fcxp_send_rsp_s { 305struct bfi_fcxp_send_rsp_s {
@@ -565,14 +565,14 @@ enum bfi_ioim_i2h {
565 BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */ 565 BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */
566}; 566};
567 567
568/** 568/*
569 * IO command DIF info 569 * IO command DIF info
570 */ 570 */
571struct bfi_ioim_dif_s { 571struct bfi_ioim_dif_s {
572 u32 dif_info[4]; 572 u32 dif_info[4];
573}; 573};
574 574
575/** 575/*
576 * FCP IO messages overview 576 * FCP IO messages overview
577 * 577 *
578 * @note 578 * @note
@@ -587,7 +587,7 @@ struct bfi_ioim_req_s {
587 u16 rport_hdl; /* itnim/rport firmware handle */ 587 u16 rport_hdl; /* itnim/rport firmware handle */
588 struct fcp_cmnd_s cmnd; /* IO request info */ 588 struct fcp_cmnd_s cmnd; /* IO request info */
589 589
590 /** 590 /*
591 * SG elements array within the IO request must be double word 591 * SG elements array within the IO request must be double word
592 * aligned. This alignment is required to optimize SGM setup for the IO. 592 * aligned. This alignment is required to optimize SGM setup for the IO.
593 */ 593 */
@@ -598,7 +598,7 @@ struct bfi_ioim_req_s {
598 struct bfi_ioim_dif_s dif; 598 struct bfi_ioim_dif_s dif;
599}; 599};
600 600
601/** 601/*
602 * This table shows various IO status codes from firmware and their 602 * This table shows various IO status codes from firmware and their
603 * meaning. Host driver can use these status codes to further process 603 * meaning. Host driver can use these status codes to further process
604 * IO completions. 604 * IO completions.
@@ -684,7 +684,7 @@ enum bfi_ioim_status {
684}; 684};
685 685
686#define BFI_IOIM_SNSLEN (256) 686#define BFI_IOIM_SNSLEN (256)
687/** 687/*
688 * I/O response message 688 * I/O response message
689 */ 689 */
690struct bfi_ioim_rsp_s { 690struct bfi_ioim_rsp_s {
@@ -746,7 +746,7 @@ enum bfi_tskim_status {
746 BFI_TSKIM_STS_NOT_SUPP = 4, 746 BFI_TSKIM_STS_NOT_SUPP = 4,
747 BFI_TSKIM_STS_FAILED = 5, 747 BFI_TSKIM_STS_FAILED = 5,
748 748
749 /** 749 /*
750 * Defined by BFA 750 * Defined by BFA
751 */ 751 */
752 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ 752 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 99f2b8c5dd6..8c04fada710 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -692,6 +692,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
692 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port), 692 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
693 atid, tid, status, csk, csk->state, csk->flags); 693 atid, tid, status, csk, csk->state, csk->flags);
694 694
695 if (status == CPL_ERR_RTX_NEG_ADVICE)
696 goto rel_skb;
697
695 if (status && status != CPL_ERR_TCAM_FULL && 698 if (status && status != CPL_ERR_TCAM_FULL &&
696 status != CPL_ERR_CONN_EXIST && 699 status != CPL_ERR_CONN_EXIST &&
697 status != CPL_ERR_ARP_MISS) 700 status != CPL_ERR_ARP_MISS)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b9bcfa4c7d2..5be3ae15cb7 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -773,6 +773,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
773 {"ENGENIO", "INF-01-00"}, 773 {"ENGENIO", "INF-01-00"},
774 {"STK", "FLEXLINE 380"}, 774 {"STK", "FLEXLINE 380"},
775 {"SUN", "CSM100_R_FC"}, 775 {"SUN", "CSM100_R_FC"},
776 {"SUN", "STK6580_6780"},
777 {"SUN", "SUN_6180"},
776 {NULL, NULL}, 778 {NULL, NULL},
777}; 779};
778 780
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 844d618b84b..d23a538a9df 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -117,7 +117,7 @@ static void fcoe_recv_frame(struct sk_buff *skb);
117 117
118static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); 118static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
119 119
120module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_AUTO, S_IWUSR); 120module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR);
121__MODULE_PARM_TYPE(create, "string"); 121__MODULE_PARM_TYPE(create, "string");
122MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); 122MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
123module_param_call(create_vn2vn, fcoe_create, NULL, 123module_param_call(create_vn2vn, fcoe_create, NULL,
@@ -1243,7 +1243,6 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1243 struct fcoe_interface *fcoe; 1243 struct fcoe_interface *fcoe;
1244 struct fc_frame_header *fh; 1244 struct fc_frame_header *fh;
1245 struct fcoe_percpu_s *fps; 1245 struct fcoe_percpu_s *fps;
1246 struct fcoe_port *port;
1247 struct ethhdr *eh; 1246 struct ethhdr *eh;
1248 unsigned int cpu; 1247 unsigned int cpu;
1249 1248
@@ -1262,16 +1261,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1262 skb_tail_pointer(skb), skb_end_pointer(skb), 1261 skb_tail_pointer(skb), skb_end_pointer(skb),
1263 skb->csum, skb->dev ? skb->dev->name : "<NULL>"); 1262 skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1264 1263
1265 /* check for mac addresses */
1266 eh = eth_hdr(skb); 1264 eh = eth_hdr(skb);
1267 port = lport_priv(lport);
1268 if (compare_ether_addr(eh->h_dest, port->data_src_addr) &&
1269 compare_ether_addr(eh->h_dest, fcoe->ctlr.ctl_src_addr) &&
1270 compare_ether_addr(eh->h_dest, (u8[6])FC_FCOE_FLOGI_MAC)) {
1271 FCOE_NETDEV_DBG(netdev, "wrong destination mac address:%pM\n",
1272 eh->h_dest);
1273 goto err;
1274 }
1275 1265
1276 if (is_fip_mode(&fcoe->ctlr) && 1266 if (is_fip_mode(&fcoe->ctlr) &&
1277 compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) { 1267 compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
@@ -1291,6 +1281,12 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1291 skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); 1281 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1292 fh = (struct fc_frame_header *) skb_transport_header(skb); 1282 fh = (struct fc_frame_header *) skb_transport_header(skb);
1293 1283
1284 if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
1285 FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
1286 eh->h_dest);
1287 goto err;
1288 }
1289
1294 fr = fcoe_dev_from_skb(skb); 1290 fr = fcoe_dev_from_skb(skb);
1295 fr->fr_dev = lport; 1291 fr->fr_dev = lport;
1296 fr->ptype = ptype; 1292 fr->ptype = ptype;
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index aa503d83092..bc17c712320 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -2296,7 +2296,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
2296{ 2296{
2297 struct fip_header *fiph; 2297 struct fip_header *fiph;
2298 enum fip_vn2vn_subcode sub; 2298 enum fip_vn2vn_subcode sub;
2299 union { 2299 struct {
2300 struct fc_rport_priv rdata; 2300 struct fc_rport_priv rdata;
2301 struct fcoe_rport frport; 2301 struct fcoe_rport frport;
2302 } buf; 2302 } buf;
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 5a3f9310101..841101846b8 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4177,6 +4177,14 @@ static int ioc_general(void __user *arg, char *cmnd)
4177 ha = gdth_find_ha(gen.ionode); 4177 ha = gdth_find_ha(gen.ionode);
4178 if (!ha) 4178 if (!ha)
4179 return -EFAULT; 4179 return -EFAULT;
4180
4181 if (gen.data_len > INT_MAX)
4182 return -EINVAL;
4183 if (gen.sense_len > INT_MAX)
4184 return -EINVAL;
4185 if (gen.data_len + gen.sense_len > INT_MAX)
4186 return -EINVAL;
4187
4180 if (gen.data_len + gen.sense_len != 0) { 4188 if (gen.data_len + gen.sense_len != 0) {
4181 if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len, 4189 if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
4182 FALSE, &paddr))) 4190 FALSE, &paddr)))
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index df9a12c8b37..fa60d7df44b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9025,6 +9025,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
9025 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, 9025 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9026 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 9026 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9027 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, 9027 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
9028 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9029 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9028 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 9030 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9029 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, 9031 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9030 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 9032 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index aa8bb2f2c6e..b28a00f1082 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -82,6 +82,7 @@
82 82
83#define IPR_SUBS_DEV_ID_57B4 0x033B 83#define IPR_SUBS_DEV_ID_57B4 0x033B
84#define IPR_SUBS_DEV_ID_57B2 0x035F 84#define IPR_SUBS_DEV_ID_57B2 0x035F
85#define IPR_SUBS_DEV_ID_57C4 0x0354
85#define IPR_SUBS_DEV_ID_57C6 0x0357 86#define IPR_SUBS_DEV_ID_57C6 0x0357
86#define IPR_SUBS_DEV_ID_57CC 0x035C 87#define IPR_SUBS_DEV_ID_57CC 0x035C
87 88
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 32f67c4b03f..911b2736caf 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -684,10 +684,9 @@ void fc_disc_stop(struct fc_lport *lport)
684{ 684{
685 struct fc_disc *disc = &lport->disc; 685 struct fc_disc *disc = &lport->disc;
686 686
687 if (disc) { 687 if (disc->pending)
688 cancel_delayed_work_sync(&disc->disc_work); 688 cancel_delayed_work_sync(&disc->disc_work);
689 fc_disc_stop_rports(disc); 689 fc_disc_stop_rports(disc);
690 }
691} 690}
692 691
693/** 692/**
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c797f6b48f0..e340373b509 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -58,8 +58,7 @@ struct kmem_cache *scsi_pkt_cachep;
58#define FC_SRB_WRITE (1 << 0) 58#define FC_SRB_WRITE (1 << 0)
59 59
60/* 60/*
61 * The SCp.ptr should be tested and set under the host lock. NULL indicates 61 * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
62 * that the command has been retruned to the scsi layer.
63 */ 62 */
64#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr) 63#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
65#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) 64#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
@@ -1880,8 +1879,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1880 1879
1881 lport = fsp->lp; 1880 lport = fsp->lp;
1882 si = fc_get_scsi_internal(lport); 1881 si = fc_get_scsi_internal(lport);
1883 if (!fsp->cmd)
1884 return;
1885 1882
1886 /* 1883 /*
1887 * if can_queue ramp down is done then try can_queue ramp up 1884 * if can_queue ramp down is done then try can_queue ramp up
@@ -1891,11 +1888,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1891 fc_fcp_can_queue_ramp_up(lport); 1888 fc_fcp_can_queue_ramp_up(lport);
1892 1889
1893 sc_cmd = fsp->cmd; 1890 sc_cmd = fsp->cmd;
1894 fsp->cmd = NULL;
1895
1896 if (!sc_cmd->SCp.ptr)
1897 return;
1898
1899 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1891 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1900 switch (fsp->status_code) { 1892 switch (fsp->status_code) {
1901 case FC_COMPLETE: 1893 case FC_COMPLETE:
@@ -1971,15 +1963,13 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1971 break; 1963 break;
1972 } 1964 }
1973 1965
1974 if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) { 1966 if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE)
1975 sc_cmd->result = (DID_REQUEUE << 16); 1967 sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16);
1976 FC_FCP_DBG(fsp, "Returning DID_REQUEUE to scsi-ml\n");
1977 }
1978 1968
1979 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1969 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1980 list_del(&fsp->list); 1970 list_del(&fsp->list);
1981 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1982 sc_cmd->SCp.ptr = NULL; 1971 sc_cmd->SCp.ptr = NULL;
1972 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1983 sc_cmd->scsi_done(sc_cmd); 1973 sc_cmd->scsi_done(sc_cmd);
1984 1974
1985 /* release ref from initial allocation in queue command */ 1975 /* release ref from initial allocation in queue command */
@@ -1997,6 +1987,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1997{ 1987{
1998 struct fc_fcp_pkt *fsp; 1988 struct fc_fcp_pkt *fsp;
1999 struct fc_lport *lport; 1989 struct fc_lport *lport;
1990 struct fc_fcp_internal *si;
2000 int rc = FAILED; 1991 int rc = FAILED;
2001 unsigned long flags; 1992 unsigned long flags;
2002 1993
@@ -2006,7 +1997,8 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
2006 else if (!lport->link_up) 1997 else if (!lport->link_up)
2007 return rc; 1998 return rc;
2008 1999
2009 spin_lock_irqsave(lport->host->host_lock, flags); 2000 si = fc_get_scsi_internal(lport);
2001 spin_lock_irqsave(&si->scsi_queue_lock, flags);
2010 fsp = CMD_SP(sc_cmd); 2002 fsp = CMD_SP(sc_cmd);
2011 if (!fsp) { 2003 if (!fsp) {
2012 /* command completed while scsi eh was setting up */ 2004 /* command completed while scsi eh was setting up */
@@ -2015,7 +2007,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
2015 } 2007 }
2016 /* grab a ref so the fsp and sc_cmd cannot be released from under us */ 2008 /* grab a ref so the fsp and sc_cmd cannot be released from under us */
2017 fc_fcp_pkt_hold(fsp); 2009 fc_fcp_pkt_hold(fsp);
2018 spin_unlock_irqrestore(lport->host->host_lock, flags); 2010 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
2019 2011
2020 if (fc_fcp_lock_pkt(fsp)) { 2012 if (fc_fcp_lock_pkt(fsp)) {
2021 /* completed while we were waiting for timer to be deleted */ 2013 /* completed while we were waiting for timer to be deleted */
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index d9b6e11b0e8..9be63edbf8f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1447,13 +1447,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1447 } 1447 }
1448 1448
1449 did = fc_frame_did(fp); 1449 did = fc_frame_did(fp);
1450 1450 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
1451 if (!did) {
1452 FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1453 goto out;
1454 }
1455
1456 if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
1457 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1451 flp = fc_frame_payload_get(fp, sizeof(*flp));
1458 if (flp) { 1452 if (flp) {
1459 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1453 mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1492,8 +1486,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1492 fc_lport_enter_dns(lport); 1486 fc_lport_enter_dns(lport);
1493 } 1487 }
1494 } 1488 }
1495 } else 1489 } else {
1490 FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
1496 fc_lport_error(lport, fp); 1491 fc_lport_error(lport, fp);
1492 }
1497 1493
1498out: 1494out:
1499 fc_frame_free(fp); 1495 fc_frame_free(fp);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9f2286fe0c..a84ef13ed74 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -196,9 +196,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata)
196void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) 196void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
197{ 197{
198 if (timeout) 198 if (timeout)
199 rport->dev_loss_tmo = timeout + 5; 199 rport->dev_loss_tmo = timeout;
200 else 200 else
201 rport->dev_loss_tmo = 30; 201 rport->dev_loss_tmo = 1;
202} 202}
203EXPORT_SYMBOL(fc_set_rport_loss_tmo); 203EXPORT_SYMBOL(fc_set_rport_loss_tmo);
204 204
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a50aa03b8ac..196de40b906 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -202,9 +202,12 @@ struct lpfc_stats {
202 uint32_t elsRcvPRLO; 202 uint32_t elsRcvPRLO;
203 uint32_t elsRcvPRLI; 203 uint32_t elsRcvPRLI;
204 uint32_t elsRcvLIRR; 204 uint32_t elsRcvLIRR;
205 uint32_t elsRcvRLS;
205 uint32_t elsRcvRPS; 206 uint32_t elsRcvRPS;
206 uint32_t elsRcvRPL; 207 uint32_t elsRcvRPL;
207 uint32_t elsRcvRRQ; 208 uint32_t elsRcvRRQ;
209 uint32_t elsRcvRTV;
210 uint32_t elsRcvECHO;
208 uint32_t elsXmitFLOGI; 211 uint32_t elsXmitFLOGI;
209 uint32_t elsXmitFDISC; 212 uint32_t elsXmitFDISC;
210 uint32_t elsXmitPLOGI; 213 uint32_t elsXmitPLOGI;
@@ -549,9 +552,11 @@ struct lpfc_hba {
549#define ELS_XRI_ABORT_EVENT 0x40 552#define ELS_XRI_ABORT_EVENT 0x40
550#define ASYNC_EVENT 0x80 553#define ASYNC_EVENT 0x80
551#define LINK_DISABLED 0x100 /* Link disabled by user */ 554#define LINK_DISABLED 0x100 /* Link disabled by user */
552#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ 555#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
553#define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */ 556#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */
554#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */ 557#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */
558#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
559#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
555 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 560 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
556 struct lpfc_dmabuf slim2p; 561 struct lpfc_dmabuf slim2p;
557 562
@@ -573,6 +578,7 @@ struct lpfc_hba {
573 /* These fields used to be binfo */ 578 /* These fields used to be binfo */
574 uint32_t fc_pref_DID; /* preferred D_ID */ 579 uint32_t fc_pref_DID; /* preferred D_ID */
575 uint8_t fc_pref_ALPA; /* preferred AL_PA */ 580 uint8_t fc_pref_ALPA; /* preferred AL_PA */
581 uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
576 uint32_t fc_edtov; /* E_D_TOV timer value */ 582 uint32_t fc_edtov; /* E_D_TOV timer value */
577 uint32_t fc_arbtov; /* ARB_TOV timer value */ 583 uint32_t fc_arbtov; /* ARB_TOV timer value */
578 uint32_t fc_ratov; /* R_A_TOV timer value */ 584 uint32_t fc_ratov; /* R_A_TOV timer value */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f681eea5773..c1cbec01345 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3789,8 +3789,13 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
3789 break; 3789 break;
3790 case MBX_SECURITY_MGMT: 3790 case MBX_SECURITY_MGMT:
3791 case MBX_AUTH_PORT: 3791 case MBX_AUTH_PORT:
3792 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) 3792 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
3793 printk(KERN_WARNING "mbox_read:Command 0x%x "
3794 "is not permitted\n", pmb->mbxCommand);
3795 sysfs_mbox_idle(phba);
3796 spin_unlock_irq(&phba->hbalock);
3793 return -EPERM; 3797 return -EPERM;
3798 }
3794 break; 3799 break;
3795 case MBX_READ_SPARM64: 3800 case MBX_READ_SPARM64:
3796 case MBX_READ_LA: 3801 case MBX_READ_LA:
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d60b55f53..7260c3af555 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3142,12 +3142,12 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
3142 job = menlo->set_job; 3142 job = menlo->set_job;
3143 job->dd_data = NULL; /* so timeout handler does not reply */ 3143 job->dd_data = NULL; /* so timeout handler does not reply */
3144 3144
3145 spin_lock_irqsave(&phba->hbalock, flags); 3145 spin_lock(&phba->hbalock);
3146 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 3146 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3147 if (cmdiocbq->context2 && rspiocbq) 3147 if (cmdiocbq->context2 && rspiocbq)
3148 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 3148 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3149 &rspiocbq->iocb, sizeof(IOCB_t)); 3149 &rspiocbq->iocb, sizeof(IOCB_t));
3150 spin_unlock_irqrestore(&phba->hbalock, flags); 3150 spin_unlock(&phba->hbalock);
3151 3151
3152 bmp = menlo->bmp; 3152 bmp = menlo->bmp;
3153 rspiocbq = menlo->rspiocbq; 3153 rspiocbq = menlo->rspiocbq;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 03f4ddc1857..a5f5a093a8a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -44,6 +44,8 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
44void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 44void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
45void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
46void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 46void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
47void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
48
47void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); 49void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
48void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *, 50void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
49 struct lpfc_nodelist *); 51 struct lpfc_nodelist *);
@@ -229,6 +231,7 @@ void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
229uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); 231uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
230int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); 232int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
231void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); 233void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
234int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
232 235
233int lpfc_mem_alloc(struct lpfc_hba *, int align); 236int lpfc_mem_alloc(struct lpfc_hba *, int align);
234void lpfc_mem_free(struct lpfc_hba *); 237void lpfc_mem_free(struct lpfc_hba *);
@@ -271,6 +274,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
271void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 274void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
272void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); 275void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
273void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 276void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
277void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
274void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); 278void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
275int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 279int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
276 struct lpfc_dmabuf *); 280 struct lpfc_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e6ca12f6c6c..884f4d32179 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -177,15 +177,18 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
177 (elscmd == ELS_CMD_LOGO))) 177 (elscmd == ELS_CMD_LOGO)))
178 switch (elscmd) { 178 switch (elscmd) {
179 case ELS_CMD_FLOGI: 179 case ELS_CMD_FLOGI:
180 elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 180 elsiocb->iocb_flag |=
181 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
181 & LPFC_FIP_ELS_ID_MASK); 182 & LPFC_FIP_ELS_ID_MASK);
182 break; 183 break;
183 case ELS_CMD_FDISC: 184 case ELS_CMD_FDISC:
184 elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 185 elsiocb->iocb_flag |=
186 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
185 & LPFC_FIP_ELS_ID_MASK); 187 & LPFC_FIP_ELS_ID_MASK);
186 break; 188 break;
187 case ELS_CMD_LOGO: 189 case ELS_CMD_LOGO:
188 elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 190 elsiocb->iocb_flag |=
191 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
189 & LPFC_FIP_ELS_ID_MASK); 192 & LPFC_FIP_ELS_ID_MASK);
190 break; 193 break;
191 } 194 }
@@ -517,18 +520,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
517 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 520 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
518 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 521 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
519 522
523 phba->fc_edtovResol = sp->cmn.edtovResolution;
520 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 524 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
521 525
522 if (phba->fc_topology == TOPOLOGY_LOOP) { 526 if (phba->fc_topology == TOPOLOGY_LOOP) {
523 spin_lock_irq(shost->host_lock); 527 spin_lock_irq(shost->host_lock);
524 vport->fc_flag |= FC_PUBLIC_LOOP; 528 vport->fc_flag |= FC_PUBLIC_LOOP;
525 spin_unlock_irq(shost->host_lock); 529 spin_unlock_irq(shost->host_lock);
526 } else {
527 /*
528 * If we are a N-port connected to a Fabric, fixup sparam's so
529 * logins to devices on remote loops work.
530 */
531 vport->fc_sparam.cmn.altBbCredit = 1;
532 } 530 }
533 531
534 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 532 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
@@ -585,6 +583,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
585 lpfc_unreg_rpi(vport, np); 583 lpfc_unreg_rpi(vport, np);
586 } 584 }
587 lpfc_cleanup_pending_mbox(vport); 585 lpfc_cleanup_pending_mbox(vport);
586
587 if (phba->sli_rev == LPFC_SLI_REV4)
588 lpfc_sli4_unreg_all_rpis(vport);
589
588 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 590 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
589 lpfc_mbx_unreg_vpi(vport); 591 lpfc_mbx_unreg_vpi(vport);
590 spin_lock_irq(shost->host_lock); 592 spin_lock_irq(shost->host_lock);
@@ -800,7 +802,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
800 802
801 if (irsp->ulpStatus) { 803 if (irsp->ulpStatus) {
802 /* 804 /*
803 * In case of FIP mode, perform round robin FCF failover 805 * In case of FIP mode, perform roundrobin FCF failover
804 * due to new FCF discovery 806 * due to new FCF discovery
805 */ 807 */
806 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 808 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
@@ -808,48 +810,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
808 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && 810 (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
809 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { 811 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
810 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 812 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
811 "2611 FLOGI failed on registered " 813 "2611 FLOGI failed on FCF (x%x), "
812 "FCF record fcf_index(%d), status: " 814 "status:x%x/x%x, tmo:x%x, perform "
813 "x%x/x%x, tmo:x%x, trying to perform " 815 "roundrobin FCF failover\n",
814 "round robin failover\n",
815 phba->fcf.current_rec.fcf_indx, 816 phba->fcf.current_rec.fcf_indx,
816 irsp->ulpStatus, irsp->un.ulpWord[4], 817 irsp->ulpStatus, irsp->un.ulpWord[4],
817 irsp->ulpTimeout); 818 irsp->ulpTimeout);
818 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 819 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
819 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 820 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
820 /* 821 if (rc)
821 * Exhausted the eligible FCF record list, 822 goto out;
822 * fail through to retry FLOGI on current
823 * FCF record.
824 */
825 lpfc_printf_log(phba, KERN_WARNING,
826 LOG_FIP | LOG_ELS,
827 "2760 Completed one round "
828 "of FLOGI FCF round robin "
829 "failover list, retry FLOGI "
830 "on currently registered "
831 "FCF index:%d\n",
832 phba->fcf.current_rec.fcf_indx);
833 } else {
834 lpfc_printf_log(phba, KERN_INFO,
835 LOG_FIP | LOG_ELS,
836 "2794 FLOGI FCF round robin "
837 "failover to FCF index x%x\n",
838 fcf_index);
839 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
840 fcf_index);
841 if (rc)
842 lpfc_printf_log(phba, KERN_WARNING,
843 LOG_FIP | LOG_ELS,
844 "2761 FLOGI round "
845 "robin FCF failover "
846 "read FCF failed "
847 "rc:x%x, fcf_index:"
848 "%d\n", rc,
849 phba->fcf.current_rec.fcf_indx);
850 else
851 goto out;
852 }
853 } 823 }
854 824
855 /* FLOGI failure */ 825 /* FLOGI failure */
@@ -939,6 +909,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
939 lpfc_nlp_put(ndlp); 909 lpfc_nlp_put(ndlp);
940 spin_lock_irq(&phba->hbalock); 910 spin_lock_irq(&phba->hbalock);
941 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 911 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
912 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
942 spin_unlock_irq(&phba->hbalock); 913 spin_unlock_irq(&phba->hbalock);
943 goto out; 914 goto out;
944 } 915 }
@@ -947,13 +918,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
947 if (phba->hba_flag & HBA_FIP_SUPPORT) 918 if (phba->hba_flag & HBA_FIP_SUPPORT)
948 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 919 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
949 LOG_ELS, 920 LOG_ELS,
950 "2769 FLOGI successful on FCF " 921 "2769 FLOGI to FCF (x%x) "
951 "record: current_fcf_index:" 922 "completed successfully\n",
952 "x%x, terminate FCF round "
953 "robin failover process\n",
954 phba->fcf.current_rec.fcf_indx); 923 phba->fcf.current_rec.fcf_indx);
955 spin_lock_irq(&phba->hbalock); 924 spin_lock_irq(&phba->hbalock);
956 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 925 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
926 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
957 spin_unlock_irq(&phba->hbalock); 927 spin_unlock_irq(&phba->hbalock);
958 goto out; 928 goto out;
959 } 929 }
@@ -1175,12 +1145,13 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
1175 return 0; 1145 return 0;
1176 } 1146 }
1177 1147
1178 if (lpfc_issue_els_flogi(vport, ndlp, 0)) 1148 if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1179 /* This decrement of reference count to node shall kick off 1149 /* This decrement of reference count to node shall kick off
1180 * the release of the node. 1150 * the release of the node.
1181 */ 1151 */
1182 lpfc_nlp_put(ndlp); 1152 lpfc_nlp_put(ndlp);
1183 1153 return 0;
1154 }
1184 return 1; 1155 return 1;
1185} 1156}
1186 1157
@@ -1645,6 +1616,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1645 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1616 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1646 sp = (struct serv_parm *) pcmd; 1617 sp = (struct serv_parm *) pcmd;
1647 1618
1619 /*
1620 * If we are a N-port connected to a Fabric, fix-up paramm's so logins
1621 * to device on remote loops work.
1622 */
1623 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
1624 sp->cmn.altBbCredit = 1;
1625
1648 if (sp->cmn.fcphLow < FC_PH_4_3) 1626 if (sp->cmn.fcphLow < FC_PH_4_3)
1649 sp->cmn.fcphLow = FC_PH_4_3; 1627 sp->cmn.fcphLow = FC_PH_4_3;
1650 1628
@@ -3926,6 +3904,64 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3926} 3904}
3927 3905
3928/** 3906/**
3907 * lpfc_els_rsp_echo_acc - Issue echo acc response
3908 * @vport: pointer to a virtual N_Port data structure.
3909 * @data: pointer to echo data to return in the accept.
3910 * @oldiocb: pointer to the original lpfc command iocb data structure.
3911 * @ndlp: pointer to a node-list data structure.
3912 *
3913 * Return code
3914 * 0 - Successfully issued acc echo response
3915 * 1 - Failed to issue acc echo response
3916 **/
3917static int
3918lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
3919 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
3920{
3921 struct lpfc_hba *phba = vport->phba;
3922 struct lpfc_iocbq *elsiocb;
3923 struct lpfc_sli *psli;
3924 uint8_t *pcmd;
3925 uint16_t cmdsize;
3926 int rc;
3927
3928 psli = &phba->sli;
3929 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
3930
3931 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3932 ndlp->nlp_DID, ELS_CMD_ACC);
3933 if (!elsiocb)
3934 return 1;
3935
3936 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */
3937 /* Xmit ECHO ACC response tag <ulpIoTag> */
3938 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3939 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
3940 elsiocb->iotag, elsiocb->iocb.ulpContext);
3941 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3942 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3943 pcmd += sizeof(uint32_t);
3944 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
3945
3946 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3947 "Issue ACC ECHO: did:x%x flg:x%x",
3948 ndlp->nlp_DID, ndlp->nlp_flag, 0);
3949
3950 phba->fc_stat.elsXmitACC++;
3951 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3952 lpfc_nlp_put(ndlp);
3953 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3954 * it could be freed */
3955
3956 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3957 if (rc == IOCB_ERROR) {
3958 lpfc_els_free_iocb(phba, elsiocb);
3959 return 1;
3960 }
3961 return 0;
3962}
3963
3964/**
3929 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 3965 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
3930 * @vport: pointer to a host virtual N_Port data structure. 3966 * @vport: pointer to a host virtual N_Port data structure.
3931 * 3967 *
@@ -4684,6 +4720,30 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4684} 4720}
4685 4721
4686/** 4722/**
4723 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
4724 * @vport: pointer to a host virtual N_Port data structure.
4725 * @cmdiocb: pointer to lpfc command iocb data structure.
4726 * @ndlp: pointer to a node-list data structure.
4727 *
4728 * Return code
4729 * 0 - Successfully processed echo iocb (currently always return 0)
4730 **/
4731static int
4732lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4733 struct lpfc_nodelist *ndlp)
4734{
4735 uint8_t *pcmd;
4736
4737 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
4738
4739 /* skip over first word of echo command to find echo data */
4740 pcmd += sizeof(uint32_t);
4741
4742 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
4743 return 0;
4744}
4745
4746/**
4687 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 4747 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
4688 * @vport: pointer to a host virtual N_Port data structure. 4748 * @vport: pointer to a host virtual N_Port data structure.
4689 * @cmdiocb: pointer to lpfc command iocb data structure. 4749 * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -4735,6 +4795,89 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4735} 4795}
4736 4796
4737/** 4797/**
4798 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
4799 * @phba: pointer to lpfc hba data structure.
4800 * @pmb: pointer to the driver internal queue element for mailbox command.
4801 *
4802 * This routine is the completion callback function for the MBX_READ_LNK_STAT
4803 * mailbox command. This callback function is to actually send the Accept
4804 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
4805 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
4806 * mailbox command, constructs the RPS response with the link statistics
4807 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
4808 * response to the RPS.
4809 *
4810 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4811 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4812 * will be stored into the context1 field of the IOCB for the completion
4813 * callback function to the RPS Accept Response ELS IOCB command.
4814 *
4815 **/
4816static void
4817lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4818{
4819 MAILBOX_t *mb;
4820 IOCB_t *icmd;
4821 struct RLS_RSP *rls_rsp;
4822 uint8_t *pcmd;
4823 struct lpfc_iocbq *elsiocb;
4824 struct lpfc_nodelist *ndlp;
4825 uint16_t xri;
4826 uint32_t cmdsize;
4827
4828 mb = &pmb->u.mb;
4829
4830 ndlp = (struct lpfc_nodelist *) pmb->context2;
4831 xri = (uint16_t) ((unsigned long)(pmb->context1));
4832 pmb->context1 = NULL;
4833 pmb->context2 = NULL;
4834
4835 if (mb->mbxStatus) {
4836 mempool_free(pmb, phba->mbox_mem_pool);
4837 return;
4838 }
4839
4840 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
4841 mempool_free(pmb, phba->mbox_mem_pool);
4842 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
4843 lpfc_max_els_tries, ndlp,
4844 ndlp->nlp_DID, ELS_CMD_ACC);
4845
4846 /* Decrement the ndlp reference count from previous mbox command */
4847 lpfc_nlp_put(ndlp);
4848
4849 if (!elsiocb)
4850 return;
4851
4852 icmd = &elsiocb->iocb;
4853 icmd->ulpContext = xri;
4854
4855 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4856 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4857 pcmd += sizeof(uint32_t); /* Skip past command */
4858 rls_rsp = (struct RLS_RSP *)pcmd;
4859
4860 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
4861 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
4862 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
4863 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
4864 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
4865 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
4866
4867 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
4868 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
4869 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
4870 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4871 elsiocb->iotag, elsiocb->iocb.ulpContext,
4872 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4873 ndlp->nlp_rpi);
4874 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4875 phba->fc_stat.elsXmitACC++;
4876 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4877 lpfc_els_free_iocb(phba, elsiocb);
4878}
4879
4880/**
4738 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 4881 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
4739 * @phba: pointer to lpfc hba data structure. 4882 * @phba: pointer to lpfc hba data structure.
4740 * @pmb: pointer to the driver internal queue element for mailbox command. 4883 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -4827,7 +4970,155 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4827} 4970}
4828 4971
4829/** 4972/**
4830 * lpfc_els_rcv_rps - Process an unsolicited rps iocb 4973 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
4974 * @vport: pointer to a host virtual N_Port data structure.
4975 * @cmdiocb: pointer to lpfc command iocb data structure.
4976 * @ndlp: pointer to a node-list data structure.
4977 *
4978 * This routine processes Read Port Status (RPL) IOCB received as an
4979 * ELS unsolicited event. It first checks the remote port state. If the
4980 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
4981 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
4982 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
4983 * for reading the HBA link statistics. It is for the callback function,
4984 * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
4985 * to actually sending out RPL Accept (ACC) response.
4986 *
4987 * Return codes
4988 * 0 - Successfully processed rls iocb (currently always return 0)
4989 **/
4990static int
4991lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4992 struct lpfc_nodelist *ndlp)
4993{
4994 struct lpfc_hba *phba = vport->phba;
4995 LPFC_MBOXQ_t *mbox;
4996 struct lpfc_dmabuf *pcmd;
4997 struct ls_rjt stat;
4998
4999 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5000 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5001 /* reject the unsolicited RPS request and done with it */
5002 goto reject_out;
5003
5004 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5005
5006 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5007 if (mbox) {
5008 lpfc_read_lnk_stat(phba, mbox);
5009 mbox->context1 =
5010 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
5011 mbox->context2 = lpfc_nlp_get(ndlp);
5012 mbox->vport = vport;
5013 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
5014 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5015 != MBX_NOT_FINISHED)
5016 /* Mbox completion will send ELS Response */
5017 return 0;
5018 /* Decrement reference count used for the failed mbox
5019 * command.
5020 */
5021 lpfc_nlp_put(ndlp);
5022 mempool_free(mbox, phba->mbox_mem_pool);
5023 }
5024reject_out:
5025 /* issue rejection response */
5026 stat.un.b.lsRjtRsvd0 = 0;
5027 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5028 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5029 stat.un.b.vendorUnique = 0;
5030 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5031 return 0;
5032}
5033
5034/**
5035 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
5036 * @vport: pointer to a host virtual N_Port data structure.
5037 * @cmdiocb: pointer to lpfc command iocb data structure.
5038 * @ndlp: pointer to a node-list data structure.
5039 *
5040 * This routine processes Read Timout Value (RTV) IOCB received as an
5041 * ELS unsolicited event. It first checks the remote port state. If the
5042 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
5043 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
5044 * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
5045 * Value (RTV) unsolicited IOCB event.
5046 *
5047 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5048 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5049 * will be stored into the context1 field of the IOCB for the completion
5050 * callback function to the RPS Accept Response ELS IOCB command.
5051 *
5052 * Return codes
5053 * 0 - Successfully processed rtv iocb (currently always return 0)
5054 **/
5055static int
5056lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5057 struct lpfc_nodelist *ndlp)
5058{
5059 struct lpfc_hba *phba = vport->phba;
5060 struct ls_rjt stat;
5061 struct RTV_RSP *rtv_rsp;
5062 uint8_t *pcmd;
5063 struct lpfc_iocbq *elsiocb;
5064 uint32_t cmdsize;
5065
5066
5067 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5068 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
5069 /* reject the unsolicited RPS request and done with it */
5070 goto reject_out;
5071
5072 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
5073 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5074 lpfc_max_els_tries, ndlp,
5075 ndlp->nlp_DID, ELS_CMD_ACC);
5076
5077 if (!elsiocb)
5078 return 1;
5079
5080 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5081 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5082 pcmd += sizeof(uint32_t); /* Skip past command */
5083
5084 /* use the command's xri in the response */
5085 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
5086
5087 rtv_rsp = (struct RTV_RSP *)pcmd;
5088
5089 /* populate RTV payload */
5090 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
5091 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
5092 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
5093 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
5094 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
5095
5096 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
5097 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
5098 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
5099 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
5100 "Data: x%x x%x x%x\n",
5101 elsiocb->iotag, elsiocb->iocb.ulpContext,
5102 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5103 ndlp->nlp_rpi,
5104 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
5105 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5106 phba->fc_stat.elsXmitACC++;
5107 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
5108 lpfc_els_free_iocb(phba, elsiocb);
5109 return 0;
5110
5111reject_out:
5112 /* issue rejection response */
5113 stat.un.b.lsRjtRsvd0 = 0;
5114 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5115 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5116 stat.un.b.vendorUnique = 0;
5117 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5118 return 0;
5119}
5120
5121/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
4831 * @vport: pointer to a host virtual N_Port data structure. 5122 * @vport: pointer to a host virtual N_Port data structure.
4832 * @cmdiocb: pointer to lpfc command iocb data structure. 5123 * @cmdiocb: pointer to lpfc command iocb data structure.
4833 * @ndlp: pointer to a node-list data structure. 5124 * @ndlp: pointer to a node-list data structure.
@@ -5017,7 +5308,6 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5017 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5308 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5018 lp = (uint32_t *) pcmd->virt; 5309 lp = (uint32_t *) pcmd->virt;
5019 rpl = (RPL *) (lp + 1); 5310 rpl = (RPL *) (lp + 1);
5020
5021 maxsize = be32_to_cpu(rpl->maxsize); 5311 maxsize = be32_to_cpu(rpl->maxsize);
5022 5312
5023 /* We support only one port */ 5313 /* We support only one port */
@@ -5836,6 +6126,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5836 if (newnode) 6126 if (newnode)
5837 lpfc_nlp_put(ndlp); 6127 lpfc_nlp_put(ndlp);
5838 break; 6128 break;
6129 case ELS_CMD_RLS:
6130 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6131 "RCV RLS: did:x%x/ste:x%x flg:x%x",
6132 did, vport->port_state, ndlp->nlp_flag);
6133
6134 phba->fc_stat.elsRcvRLS++;
6135 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
6136 if (newnode)
6137 lpfc_nlp_put(ndlp);
6138 break;
5839 case ELS_CMD_RPS: 6139 case ELS_CMD_RPS:
5840 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6140 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5841 "RCV RPS: did:x%x/ste:x%x flg:x%x", 6141 "RCV RPS: did:x%x/ste:x%x flg:x%x",
@@ -5866,6 +6166,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5866 if (newnode) 6166 if (newnode)
5867 lpfc_nlp_put(ndlp); 6167 lpfc_nlp_put(ndlp);
5868 break; 6168 break;
6169 case ELS_CMD_RTV:
6170 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6171 "RCV RTV: did:x%x/ste:x%x flg:x%x",
6172 did, vport->port_state, ndlp->nlp_flag);
6173 phba->fc_stat.elsRcvRTV++;
6174 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
6175 if (newnode)
6176 lpfc_nlp_put(ndlp);
6177 break;
5869 case ELS_CMD_RRQ: 6178 case ELS_CMD_RRQ:
5870 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6179 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5871 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 6180 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
@@ -5876,6 +6185,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5876 if (newnode) 6185 if (newnode)
5877 lpfc_nlp_put(ndlp); 6186 lpfc_nlp_put(ndlp);
5878 break; 6187 break;
6188 case ELS_CMD_ECHO:
6189 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6190 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
6191 did, vport->port_state, ndlp->nlp_flag);
6192
6193 phba->fc_stat.elsRcvECHO++;
6194 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
6195 if (newnode)
6196 lpfc_nlp_put(ndlp);
6197 break;
5879 default: 6198 default:
5880 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6199 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5881 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 6200 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -6170,6 +6489,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6170 6489
6171 default: 6490 default:
6172 /* Try to recover from this error */ 6491 /* Try to recover from this error */
6492 if (phba->sli_rev == LPFC_SLI_REV4)
6493 lpfc_sli4_unreg_all_rpis(vport);
6173 lpfc_mbx_unreg_vpi(vport); 6494 lpfc_mbx_unreg_vpi(vport);
6174 spin_lock_irq(shost->host_lock); 6495 spin_lock_irq(shost->host_lock);
6175 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6496 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6437,6 +6758,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6437 lpfc_unreg_rpi(vport, np); 6758 lpfc_unreg_rpi(vport, np);
6438 } 6759 }
6439 lpfc_cleanup_pending_mbox(vport); 6760 lpfc_cleanup_pending_mbox(vport);
6761
6762 if (phba->sli_rev == LPFC_SLI_REV4)
6763 lpfc_sli4_unreg_all_rpis(vport);
6764
6440 lpfc_mbx_unreg_vpi(vport); 6765 lpfc_mbx_unreg_vpi(vport);
6441 spin_lock_irq(shost->host_lock); 6766 spin_lock_irq(shost->host_lock);
6442 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6767 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6452,7 +6777,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6452 * to update the MAC address. 6777 * to update the MAC address.
6453 */ 6778 */
6454 lpfc_register_new_vport(phba, vport, ndlp); 6779 lpfc_register_new_vport(phba, vport, ndlp);
6455 return ; 6780 goto out;
6456 } 6781 }
6457 6782
6458 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 6783 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a345dde16c8..a5d1695dac3 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/delay.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24#include <linux/pci.h> 25#include <linux/pci.h>
25#include <linux/kthread.h> 26#include <linux/kthread.h>
@@ -63,6 +64,7 @@ static uint8_t lpfcAlpaArray[] = {
63static void lpfc_disc_timeout_handler(struct lpfc_vport *); 64static void lpfc_disc_timeout_handler(struct lpfc_vport *);
64static void lpfc_disc_flush_list(struct lpfc_vport *vport); 65static void lpfc_disc_flush_list(struct lpfc_vport *vport);
65static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 66static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
67static int lpfc_fcf_inuse(struct lpfc_hba *);
66 68
67void 69void
68lpfc_terminate_rport_io(struct fc_rport *rport) 70lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -160,11 +162,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
160 return; 162 return;
161} 163}
162 164
163/* 165/**
164 * This function is called from the worker thread when dev_loss_tmo 166 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
165 * expire. 167 * @ndlp: Pointer to remote node object.
166 */ 168 *
167static void 169 * This function is called from the worker thread when devloss timeout timer
170 * expires. For SLI4 host, this routine shall return 1 when at lease one
171 * remote node, including this @ndlp, is still in use of FCF; otherwise, this
172 * routine shall return 0 when there is no remote node is still in use of FCF
173 * when devloss timeout happened to this @ndlp.
174 **/
175static int
168lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) 176lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
169{ 177{
170 struct lpfc_rport_data *rdata; 178 struct lpfc_rport_data *rdata;
@@ -175,17 +183,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
175 int put_node; 183 int put_node;
176 int put_rport; 184 int put_rport;
177 int warn_on = 0; 185 int warn_on = 0;
186 int fcf_inuse = 0;
178 187
179 rport = ndlp->rport; 188 rport = ndlp->rport;
180 189
181 if (!rport) 190 if (!rport)
182 return; 191 return fcf_inuse;
183 192
184 rdata = rport->dd_data; 193 rdata = rport->dd_data;
185 name = (uint8_t *) &ndlp->nlp_portname; 194 name = (uint8_t *) &ndlp->nlp_portname;
186 vport = ndlp->vport; 195 vport = ndlp->vport;
187 phba = vport->phba; 196 phba = vport->phba;
188 197
198 if (phba->sli_rev == LPFC_SLI_REV4)
199 fcf_inuse = lpfc_fcf_inuse(phba);
200
189 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 201 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
190 "rport devlosstmo:did:x%x type:x%x id:x%x", 202 "rport devlosstmo:did:x%x type:x%x id:x%x",
191 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); 203 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
@@ -209,7 +221,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
209 lpfc_nlp_put(ndlp); 221 lpfc_nlp_put(ndlp);
210 if (put_rport) 222 if (put_rport)
211 put_device(&rport->dev); 223 put_device(&rport->dev);
212 return; 224 return fcf_inuse;
213 } 225 }
214 226
215 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 227 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
@@ -220,7 +232,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
220 *name, *(name+1), *(name+2), *(name+3), 232 *name, *(name+1), *(name+2), *(name+3),
221 *(name+4), *(name+5), *(name+6), *(name+7), 233 *(name+4), *(name+5), *(name+6), *(name+7),
222 ndlp->nlp_DID); 234 ndlp->nlp_DID);
223 return; 235 return fcf_inuse;
224 } 236 }
225 237
226 if (ndlp->nlp_type & NLP_FABRIC) { 238 if (ndlp->nlp_type & NLP_FABRIC) {
@@ -233,7 +245,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
233 lpfc_nlp_put(ndlp); 245 lpfc_nlp_put(ndlp);
234 if (put_rport) 246 if (put_rport)
235 put_device(&rport->dev); 247 put_device(&rport->dev);
236 return; 248 return fcf_inuse;
237 } 249 }
238 250
239 if (ndlp->nlp_sid != NLP_NO_SID) { 251 if (ndlp->nlp_sid != NLP_NO_SID) {
@@ -280,6 +292,74 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
280 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) 292 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
281 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 293 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
282 294
295 return fcf_inuse;
296}
297
298/**
299 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
300 * @phba: Pointer to hba context object.
301 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
302 * @nlp_did: remote node identifer with devloss timeout.
303 *
304 * This function is called from the worker thread after invoking devloss
305 * timeout handler and releasing the reference count for the ndlp with
306 * which the devloss timeout was handled for SLI4 host. For the devloss
307 * timeout of the last remote node which had been in use of FCF, when this
308 * routine is invoked, it shall be guaranteed that none of the remote are
309 * in-use of FCF. When devloss timeout to the last remote using the FCF,
310 * if the FIP engine is neither in FCF table scan process nor roundrobin
311 * failover process, the in-use FCF shall be unregistered. If the FIP
312 * engine is in FCF discovery process, the devloss timeout state shall
313 * be set for either the FCF table scan process or roundrobin failover
314 * process to unregister the in-use FCF.
315 **/
316static void
317lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
318 uint32_t nlp_did)
319{
320 /* If devloss timeout happened to a remote node when FCF had no
321 * longer been in-use, do nothing.
322 */
323 if (!fcf_inuse)
324 return;
325
326 if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
327 spin_lock_irq(&phba->hbalock);
328 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
329 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
330 spin_unlock_irq(&phba->hbalock);
331 return;
332 }
333 phba->hba_flag |= HBA_DEVLOSS_TMO;
334 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
335 "2847 Last remote node (x%x) using "
336 "FCF devloss tmo\n", nlp_did);
337 }
338 if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
339 spin_unlock_irq(&phba->hbalock);
340 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
341 "2868 Devloss tmo to FCF rediscovery "
342 "in progress\n");
343 return;
344 }
345 if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
346 spin_unlock_irq(&phba->hbalock);
347 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
348 "2869 Devloss tmo to idle FIP engine, "
349 "unreg in-use FCF and rescan.\n");
350 /* Unregister in-use FCF and rescan */
351 lpfc_unregister_fcf_rescan(phba);
352 return;
353 }
354 spin_unlock_irq(&phba->hbalock);
355 if (phba->hba_flag & FCF_TS_INPROG)
356 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
357 "2870 FCF table scan in progress\n");
358 if (phba->hba_flag & FCF_RR_INPROG)
359 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
360 "2871 FLOGI roundrobin FCF failover "
361 "in progress\n");
362 }
283 lpfc_unregister_unused_fcf(phba); 363 lpfc_unregister_unused_fcf(phba);
284} 364}
285 365
@@ -408,6 +488,8 @@ lpfc_work_list_done(struct lpfc_hba *phba)
408 struct lpfc_work_evt *evtp = NULL; 488 struct lpfc_work_evt *evtp = NULL;
409 struct lpfc_nodelist *ndlp; 489 struct lpfc_nodelist *ndlp;
410 int free_evt; 490 int free_evt;
491 int fcf_inuse;
492 uint32_t nlp_did;
411 493
412 spin_lock_irq(&phba->hbalock); 494 spin_lock_irq(&phba->hbalock);
413 while (!list_empty(&phba->work_list)) { 495 while (!list_empty(&phba->work_list)) {
@@ -427,12 +509,17 @@ lpfc_work_list_done(struct lpfc_hba *phba)
427 break; 509 break;
428 case LPFC_EVT_DEV_LOSS: 510 case LPFC_EVT_DEV_LOSS:
429 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 511 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
430 lpfc_dev_loss_tmo_handler(ndlp); 512 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
431 free_evt = 0; 513 free_evt = 0;
432 /* decrement the node reference count held for 514 /* decrement the node reference count held for
433 * this queued work 515 * this queued work
434 */ 516 */
517 nlp_did = ndlp->nlp_DID;
435 lpfc_nlp_put(ndlp); 518 lpfc_nlp_put(ndlp);
519 if (phba->sli_rev == LPFC_SLI_REV4)
520 lpfc_sli4_post_dev_loss_tmo_handler(phba,
521 fcf_inuse,
522 nlp_did);
436 break; 523 break;
437 case LPFC_EVT_ONLINE: 524 case LPFC_EVT_ONLINE:
438 if (phba->link_state < LPFC_LINK_DOWN) 525 if (phba->link_state < LPFC_LINK_DOWN)
@@ -707,6 +794,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
707 : NLP_EVT_DEVICE_RECOVERY); 794 : NLP_EVT_DEVICE_RECOVERY);
708 } 795 }
709 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { 796 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
797 if (phba->sli_rev == LPFC_SLI_REV4)
798 lpfc_sli4_unreg_all_rpis(vport);
710 lpfc_mbx_unreg_vpi(vport); 799 lpfc_mbx_unreg_vpi(vport);
711 spin_lock_irq(shost->host_lock); 800 spin_lock_irq(shost->host_lock);
712 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 801 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -1021,8 +1110,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1021 "2017 REG_FCFI mbxStatus error x%x " 1110 "2017 REG_FCFI mbxStatus error x%x "
1022 "HBA state x%x\n", 1111 "HBA state x%x\n",
1023 mboxq->u.mb.mbxStatus, vport->port_state); 1112 mboxq->u.mb.mbxStatus, vport->port_state);
1024 mempool_free(mboxq, phba->mbox_mem_pool); 1113 goto fail_out;
1025 return;
1026 } 1114 }
1027 1115
1028 /* Start FCoE discovery by sending a FLOGI. */ 1116 /* Start FCoE discovery by sending a FLOGI. */
@@ -1031,20 +1119,30 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1031 spin_lock_irq(&phba->hbalock); 1119 spin_lock_irq(&phba->hbalock);
1032 phba->fcf.fcf_flag |= FCF_REGISTERED; 1120 phba->fcf.fcf_flag |= FCF_REGISTERED;
1033 spin_unlock_irq(&phba->hbalock); 1121 spin_unlock_irq(&phba->hbalock);
1122
1034 /* If there is a pending FCoE event, restart FCF table scan. */ 1123 /* If there is a pending FCoE event, restart FCF table scan. */
1035 if (lpfc_check_pending_fcoe_event(phba, 1)) { 1124 if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
1036 mempool_free(mboxq, phba->mbox_mem_pool); 1125 goto fail_out;
1037 return; 1126
1038 } 1127 /* Mark successful completion of FCF table scan */
1039 spin_lock_irq(&phba->hbalock); 1128 spin_lock_irq(&phba->hbalock);
1040 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1129 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1041 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1130 phba->hba_flag &= ~FCF_TS_INPROG;
1042 spin_unlock_irq(&phba->hbalock); 1131 if (vport->port_state != LPFC_FLOGI) {
1043 if (vport->port_state != LPFC_FLOGI) 1132 phba->hba_flag |= FCF_RR_INPROG;
1133 spin_unlock_irq(&phba->hbalock);
1044 lpfc_initial_flogi(vport); 1134 lpfc_initial_flogi(vport);
1135 goto out;
1136 }
1137 spin_unlock_irq(&phba->hbalock);
1138 goto out;
1045 1139
1140fail_out:
1141 spin_lock_irq(&phba->hbalock);
1142 phba->hba_flag &= ~FCF_RR_INPROG;
1143 spin_unlock_irq(&phba->hbalock);
1144out:
1046 mempool_free(mboxq, phba->mbox_mem_pool); 1145 mempool_free(mboxq, phba->mbox_mem_pool);
1047 return;
1048} 1146}
1049 1147
1050/** 1148/**
@@ -1241,10 +1339,9 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1241 int rc; 1339 int rc;
1242 1340
1243 spin_lock_irq(&phba->hbalock); 1341 spin_lock_irq(&phba->hbalock);
1244
1245 /* If the FCF is not availabe do nothing. */ 1342 /* If the FCF is not availabe do nothing. */
1246 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1343 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1247 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1344 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1248 spin_unlock_irq(&phba->hbalock); 1345 spin_unlock_irq(&phba->hbalock);
1249 return; 1346 return;
1250 } 1347 }
@@ -1252,19 +1349,22 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1252 /* The FCF is already registered, start discovery */ 1349 /* The FCF is already registered, start discovery */
1253 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1350 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1254 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1351 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1255 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1352 phba->hba_flag &= ~FCF_TS_INPROG;
1256 spin_unlock_irq(&phba->hbalock); 1353 if (phba->pport->port_state != LPFC_FLOGI) {
1257 if (phba->pport->port_state != LPFC_FLOGI) 1354 phba->hba_flag |= FCF_RR_INPROG;
1355 spin_unlock_irq(&phba->hbalock);
1258 lpfc_initial_flogi(phba->pport); 1356 lpfc_initial_flogi(phba->pport);
1357 return;
1358 }
1359 spin_unlock_irq(&phba->hbalock);
1259 return; 1360 return;
1260 } 1361 }
1261 spin_unlock_irq(&phba->hbalock); 1362 spin_unlock_irq(&phba->hbalock);
1262 1363
1263 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1364 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1264 GFP_KERNEL);
1265 if (!fcf_mbxq) { 1365 if (!fcf_mbxq) {
1266 spin_lock_irq(&phba->hbalock); 1366 spin_lock_irq(&phba->hbalock);
1267 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1367 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1268 spin_unlock_irq(&phba->hbalock); 1368 spin_unlock_irq(&phba->hbalock);
1269 return; 1369 return;
1270 } 1370 }
@@ -1275,7 +1375,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1275 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1375 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1276 if (rc == MBX_NOT_FINISHED) { 1376 if (rc == MBX_NOT_FINISHED) {
1277 spin_lock_irq(&phba->hbalock); 1377 spin_lock_irq(&phba->hbalock);
1278 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1378 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1279 spin_unlock_irq(&phba->hbalock); 1379 spin_unlock_irq(&phba->hbalock);
1280 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1380 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1281 } 1381 }
@@ -1493,7 +1593,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1493 * FCF discovery, no need to restart FCF discovery. 1593 * FCF discovery, no need to restart FCF discovery.
1494 */ 1594 */
1495 if ((phba->link_state >= LPFC_LINK_UP) && 1595 if ((phba->link_state >= LPFC_LINK_UP) &&
1496 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1596 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1497 return 0; 1597 return 0;
1498 1598
1499 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1599 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
@@ -1517,14 +1617,14 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1517 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 1617 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
1518 } else { 1618 } else {
1519 /* 1619 /*
1520 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS 1620 * Do not continue FCF discovery and clear FCF_TS_INPROG
1521 * flag 1621 * flag
1522 */ 1622 */
1523 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 1623 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1524 "2833 Stop FCF discovery process due to link " 1624 "2833 Stop FCF discovery process due to link "
1525 "state change (x%x)\n", phba->link_state); 1625 "state change (x%x)\n", phba->link_state);
1526 spin_lock_irq(&phba->hbalock); 1626 spin_lock_irq(&phba->hbalock);
1527 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1627 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1528 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1628 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
1529 spin_unlock_irq(&phba->hbalock); 1629 spin_unlock_irq(&phba->hbalock);
1530 } 1630 }
@@ -1729,6 +1829,65 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1729} 1829}
1730 1830
1731/** 1831/**
1832 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
1833 * @vport: Pointer to vport object.
1834 * @fcf_index: index to next fcf.
1835 *
1836 * This function processing the roundrobin fcf failover to next fcf index.
1837 * When this function is invoked, there will be a current fcf registered
1838 * for flogi.
1839 * Return: 0 for continue retrying flogi on currently registered fcf;
1840 * 1 for stop flogi on currently registered fcf;
1841 */
1842int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
1843{
1844 struct lpfc_hba *phba = vport->phba;
1845 int rc;
1846
1847 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
1848 spin_lock_irq(&phba->hbalock);
1849 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
1850 spin_unlock_irq(&phba->hbalock);
1851 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1852 "2872 Devloss tmo with no eligible "
1853 "FCF, unregister in-use FCF (x%x) "
1854 "and rescan FCF table\n",
1855 phba->fcf.current_rec.fcf_indx);
1856 lpfc_unregister_fcf_rescan(phba);
1857 goto stop_flogi_current_fcf;
1858 }
1859 /* Mark the end to FLOGI roundrobin failover */
1860 phba->hba_flag &= ~FCF_RR_INPROG;
1861 /* Allow action to new fcf asynchronous event */
1862 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
1863 spin_unlock_irq(&phba->hbalock);
1864 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1865 "2865 No FCF available, stop roundrobin FCF "
1866 "failover and change port state:x%x/x%x\n",
1867 phba->pport->port_state, LPFC_VPORT_UNKNOWN);
1868 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1869 goto stop_flogi_current_fcf;
1870 } else {
1871 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
1872 "2794 Try FLOGI roundrobin FCF failover to "
1873 "(x%x)\n", fcf_index);
1874 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
1875 if (rc)
1876 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1877 "2761 FLOGI roundrobin FCF failover "
1878 "failed (rc:x%x) to read FCF (x%x)\n",
1879 rc, phba->fcf.current_rec.fcf_indx);
1880 else
1881 goto stop_flogi_current_fcf;
1882 }
1883 return 0;
1884
1885stop_flogi_current_fcf:
1886 lpfc_can_disctmo(vport);
1887 return 1;
1888}
1889
1890/**
1732 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 1891 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1733 * @phba: pointer to lpfc hba data structure. 1892 * @phba: pointer to lpfc hba data structure.
1734 * @mboxq: pointer to mailbox object. 1893 * @mboxq: pointer to mailbox object.
@@ -1756,7 +1915,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1756 int rc; 1915 int rc;
1757 1916
1758 /* If there is pending FCoE event restart FCF table scan */ 1917 /* If there is pending FCoE event restart FCF table scan */
1759 if (lpfc_check_pending_fcoe_event(phba, 0)) { 1918 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
1760 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1919 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1761 return; 1920 return;
1762 } 1921 }
@@ -1765,12 +1924,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1765 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 1924 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1766 &next_fcf_index); 1925 &next_fcf_index);
1767 if (!new_fcf_record) { 1926 if (!new_fcf_record) {
1768 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1927 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1769 "2765 Mailbox command READ_FCF_RECORD " 1928 "2765 Mailbox command READ_FCF_RECORD "
1770 "failed to retrieve a FCF record.\n"); 1929 "failed to retrieve a FCF record.\n");
1771 /* Let next new FCF event trigger fast failover */ 1930 /* Let next new FCF event trigger fast failover */
1772 spin_lock_irq(&phba->hbalock); 1931 spin_lock_irq(&phba->hbalock);
1773 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1932 phba->hba_flag &= ~FCF_TS_INPROG;
1774 spin_unlock_irq(&phba->hbalock); 1933 spin_unlock_irq(&phba->hbalock);
1775 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1934 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1776 return; 1935 return;
@@ -1787,13 +1946,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1787 /* 1946 /*
1788 * If the fcf record does not match with connect list entries 1947 * If the fcf record does not match with connect list entries
1789 * read the next entry; otherwise, this is an eligible FCF 1948 * read the next entry; otherwise, this is an eligible FCF
1790 * record for round robin FCF failover. 1949 * record for roundrobin FCF failover.
1791 */ 1950 */
1792 if (!rc) { 1951 if (!rc) {
1793 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1952 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1794 "2781 FCF record (x%x) failed FCF " 1953 "2781 FCF (x%x) failed connection "
1795 "connection list check, fcf_avail:x%x, " 1954 "list check: (x%x/x%x)\n",
1796 "fcf_valid:x%x\n",
1797 bf_get(lpfc_fcf_record_fcf_index, 1955 bf_get(lpfc_fcf_record_fcf_index,
1798 new_fcf_record), 1956 new_fcf_record),
1799 bf_get(lpfc_fcf_record_fcf_avail, 1957 bf_get(lpfc_fcf_record_fcf_avail,
@@ -1803,6 +1961,16 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1803 if ((phba->fcf.fcf_flag & FCF_IN_USE) && 1961 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
1804 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 1962 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
1805 new_fcf_record, LPFC_FCOE_IGNORE_VID)) { 1963 new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
1964 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
1965 phba->fcf.current_rec.fcf_indx) {
1966 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1967 "2862 FCF (x%x) matches property "
1968 "of in-use FCF (x%x)\n",
1969 bf_get(lpfc_fcf_record_fcf_index,
1970 new_fcf_record),
1971 phba->fcf.current_rec.fcf_indx);
1972 goto read_next_fcf;
1973 }
1806 /* 1974 /*
1807 * In case the current in-use FCF record becomes 1975 * In case the current in-use FCF record becomes
1808 * invalid/unavailable during FCF discovery that 1976 * invalid/unavailable during FCF discovery that
@@ -1813,9 +1981,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1813 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 1981 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
1814 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1982 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1815 "2835 Invalid in-use FCF " 1983 "2835 Invalid in-use FCF "
1816 "record (x%x) reported, " 1984 "(x%x), enter FCF failover "
1817 "entering fast FCF failover " 1985 "table scan.\n",
1818 "mode scanning.\n",
1819 phba->fcf.current_rec.fcf_indx); 1986 phba->fcf.current_rec.fcf_indx);
1820 spin_lock_irq(&phba->hbalock); 1987 spin_lock_irq(&phba->hbalock);
1821 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 1988 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
@@ -1844,22 +2011,29 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1844 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2011 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1845 if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2012 if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
1846 new_fcf_record, vlan_id)) { 2013 new_fcf_record, vlan_id)) {
1847 phba->fcf.fcf_flag |= FCF_AVAILABLE; 2014 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
1848 if (phba->fcf.fcf_flag & FCF_REDISC_PEND) 2015 phba->fcf.current_rec.fcf_indx) {
1849 /* Stop FCF redisc wait timer if pending */ 2016 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1850 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2017 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
1851 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2018 /* Stop FCF redisc wait timer */
1852 /* If in fast failover, mark it's completed */ 2019 __lpfc_sli4_stop_fcf_redisc_wait_timer(
1853 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2020 phba);
1854 spin_unlock_irq(&phba->hbalock); 2021 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
1855 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2022 /* Fast failover, mark completed */
1856 "2836 The new FCF record (x%x) " 2023 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
1857 "matches the in-use FCF record " 2024 spin_unlock_irq(&phba->hbalock);
1858 "(x%x)\n", 2025 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1859 phba->fcf.current_rec.fcf_indx, 2026 "2836 New FCF matches in-use "
2027 "FCF (x%x)\n",
2028 phba->fcf.current_rec.fcf_indx);
2029 goto out;
2030 } else
2031 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2032 "2863 New FCF (x%x) matches "
2033 "property of in-use FCF (x%x)\n",
1860 bf_get(lpfc_fcf_record_fcf_index, 2034 bf_get(lpfc_fcf_record_fcf_index,
1861 new_fcf_record)); 2035 new_fcf_record),
1862 goto out; 2036 phba->fcf.current_rec.fcf_indx);
1863 } 2037 }
1864 /* 2038 /*
1865 * Read next FCF record from HBA searching for the matching 2039 * Read next FCF record from HBA searching for the matching
@@ -1953,8 +2127,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1953 */ 2127 */
1954 if (fcf_rec) { 2128 if (fcf_rec) {
1955 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2129 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1956 "2840 Update current FCF record " 2130 "2840 Update initial FCF candidate "
1957 "with initial FCF record (x%x)\n", 2131 "with FCF (x%x)\n",
1958 bf_get(lpfc_fcf_record_fcf_index, 2132 bf_get(lpfc_fcf_record_fcf_index,
1959 new_fcf_record)); 2133 new_fcf_record));
1960 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2134 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
@@ -1984,20 +2158,28 @@ read_next_fcf:
1984 */ 2158 */
1985 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 2159 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
1986 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2160 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1987 "2782 No suitable FCF record " 2161 "2782 No suitable FCF found: "
1988 "found during this round of " 2162 "(x%x/x%x)\n",
1989 "post FCF rediscovery scan: "
1990 "fcf_evt_tag:x%x, fcf_index: "
1991 "x%x\n",
1992 phba->fcoe_eventtag_at_fcf_scan, 2163 phba->fcoe_eventtag_at_fcf_scan,
1993 bf_get(lpfc_fcf_record_fcf_index, 2164 bf_get(lpfc_fcf_record_fcf_index,
1994 new_fcf_record)); 2165 new_fcf_record));
2166 spin_lock_irq(&phba->hbalock);
2167 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2168 phba->hba_flag &= ~FCF_TS_INPROG;
2169 spin_unlock_irq(&phba->hbalock);
2170 /* Unregister in-use FCF and rescan */
2171 lpfc_printf_log(phba, KERN_INFO,
2172 LOG_FIP,
2173 "2864 On devloss tmo "
2174 "unreg in-use FCF and "
2175 "rescan FCF table\n");
2176 lpfc_unregister_fcf_rescan(phba);
2177 return;
2178 }
1995 /* 2179 /*
1996 * Let next new FCF event trigger fast 2180 * Let next new FCF event trigger fast failover
1997 * failover
1998 */ 2181 */
1999 spin_lock_irq(&phba->hbalock); 2182 phba->hba_flag &= ~FCF_TS_INPROG;
2000 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
2001 spin_unlock_irq(&phba->hbalock); 2183 spin_unlock_irq(&phba->hbalock);
2002 return; 2184 return;
2003 } 2185 }
@@ -2015,9 +2197,8 @@ read_next_fcf:
2015 2197
2016 /* Replace in-use record with the new record */ 2198 /* Replace in-use record with the new record */
2017 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2199 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2018 "2842 Replace the current in-use " 2200 "2842 Replace in-use FCF (x%x) "
2019 "FCF record (x%x) with failover FCF " 2201 "with failover FCF (x%x)\n",
2020 "record (x%x)\n",
2021 phba->fcf.current_rec.fcf_indx, 2202 phba->fcf.current_rec.fcf_indx,
2022 phba->fcf.failover_rec.fcf_indx); 2203 phba->fcf.failover_rec.fcf_indx);
2023 memcpy(&phba->fcf.current_rec, 2204 memcpy(&phba->fcf.current_rec,
@@ -2029,15 +2210,8 @@ read_next_fcf:
2029 * FCF failover. 2210 * FCF failover.
2030 */ 2211 */
2031 spin_lock_irq(&phba->hbalock); 2212 spin_lock_irq(&phba->hbalock);
2032 phba->fcf.fcf_flag &= 2213 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2033 ~(FCF_REDISC_FOV | FCF_REDISC_RRU);
2034 spin_unlock_irq(&phba->hbalock); 2214 spin_unlock_irq(&phba->hbalock);
2035 /*
2036 * Set up the initial registered FCF index for FLOGI
2037 * round robin FCF failover.
2038 */
2039 phba->fcf.fcf_rr_init_indx =
2040 phba->fcf.failover_rec.fcf_indx;
2041 /* Register to the new FCF record */ 2215 /* Register to the new FCF record */
2042 lpfc_register_fcf(phba); 2216 lpfc_register_fcf(phba);
2043 } else { 2217 } else {
@@ -2069,28 +2243,6 @@ read_next_fcf:
2069 LPFC_FCOE_FCF_GET_FIRST); 2243 LPFC_FCOE_FCF_GET_FIRST);
2070 return; 2244 return;
2071 } 2245 }
2072
2073 /*
2074 * Otherwise, initial scan or post linkdown rescan,
2075 * register with the best FCF record found so far
2076 * through the FCF scanning process.
2077 */
2078
2079 /*
2080 * Mark the initial FCF discovery completed and
2081 * the start of the first round of the roundrobin
2082 * FCF failover.
2083 */
2084 spin_lock_irq(&phba->hbalock);
2085 phba->fcf.fcf_flag &=
2086 ~(FCF_INIT_DISC | FCF_REDISC_RRU);
2087 spin_unlock_irq(&phba->hbalock);
2088 /*
2089 * Set up the initial registered FCF index for FLOGI
2090 * round robin FCF failover
2091 */
2092 phba->fcf.fcf_rr_init_indx =
2093 phba->fcf.current_rec.fcf_indx;
2094 /* Register to the new FCF record */ 2246 /* Register to the new FCF record */
2095 lpfc_register_fcf(phba); 2247 lpfc_register_fcf(phba);
2096 } 2248 }
@@ -2106,11 +2258,11 @@ out:
2106} 2258}
2107 2259
2108/** 2260/**
2109 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler 2261 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
2110 * @phba: pointer to lpfc hba data structure. 2262 * @phba: pointer to lpfc hba data structure.
2111 * @mboxq: pointer to mailbox object. 2263 * @mboxq: pointer to mailbox object.
2112 * 2264 *
2113 * This is the callback function for FLOGI failure round robin FCF failover 2265 * This is the callback function for FLOGI failure roundrobin FCF failover
2114 * read FCF record mailbox command from the eligible FCF record bmask for 2266 * read FCF record mailbox command from the eligible FCF record bmask for
2115 * performing the failover. If the FCF read back is not valid/available, it 2267 * performing the failover. If the FCF read back is not valid/available, it
2116 * fails through to retrying FLOGI to the currently registered FCF again. 2268 * fails through to retrying FLOGI to the currently registered FCF again.
@@ -2125,17 +2277,18 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2125{ 2277{
2126 struct fcf_record *new_fcf_record; 2278 struct fcf_record *new_fcf_record;
2127 uint32_t boot_flag, addr_mode; 2279 uint32_t boot_flag, addr_mode;
2128 uint16_t next_fcf_index; 2280 uint16_t next_fcf_index, fcf_index;
2129 uint16_t current_fcf_index; 2281 uint16_t current_fcf_index;
2130 uint16_t vlan_id; 2282 uint16_t vlan_id;
2283 int rc;
2131 2284
2132 /* If link state is not up, stop the round robin failover process */ 2285 /* If link state is not up, stop the roundrobin failover process */
2133 if (phba->link_state < LPFC_LINK_UP) { 2286 if (phba->link_state < LPFC_LINK_UP) {
2134 spin_lock_irq(&phba->hbalock); 2287 spin_lock_irq(&phba->hbalock);
2135 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2288 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2289 phba->hba_flag &= ~FCF_RR_INPROG;
2136 spin_unlock_irq(&phba->hbalock); 2290 spin_unlock_irq(&phba->hbalock);
2137 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2291 goto out;
2138 return;
2139 } 2292 }
2140 2293
2141 /* Parse the FCF record from the non-embedded mailbox command */ 2294 /* Parse the FCF record from the non-embedded mailbox command */
@@ -2145,23 +2298,47 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2145 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2298 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2146 "2766 Mailbox command READ_FCF_RECORD " 2299 "2766 Mailbox command READ_FCF_RECORD "
2147 "failed to retrieve a FCF record.\n"); 2300 "failed to retrieve a FCF record.\n");
2148 goto out; 2301 goto error_out;
2149 } 2302 }
2150 2303
2151 /* Get the needed parameters from FCF record */ 2304 /* Get the needed parameters from FCF record */
2152 lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2305 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2153 &addr_mode, &vlan_id); 2306 &addr_mode, &vlan_id);
2154 2307
2155 /* Log the FCF record information if turned on */ 2308 /* Log the FCF record information if turned on */
2156 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2309 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2157 next_fcf_index); 2310 next_fcf_index);
2158 2311
2312 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2313 if (!rc) {
2314 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2315 "2848 Remove ineligible FCF (x%x) from "
2316 "from roundrobin bmask\n", fcf_index);
2317 /* Clear roundrobin bmask bit for ineligible FCF */
2318 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2319 /* Perform next round of roundrobin FCF failover */
2320 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2321 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2322 if (rc)
2323 goto out;
2324 goto error_out;
2325 }
2326
2327 if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2328 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2329 "2760 Perform FLOGI roundrobin FCF failover: "
2330 "FCF (x%x) back to FCF (x%x)\n",
2331 phba->fcf.current_rec.fcf_indx, fcf_index);
2332 /* Wait 500 ms before retrying FLOGI to current FCF */
2333 msleep(500);
2334 lpfc_initial_flogi(phba->pport);
2335 goto out;
2336 }
2337
2159 /* Upload new FCF record to the failover FCF record */ 2338 /* Upload new FCF record to the failover FCF record */
2160 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2339 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2161 "2834 Update the current FCF record (x%x) " 2340 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2162 "with the next FCF record (x%x)\n", 2341 phba->fcf.failover_rec.fcf_indx, fcf_index);
2163 phba->fcf.failover_rec.fcf_indx,
2164 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2165 spin_lock_irq(&phba->hbalock); 2342 spin_lock_irq(&phba->hbalock);
2166 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2343 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2167 new_fcf_record, addr_mode, vlan_id, 2344 new_fcf_record, addr_mode, vlan_id,
@@ -2178,14 +2355,13 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2178 sizeof(struct lpfc_fcf_rec)); 2355 sizeof(struct lpfc_fcf_rec));
2179 2356
2180 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2357 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2181 "2783 FLOGI round robin FCF failover from FCF " 2358 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2182 "(x%x) to FCF (x%x).\n", 2359 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2183 current_fcf_index,
2184 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2185 2360
2361error_out:
2362 lpfc_register_fcf(phba);
2186out: 2363out:
2187 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2364 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2188 lpfc_register_fcf(phba);
2189} 2365}
2190 2366
2191/** 2367/**
@@ -2194,10 +2370,10 @@ out:
2194 * @mboxq: pointer to mailbox object. 2370 * @mboxq: pointer to mailbox object.
2195 * 2371 *
2196 * This is the callback function of read FCF record mailbox command for 2372 * This is the callback function of read FCF record mailbox command for
2197 * updating the eligible FCF bmask for FLOGI failure round robin FCF 2373 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
2198 * failover when a new FCF event happened. If the FCF read back is 2374 * failover when a new FCF event happened. If the FCF read back is
2199 * valid/available and it passes the connection list check, it updates 2375 * valid/available and it passes the connection list check, it updates
2200 * the bmask for the eligible FCF record for round robin failover. 2376 * the bmask for the eligible FCF record for roundrobin failover.
2201 */ 2377 */
2202void 2378void
2203lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2379lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
@@ -2639,7 +2815,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2639 * and get the FCF Table. 2815 * and get the FCF Table.
2640 */ 2816 */
2641 spin_lock_irq(&phba->hbalock); 2817 spin_lock_irq(&phba->hbalock);
2642 if (phba->hba_flag & FCF_DISC_INPROGRESS) { 2818 if (phba->hba_flag & FCF_TS_INPROG) {
2643 spin_unlock_irq(&phba->hbalock); 2819 spin_unlock_irq(&phba->hbalock);
2644 return; 2820 return;
2645 } 2821 }
@@ -3906,6 +4082,11 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
3906 LPFC_MBOXQ_t *mbox; 4082 LPFC_MBOXQ_t *mbox;
3907 int rc; 4083 int rc;
3908 4084
4085 if (phba->sli_rev == LPFC_SLI_REV4) {
4086 lpfc_sli4_unreg_all_rpis(vport);
4087 return;
4088 }
4089
3909 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4090 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3910 if (mbox) { 4091 if (mbox) {
3911 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); 4092 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
@@ -3992,6 +4173,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3992 } 4173 }
3993 4174
3994 spin_lock_irq(&phba->hbalock); 4175 spin_lock_irq(&phba->hbalock);
4176 /* Cleanup REG_LOGIN completions which are not yet processed */
4177 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
4178 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
4179 (ndlp != (struct lpfc_nodelist *) mb->context2))
4180 continue;
4181
4182 mb->context2 = NULL;
4183 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4184 }
4185
3995 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 4186 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
3996 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 4187 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
3997 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 4188 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -5170,6 +5361,8 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
5170 if (ndlp) 5361 if (ndlp)
5171 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 5362 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
5172 lpfc_cleanup_pending_mbox(vports[i]); 5363 lpfc_cleanup_pending_mbox(vports[i]);
5364 if (phba->sli_rev == LPFC_SLI_REV4)
5365 lpfc_sli4_unreg_all_rpis(vports[i]);
5173 lpfc_mbx_unreg_vpi(vports[i]); 5366 lpfc_mbx_unreg_vpi(vports[i]);
5174 shost = lpfc_shost_from_vport(vports[i]); 5367 shost = lpfc_shost_from_vport(vports[i]);
5175 spin_lock_irq(shost->host_lock); 5368 spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index a631647051d..9b833345646 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -861,6 +861,47 @@ typedef struct _RPS_RSP { /* Structure is in Big Endian format */
861 uint32_t crcCnt; 861 uint32_t crcCnt;
862} RPS_RSP; 862} RPS_RSP;
863 863
864struct RLS { /* Structure is in Big Endian format */
865 uint32_t rls;
866#define rls_rsvd_SHIFT 24
867#define rls_rsvd_MASK 0x000000ff
868#define rls_rsvd_WORD rls
869#define rls_did_SHIFT 0
870#define rls_did_MASK 0x00ffffff
871#define rls_did_WORD rls
872};
873
874struct RLS_RSP { /* Structure is in Big Endian format */
875 uint32_t linkFailureCnt;
876 uint32_t lossSyncCnt;
877 uint32_t lossSignalCnt;
878 uint32_t primSeqErrCnt;
879 uint32_t invalidXmitWord;
880 uint32_t crcCnt;
881};
882
883struct RTV_RSP { /* Structure is in Big Endian format */
884 uint32_t ratov;
885 uint32_t edtov;
886 uint32_t qtov;
887#define qtov_rsvd0_SHIFT 28
888#define qtov_rsvd0_MASK 0x0000000f
889#define qtov_rsvd0_WORD qtov /* reserved */
890#define qtov_edtovres_SHIFT 27
891#define qtov_edtovres_MASK 0x00000001
892#define qtov_edtovres_WORD qtov /* E_D_TOV Resolution */
893#define qtov__rsvd1_SHIFT 19
894#define qtov_rsvd1_MASK 0x0000003f
895#define qtov_rsvd1_WORD qtov /* reserved */
896#define qtov_rttov_SHIFT 18
897#define qtov_rttov_MASK 0x00000001
898#define qtov_rttov_WORD qtov /* R_T_TOV value */
899#define qtov_rsvd2_SHIFT 0
900#define qtov_rsvd2_MASK 0x0003ffff
901#define qtov_rsvd2_WORD qtov /* reserved */
902};
903
904
864typedef struct _RPL { /* Structure is in Big Endian format */ 905typedef struct _RPL { /* Structure is in Big Endian format */
865 uint32_t maxsize; 906 uint32_t maxsize;
866 uint32_t index; 907 uint32_t index;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bbdcf96800f..6e4bc34e1d0 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -424,79 +424,6 @@ struct lpfc_rcqe {
424#define FCOE_SOFn3 0x36 424#define FCOE_SOFn3 0x36
425}; 425};
426 426
427struct lpfc_wqe_generic{
428 struct ulp_bde64 bde;
429 uint32_t word3;
430 uint32_t word4;
431 uint32_t word5;
432 uint32_t word6;
433#define lpfc_wqe_gen_context_SHIFT 16
434#define lpfc_wqe_gen_context_MASK 0x0000FFFF
435#define lpfc_wqe_gen_context_WORD word6
436#define lpfc_wqe_gen_xri_SHIFT 0
437#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
438#define lpfc_wqe_gen_xri_WORD word6
439 uint32_t word7;
440#define lpfc_wqe_gen_lnk_SHIFT 23
441#define lpfc_wqe_gen_lnk_MASK 0x00000001
442#define lpfc_wqe_gen_lnk_WORD word7
443#define lpfc_wqe_gen_erp_SHIFT 22
444#define lpfc_wqe_gen_erp_MASK 0x00000001
445#define lpfc_wqe_gen_erp_WORD word7
446#define lpfc_wqe_gen_pu_SHIFT 20
447#define lpfc_wqe_gen_pu_MASK 0x00000003
448#define lpfc_wqe_gen_pu_WORD word7
449#define lpfc_wqe_gen_class_SHIFT 16
450#define lpfc_wqe_gen_class_MASK 0x00000007
451#define lpfc_wqe_gen_class_WORD word7
452#define lpfc_wqe_gen_command_SHIFT 8
453#define lpfc_wqe_gen_command_MASK 0x000000FF
454#define lpfc_wqe_gen_command_WORD word7
455#define lpfc_wqe_gen_status_SHIFT 4
456#define lpfc_wqe_gen_status_MASK 0x0000000F
457#define lpfc_wqe_gen_status_WORD word7
458#define lpfc_wqe_gen_ct_SHIFT 2
459#define lpfc_wqe_gen_ct_MASK 0x00000003
460#define lpfc_wqe_gen_ct_WORD word7
461 uint32_t abort_tag;
462 uint32_t word9;
463#define lpfc_wqe_gen_request_tag_SHIFT 0
464#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
465#define lpfc_wqe_gen_request_tag_WORD word9
466 uint32_t word10;
467#define lpfc_wqe_gen_ccp_SHIFT 24
468#define lpfc_wqe_gen_ccp_MASK 0x000000FF
469#define lpfc_wqe_gen_ccp_WORD word10
470#define lpfc_wqe_gen_ccpe_SHIFT 23
471#define lpfc_wqe_gen_ccpe_MASK 0x00000001
472#define lpfc_wqe_gen_ccpe_WORD word10
473#define lpfc_wqe_gen_pv_SHIFT 19
474#define lpfc_wqe_gen_pv_MASK 0x00000001
475#define lpfc_wqe_gen_pv_WORD word10
476#define lpfc_wqe_gen_pri_SHIFT 16
477#define lpfc_wqe_gen_pri_MASK 0x00000007
478#define lpfc_wqe_gen_pri_WORD word10
479 uint32_t word11;
480#define lpfc_wqe_gen_cq_id_SHIFT 16
481#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
482#define lpfc_wqe_gen_cq_id_WORD word11
483#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
484#define lpfc_wqe_gen_wqec_SHIFT 7
485#define lpfc_wqe_gen_wqec_MASK 0x00000001
486#define lpfc_wqe_gen_wqec_WORD word11
487#define ELS_ID_FLOGI 3
488#define ELS_ID_FDISC 2
489#define ELS_ID_LOGO 1
490#define ELS_ID_DEFAULT 0
491#define lpfc_wqe_gen_els_id_SHIFT 4
492#define lpfc_wqe_gen_els_id_MASK 0x00000003
493#define lpfc_wqe_gen_els_id_WORD word11
494#define lpfc_wqe_gen_cmd_type_SHIFT 0
495#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
496#define lpfc_wqe_gen_cmd_type_WORD word11
497 uint32_t payload[4];
498};
499
500struct lpfc_rqe { 427struct lpfc_rqe {
501 uint32_t address_hi; 428 uint32_t address_hi;
502 uint32_t address_lo; 429 uint32_t address_lo;
@@ -2279,9 +2206,36 @@ struct wqe_common {
2279#define wqe_reqtag_MASK 0x0000FFFF 2206#define wqe_reqtag_MASK 0x0000FFFF
2280#define wqe_reqtag_WORD word9 2207#define wqe_reqtag_WORD word9
2281#define wqe_rcvoxid_SHIFT 16 2208#define wqe_rcvoxid_SHIFT 16
2282#define wqe_rcvoxid_MASK 0x0000FFFF 2209#define wqe_rcvoxid_MASK 0x0000FFFF
2283#define wqe_rcvoxid_WORD word9 2210#define wqe_rcvoxid_WORD word9
2284 uint32_t word10; 2211 uint32_t word10;
2212#define wqe_ebde_cnt_SHIFT 0
2213#define wqe_ebde_cnt_MASK 0x00000007
2214#define wqe_ebde_cnt_WORD word10
2215#define wqe_lenloc_SHIFT 7
2216#define wqe_lenloc_MASK 0x00000003
2217#define wqe_lenloc_WORD word10
2218#define LPFC_WQE_LENLOC_NONE 0
2219#define LPFC_WQE_LENLOC_WORD3 1
2220#define LPFC_WQE_LENLOC_WORD12 2
2221#define LPFC_WQE_LENLOC_WORD4 3
2222#define wqe_qosd_SHIFT 9
2223#define wqe_qosd_MASK 0x00000001
2224#define wqe_qosd_WORD word10
2225#define wqe_xbl_SHIFT 11
2226#define wqe_xbl_MASK 0x00000001
2227#define wqe_xbl_WORD word10
2228#define wqe_iod_SHIFT 13
2229#define wqe_iod_MASK 0x00000001
2230#define wqe_iod_WORD word10
2231#define LPFC_WQE_IOD_WRITE 0
2232#define LPFC_WQE_IOD_READ 1
2233#define wqe_dbde_SHIFT 14
2234#define wqe_dbde_MASK 0x00000001
2235#define wqe_dbde_WORD word10
2236#define wqe_wqes_SHIFT 15
2237#define wqe_wqes_MASK 0x00000001
2238#define wqe_wqes_WORD word10
2285#define wqe_pri_SHIFT 16 2239#define wqe_pri_SHIFT 16
2286#define wqe_pri_MASK 0x00000007 2240#define wqe_pri_MASK 0x00000007
2287#define wqe_pri_WORD word10 2241#define wqe_pri_WORD word10
@@ -2295,18 +2249,26 @@ struct wqe_common {
2295#define wqe_ccpe_MASK 0x00000001 2249#define wqe_ccpe_MASK 0x00000001
2296#define wqe_ccpe_WORD word10 2250#define wqe_ccpe_WORD word10
2297#define wqe_ccp_SHIFT 24 2251#define wqe_ccp_SHIFT 24
2298#define wqe_ccp_MASK 0x000000ff 2252#define wqe_ccp_MASK 0x000000ff
2299#define wqe_ccp_WORD word10 2253#define wqe_ccp_WORD word10
2300 uint32_t word11; 2254 uint32_t word11;
2301#define wqe_cmd_type_SHIFT 0 2255#define wqe_cmd_type_SHIFT 0
2302#define wqe_cmd_type_MASK 0x0000000f 2256#define wqe_cmd_type_MASK 0x0000000f
2303#define wqe_cmd_type_WORD word11 2257#define wqe_cmd_type_WORD word11
2304#define wqe_wqec_SHIFT 7 2258#define wqe_els_id_SHIFT 4
2305#define wqe_wqec_MASK 0x00000001 2259#define wqe_els_id_MASK 0x00000003
2306#define wqe_wqec_WORD word11 2260#define wqe_els_id_WORD word11
2307#define wqe_cqid_SHIFT 16 2261#define LPFC_ELS_ID_FLOGI 3
2308#define wqe_cqid_MASK 0x0000ffff 2262#define LPFC_ELS_ID_FDISC 2
2309#define wqe_cqid_WORD word11 2263#define LPFC_ELS_ID_LOGO 1
2264#define LPFC_ELS_ID_DEFAULT 0
2265#define wqe_wqec_SHIFT 7
2266#define wqe_wqec_MASK 0x00000001
2267#define wqe_wqec_WORD word11
2268#define wqe_cqid_SHIFT 16
2269#define wqe_cqid_MASK 0x0000ffff
2270#define wqe_cqid_WORD word11
2271#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
2310}; 2272};
2311 2273
2312struct wqe_did { 2274struct wqe_did {
@@ -2325,6 +2287,15 @@ struct wqe_did {
2325#define wqe_xmit_bls_xo_WORD word5 2287#define wqe_xmit_bls_xo_WORD word5
2326}; 2288};
2327 2289
2290struct lpfc_wqe_generic{
2291 struct ulp_bde64 bde;
2292 uint32_t word3;
2293 uint32_t word4;
2294 uint32_t word5;
2295 struct wqe_common wqe_com;
2296 uint32_t payload[4];
2297};
2298
2328struct els_request64_wqe { 2299struct els_request64_wqe {
2329 struct ulp_bde64 bde; 2300 struct ulp_bde64 bde;
2330 uint32_t payload_len; 2301 uint32_t payload_len;
@@ -2356,9 +2327,9 @@ struct els_request64_wqe {
2356 2327
2357struct xmit_els_rsp64_wqe { 2328struct xmit_els_rsp64_wqe {
2358 struct ulp_bde64 bde; 2329 struct ulp_bde64 bde;
2359 uint32_t rsvd3; 2330 uint32_t response_payload_len;
2360 uint32_t rsvd4; 2331 uint32_t rsvd4;
2361 struct wqe_did wqe_dest; 2332 struct wqe_did wqe_dest;
2362 struct wqe_common wqe_com; /* words 6-11 */ 2333 struct wqe_common wqe_com; /* words 6-11 */
2363 uint32_t rsvd_12_15[4]; 2334 uint32_t rsvd_12_15[4];
2364}; 2335};
@@ -2427,7 +2398,7 @@ struct wqe_rctl_dfctl {
2427 2398
2428struct xmit_seq64_wqe { 2399struct xmit_seq64_wqe {
2429 struct ulp_bde64 bde; 2400 struct ulp_bde64 bde;
2430 uint32_t paylaod_offset; 2401 uint32_t rsvd3;
2431 uint32_t relative_offset; 2402 uint32_t relative_offset;
2432 struct wqe_rctl_dfctl wge_ctl; 2403 struct wqe_rctl_dfctl wge_ctl;
2433 struct wqe_common wqe_com; /* words 6-11 */ 2404 struct wqe_common wqe_com; /* words 6-11 */
@@ -2437,7 +2408,7 @@ struct xmit_seq64_wqe {
2437}; 2408};
2438struct xmit_bcast64_wqe { 2409struct xmit_bcast64_wqe {
2439 struct ulp_bde64 bde; 2410 struct ulp_bde64 bde;
2440 uint32_t paylaod_len; 2411 uint32_t seq_payload_len;
2441 uint32_t rsvd4; 2412 uint32_t rsvd4;
2442 struct wqe_rctl_dfctl wge_ctl; /* word 5 */ 2413 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2443 struct wqe_common wqe_com; /* words 6-11 */ 2414 struct wqe_common wqe_com; /* words 6-11 */
@@ -2446,8 +2417,8 @@ struct xmit_bcast64_wqe {
2446 2417
2447struct gen_req64_wqe { 2418struct gen_req64_wqe {
2448 struct ulp_bde64 bde; 2419 struct ulp_bde64 bde;
2449 uint32_t command_len; 2420 uint32_t request_payload_len;
2450 uint32_t payload_len; 2421 uint32_t relative_offset;
2451 struct wqe_rctl_dfctl wge_ctl; /* word 5 */ 2422 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2452 struct wqe_common wqe_com; /* words 6-11 */ 2423 struct wqe_common wqe_com; /* words 6-11 */
2453 uint32_t rsvd_12_15[4]; 2424 uint32_t rsvd_12_15[4];
@@ -2480,7 +2451,7 @@ struct abort_cmd_wqe {
2480 2451
2481struct fcp_iwrite64_wqe { 2452struct fcp_iwrite64_wqe {
2482 struct ulp_bde64 bde; 2453 struct ulp_bde64 bde;
2483 uint32_t payload_len; 2454 uint32_t payload_offset_len;
2484 uint32_t total_xfer_len; 2455 uint32_t total_xfer_len;
2485 uint32_t initial_xfer_len; 2456 uint32_t initial_xfer_len;
2486 struct wqe_common wqe_com; /* words 6-11 */ 2457 struct wqe_common wqe_com; /* words 6-11 */
@@ -2489,7 +2460,7 @@ struct fcp_iwrite64_wqe {
2489 2460
2490struct fcp_iread64_wqe { 2461struct fcp_iread64_wqe {
2491 struct ulp_bde64 bde; 2462 struct ulp_bde64 bde;
2492 uint32_t payload_len; /* word 3 */ 2463 uint32_t payload_offset_len; /* word 3 */
2493 uint32_t total_xfer_len; /* word 4 */ 2464 uint32_t total_xfer_len; /* word 4 */
2494 uint32_t rsrvd5; /* word 5 */ 2465 uint32_t rsrvd5; /* word 5 */
2495 struct wqe_common wqe_com; /* words 6-11 */ 2466 struct wqe_common wqe_com; /* words 6-11 */
@@ -2497,10 +2468,12 @@ struct fcp_iread64_wqe {
2497}; 2468};
2498 2469
2499struct fcp_icmnd64_wqe { 2470struct fcp_icmnd64_wqe {
2500 struct ulp_bde64 bde; /* words 0-2 */ 2471 struct ulp_bde64 bde; /* words 0-2 */
2501 uint32_t rsrvd[3]; /* words 3-5 */ 2472 uint32_t rsrvd3; /* word 3 */
2473 uint32_t rsrvd4; /* word 4 */
2474 uint32_t rsrvd5; /* word 5 */
2502 struct wqe_common wqe_com; /* words 6-11 */ 2475 struct wqe_common wqe_com; /* words 6-11 */
2503 uint32_t rsvd_12_15[4]; /* word 12-15 */ 2476 uint32_t rsvd_12_15[4]; /* word 12-15 */
2504}; 2477};
2505 2478
2506 2479
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 295c7ddb36c..b3065791f30 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -813,6 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
813 813
814 return 0; 814 return 0;
815} 815}
816
816/** 817/**
817 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 818 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
818 * @phba: pointer to lpfc HBA data structure. 819 * @phba: pointer to lpfc HBA data structure.
@@ -2234,10 +2235,9 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
2234void 2235void
2235__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2236__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2236{ 2237{
2237 /* Clear pending FCF rediscovery wait and failover in progress flags */ 2238 /* Clear pending FCF rediscovery wait flag */
2238 phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | 2239 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2239 FCF_DEAD_DISC | 2240
2240 FCF_ACVL_DISC);
2241 /* Now, try to stop the timer */ 2241 /* Now, try to stop the timer */
2242 del_timer(&phba->fcf.redisc_wait); 2242 del_timer(&phba->fcf.redisc_wait);
2243} 2243}
@@ -2261,6 +2261,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2261 return; 2261 return;
2262 } 2262 }
2263 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2263 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2264 /* Clear failover in progress flags */
2265 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2264 spin_unlock_irq(&phba->hbalock); 2266 spin_unlock_irq(&phba->hbalock);
2265} 2267}
2266 2268
@@ -2935,8 +2937,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2935 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2937 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2936 spin_unlock_irq(&phba->hbalock); 2938 spin_unlock_irq(&phba->hbalock);
2937 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2939 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2938 "2776 FCF rediscover wait timer expired, post " 2940 "2776 FCF rediscover quiescent timer expired\n");
2939 "a worker thread event for FCF table scan\n");
2940 /* wake up worker thread */ 2941 /* wake up worker thread */
2941 lpfc_worker_wake_up(phba); 2942 lpfc_worker_wake_up(phba);
2942} 2943}
@@ -3311,35 +3312,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3311 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) 3312 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
3312 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3313 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3313 LOG_DISCOVERY, 3314 LOG_DISCOVERY,
3314 "2546 New FCF found event: " 3315 "2546 New FCF event, evt_tag:x%x, "
3315 "evt_tag:x%x, fcf_index:x%x\n", 3316 "index:x%x\n",
3316 acqe_fcoe->event_tag, 3317 acqe_fcoe->event_tag,
3317 acqe_fcoe->index); 3318 acqe_fcoe->index);
3318 else 3319 else
3319 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3320 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3320 LOG_DISCOVERY, 3321 LOG_DISCOVERY,
3321 "2788 FCF parameter modified event: " 3322 "2788 FCF param modified event, "
3322 "evt_tag:x%x, fcf_index:x%x\n", 3323 "evt_tag:x%x, index:x%x\n",
3323 acqe_fcoe->event_tag, 3324 acqe_fcoe->event_tag,
3324 acqe_fcoe->index); 3325 acqe_fcoe->index);
3325 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3326 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3326 /* 3327 /*
3327 * During period of FCF discovery, read the FCF 3328 * During period of FCF discovery, read the FCF
3328 * table record indexed by the event to update 3329 * table record indexed by the event to update
3329 * FCF round robin failover eligible FCF bmask. 3330 * FCF roundrobin failover eligible FCF bmask.
3330 */ 3331 */
3331 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3332 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3332 LOG_DISCOVERY, 3333 LOG_DISCOVERY,
3333 "2779 Read new FCF record with " 3334 "2779 Read FCF (x%x) for updating "
3334 "fcf_index:x%x for updating FCF " 3335 "roundrobin FCF failover bmask\n",
3335 "round robin failover bmask\n",
3336 acqe_fcoe->index); 3336 acqe_fcoe->index);
3337 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3337 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3338 } 3338 }
3339 3339
3340 /* If the FCF discovery is in progress, do nothing. */ 3340 /* If the FCF discovery is in progress, do nothing. */
3341 spin_lock_irq(&phba->hbalock); 3341 spin_lock_irq(&phba->hbalock);
3342 if (phba->hba_flag & FCF_DISC_INPROGRESS) { 3342 if (phba->hba_flag & FCF_TS_INPROG) {
3343 spin_unlock_irq(&phba->hbalock); 3343 spin_unlock_irq(&phba->hbalock);
3344 break; 3344 break;
3345 } 3345 }
@@ -3358,15 +3358,15 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3358 3358
3359 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3359 /* Otherwise, scan the entire FCF table and re-discover SAN */
3360 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3360 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3361 "2770 Start FCF table scan due to new FCF " 3361 "2770 Start FCF table scan per async FCF "
3362 "event: evt_tag:x%x, fcf_index:x%x\n", 3362 "event, evt_tag:x%x, index:x%x\n",
3363 acqe_fcoe->event_tag, acqe_fcoe->index); 3363 acqe_fcoe->event_tag, acqe_fcoe->index);
3364 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3364 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3365 LPFC_FCOE_FCF_GET_FIRST); 3365 LPFC_FCOE_FCF_GET_FIRST);
3366 if (rc) 3366 if (rc)
3367 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3367 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3368 "2547 Issue FCF scan read FCF mailbox " 3368 "2547 Issue FCF scan read FCF mailbox "
3369 "command failed 0x%x\n", rc); 3369 "command failed (x%x)\n", rc);
3370 break; 3370 break;
3371 3371
3372 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3372 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3378,9 +3378,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3378 3378
3379 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3379 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3380 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3380 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3381 "2549 FCF disconnected from network index 0x%x" 3381 "2549 FCF (x%x) disconnected from network, "
3382 " tag 0x%x\n", acqe_fcoe->index, 3382 "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3383 acqe_fcoe->event_tag);
3384 /* 3383 /*
3385 * If we are in the middle of FCF failover process, clear 3384 * If we are in the middle of FCF failover process, clear
3386 * the corresponding FCF bit in the roundrobin bitmap. 3385 * the corresponding FCF bit in the roundrobin bitmap.
@@ -3494,9 +3493,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3494 spin_unlock_irq(&phba->hbalock); 3493 spin_unlock_irq(&phba->hbalock);
3495 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3494 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3496 LOG_DISCOVERY, 3495 LOG_DISCOVERY,
3497 "2773 Start FCF fast failover due " 3496 "2773 Start FCF failover per CVL, "
3498 "to CVL event: evt_tag:x%x\n", 3497 "evt_tag:x%x\n", acqe_fcoe->event_tag);
3499 acqe_fcoe->event_tag);
3500 rc = lpfc_sli4_redisc_fcf_table(phba); 3498 rc = lpfc_sli4_redisc_fcf_table(phba);
3501 if (rc) { 3499 if (rc) {
3502 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3500 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
@@ -3646,8 +3644,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3646 3644
3647 /* Scan FCF table from the first entry to re-discover SAN */ 3645 /* Scan FCF table from the first entry to re-discover SAN */
3648 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3646 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3649 "2777 Start FCF table scan after FCF " 3647 "2777 Start post-quiescent FCF table scan\n");
3650 "rediscovery quiescent period over\n");
3651 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3648 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3652 if (rc) 3649 if (rc)
3653 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3650 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
@@ -4165,7 +4162,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4165 goto out_free_active_sgl; 4162 goto out_free_active_sgl;
4166 } 4163 }
4167 4164
4168 /* Allocate eligible FCF bmask memory for FCF round robin failover */ 4165 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4169 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4166 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4170 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4167 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4171 GFP_KERNEL); 4168 GFP_KERNEL);
@@ -7271,6 +7268,51 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7271} 7268}
7272 7269
7273/** 7270/**
7271 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
7272 * @phba: Pointer to HBA context object.
7273 *
7274 * This function is called in the SLI4 code path to wait for completion
7275 * of device's XRIs exchange busy. It will check the XRI exchange busy
7276 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
7277 * that, it will check the XRI exchange busy on outstanding FCP and ELS
7278 * I/Os every 30 seconds, log error message, and wait forever. Only when
7279 * all XRI exchange busy complete, the driver unload shall proceed with
7280 * invoking the function reset ioctl mailbox command to the CNA and the
7281 * the rest of the driver unload resource release.
7282 **/
7283static void
7284lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
7285{
7286 int wait_time = 0;
7287 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7288 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7289
7290 while (!fcp_xri_cmpl || !els_xri_cmpl) {
7291 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
7292 if (!fcp_xri_cmpl)
7293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7294 "2877 FCP XRI exchange busy "
7295 "wait time: %d seconds.\n",
7296 wait_time/1000);
7297 if (!els_xri_cmpl)
7298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7299 "2878 ELS XRI exchange busy "
7300 "wait time: %d seconds.\n",
7301 wait_time/1000);
7302 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
7303 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
7304 } else {
7305 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
7306 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
7307 }
7308 fcp_xri_cmpl =
7309 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7310 els_xri_cmpl =
7311 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7312 }
7313}
7314
7315/**
7274 * lpfc_sli4_hba_unset - Unset the fcoe hba 7316 * lpfc_sli4_hba_unset - Unset the fcoe hba
7275 * @phba: Pointer to HBA context object. 7317 * @phba: Pointer to HBA context object.
7276 * 7318 *
@@ -7315,6 +7357,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7315 spin_unlock_irq(&phba->hbalock); 7357 spin_unlock_irq(&phba->hbalock);
7316 } 7358 }
7317 7359
7360 /* Abort all iocbs associated with the hba */
7361 lpfc_sli_hba_iocb_abort(phba);
7362
7363 /* Wait for completion of device XRI exchange busy */
7364 lpfc_sli4_xri_exchange_busy_wait(phba);
7365
7318 /* Disable PCI subsystem interrupt */ 7366 /* Disable PCI subsystem interrupt */
7319 lpfc_sli4_disable_intr(phba); 7367 lpfc_sli4_disable_intr(phba);
7320 7368
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 0dfa310cd60..62d0957e1d4 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -797,6 +797,34 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
797} 797}
798 798
799/** 799/**
800 * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
801 * @vport: pointer to a vport object.
802 *
803 * This routine sends mailbox command to unregister all active RPIs for
804 * a vport.
805 **/
806void
807lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
808{
809 struct lpfc_hba *phba = vport->phba;
810 LPFC_MBOXQ_t *mbox;
811 int rc;
812
813 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
814 if (mbox) {
815 lpfc_unreg_login(phba, vport->vpi,
816 vport->vpi + phba->vpi_base, mbox);
817 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ;
818 mbox->vport = vport;
819 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
820 mbox->context1 = NULL;
821 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
822 if (rc == MBX_NOT_FINISHED)
823 mempool_free(mbox, phba->mbox_mem_pool);
824 }
825}
826
827/**
800 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier 828 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
801 * @phba: pointer to lpfc hba data structure. 829 * @phba: pointer to lpfc hba data structure.
802 * @vpi: virtual N_Port identifier. 830 * @vpi: virtual N_Port identifier.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3a658953486..f64b65a770b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -169,6 +169,7 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
169 spin_lock_irqsave(shost->host_lock, flags); 169 spin_lock_irqsave(shost->host_lock, flags);
170 if (!vport->stat_data_enabled || 170 if (!vport->stat_data_enabled ||
171 vport->stat_data_blocked || 171 vport->stat_data_blocked ||
172 !pnode ||
172 !pnode->lat_data || 173 !pnode->lat_data ||
173 (phba->bucket_type == LPFC_NO_BUCKET)) { 174 (phba->bucket_type == LPFC_NO_BUCKET)) {
174 spin_unlock_irqrestore(shost->host_lock, flags); 175 spin_unlock_irqrestore(shost->host_lock, flags);
@@ -2040,6 +2041,9 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
2040 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 2041 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
2041 unsigned long flags; 2042 unsigned long flags;
2042 2043
2044 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2045 return;
2046
2043 /* If there is queuefull or busy condition send a scsi event */ 2047 /* If there is queuefull or busy condition send a scsi event */
2044 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 2048 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
2045 (cmnd->result == SAM_STAT_BUSY)) { 2049 (cmnd->result == SAM_STAT_BUSY)) {
@@ -3226,10 +3230,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3226 struct lpfc_scsi_buf *lpfc_cmd; 3230 struct lpfc_scsi_buf *lpfc_cmd;
3227 struct lpfc_iocbq *iocbq; 3231 struct lpfc_iocbq *iocbq;
3228 struct lpfc_iocbq *iocbqrsp; 3232 struct lpfc_iocbq *iocbqrsp;
3233 struct lpfc_nodelist *pnode = rdata->pnode;
3229 int ret; 3234 int ret;
3230 int status; 3235 int status;
3231 3236
3232 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) 3237 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3233 return FAILED; 3238 return FAILED;
3234 3239
3235 lpfc_cmd = lpfc_get_scsi_buf(phba); 3240 lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -3256,7 +3261,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3256 "0702 Issue %s to TGT %d LUN %d " 3261 "0702 Issue %s to TGT %d LUN %d "
3257 "rpi x%x nlp_flag x%x\n", 3262 "rpi x%x nlp_flag x%x\n",
3258 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 3263 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3259 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 3264 pnode->nlp_rpi, pnode->nlp_flag);
3260 3265
3261 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 3266 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3262 iocbq, iocbqrsp, lpfc_cmd->timeout); 3267 iocbq, iocbqrsp, lpfc_cmd->timeout);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0d1e187b005..554efa6623f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -95,7 +95,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
95 return -ENOMEM; 95 return -ENOMEM;
96 /* set consumption flag every once in a while */ 96 /* set consumption flag every once in a while */
97 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) 97 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
98 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1); 98 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
99 99
100 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 100 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
101 101
@@ -1735,6 +1735,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1735 struct lpfc_vport *vport = pmb->vport; 1735 struct lpfc_vport *vport = pmb->vport;
1736 struct lpfc_dmabuf *mp; 1736 struct lpfc_dmabuf *mp;
1737 struct lpfc_nodelist *ndlp; 1737 struct lpfc_nodelist *ndlp;
1738 struct Scsi_Host *shost;
1738 uint16_t rpi, vpi; 1739 uint16_t rpi, vpi;
1739 int rc; 1740 int rc;
1740 1741
@@ -1746,7 +1747,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1746 } 1747 }
1747 1748
1748 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && 1749 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1749 (phba->sli_rev == LPFC_SLI_REV4)) 1750 (phba->sli_rev == LPFC_SLI_REV4) &&
1751 (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
1750 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); 1752 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1751 1753
1752 /* 1754 /*
@@ -1765,16 +1767,14 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1765 return; 1767 return;
1766 } 1768 }
1767 1769
1768 /* Unreg VPI, if the REG_VPI succeed after VLink failure */
1769 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 1770 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
1770 !(phba->pport->load_flag & FC_UNLOADING) && 1771 !(phba->pport->load_flag & FC_UNLOADING) &&
1771 !pmb->u.mb.mbxStatus) { 1772 !pmb->u.mb.mbxStatus) {
1772 lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb); 1773 shost = lpfc_shost_from_vport(vport);
1773 pmb->vport = vport; 1774 spin_lock_irq(shost->host_lock);
1774 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1775 vport->vpi_state |= LPFC_VPI_REGISTERED;
1775 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1776 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
1776 if (rc != MBX_NOT_FINISHED) 1777 spin_unlock_irq(shost->host_lock);
1777 return;
1778 } 1778 }
1779 1779
1780 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 1780 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
@@ -5921,7 +5921,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5921 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 5921 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5922 * @phba: Pointer to HBA context object. 5922 * @phba: Pointer to HBA context object.
5923 * 5923 *
5924 * This routine performs a round robin SCSI command to SLI4 FCP WQ index 5924 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
5925 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 5925 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
5926 * held. 5926 * held.
5927 * 5927 *
@@ -5965,7 +5965,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5965 uint16_t abrt_iotag; 5965 uint16_t abrt_iotag;
5966 struct lpfc_iocbq *abrtiocbq; 5966 struct lpfc_iocbq *abrtiocbq;
5967 struct ulp_bde64 *bpl = NULL; 5967 struct ulp_bde64 *bpl = NULL;
5968 uint32_t els_id = ELS_ID_DEFAULT; 5968 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
5969 int numBdes, i; 5969 int numBdes, i;
5970 struct ulp_bde64 bde; 5970 struct ulp_bde64 bde;
5971 5971
@@ -5982,7 +5982,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5982 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 5982 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5983 abort_tag = (uint32_t) iocbq->iotag; 5983 abort_tag = (uint32_t) iocbq->iotag;
5984 xritag = iocbq->sli4_xritag; 5984 xritag = iocbq->sli4_xritag;
5985 wqe->words[7] = 0; /* The ct field has moved so reset */ 5985 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
5986 /* words0-2 bpl convert bde */ 5986 /* words0-2 bpl convert bde */
5987 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 5987 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5988 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 5988 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -6033,109 +6033,117 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6033 * contains the FCFI and remote N_Port_ID is 6033 * contains the FCFI and remote N_Port_ID is
6034 * in word 5. 6034 * in word 5.
6035 */ 6035 */
6036
6037 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6036 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
6038 bf_set(lpfc_wqe_gen_context, &wqe->generic, 6037 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
6039 iocbq->iocb.ulpContext); 6038 iocbq->iocb.ulpContext);
6040 6039 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
6041 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); 6040 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
6042 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
6043 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 6041 /* CCP CCPE PV PRI in word10 were set in the memcpy */
6044
6045 if (command_type == ELS_COMMAND_FIP) { 6042 if (command_type == ELS_COMMAND_FIP) {
6046 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 6043 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
6047 >> LPFC_FIP_ELS_ID_SHIFT); 6044 >> LPFC_FIP_ELS_ID_SHIFT);
6048 } 6045 }
6049 bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id); 6046 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
6050 6047 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
6048 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
6049 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
6050 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
6051 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
6051 break; 6052 break;
6052 case CMD_XMIT_SEQUENCE64_CX: 6053 case CMD_XMIT_SEQUENCE64_CX:
6053 bf_set(lpfc_wqe_gen_context, &wqe->generic, 6054 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
6054 iocbq->iocb.un.ulpWord[3]); 6055 iocbq->iocb.un.ulpWord[3]);
6055 wqe->generic.word3 = 0; 6056 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
6056 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); 6057 iocbq->iocb.ulpContext);
6057 /* The entire sequence is transmitted for this IOCB */ 6058 /* The entire sequence is transmitted for this IOCB */
6058 xmit_len = total_len; 6059 xmit_len = total_len;
6059 cmnd = CMD_XMIT_SEQUENCE64_CR; 6060 cmnd = CMD_XMIT_SEQUENCE64_CR;
6060 case CMD_XMIT_SEQUENCE64_CR: 6061 case CMD_XMIT_SEQUENCE64_CR:
6061 /* word3 iocb=io_tag32 wqe=payload_offset */ 6062 /* word3 iocb=io_tag32 wqe=reserved */
6062 /* payload offset used for multilpe outstanding 6063 wqe->xmit_sequence.rsvd3 = 0;
6063 * sequences on the same exchange
6064 */
6065 wqe->words[3] = 0;
6066 /* word4 relative_offset memcpy */ 6064 /* word4 relative_offset memcpy */
6067 /* word5 r_ctl/df_ctl memcpy */ 6065 /* word5 r_ctl/df_ctl memcpy */
6068 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 6066 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
6067 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
6068 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
6069 LPFC_WQE_IOD_WRITE);
6070 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
6071 LPFC_WQE_LENLOC_WORD12);
6072 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
6069 wqe->xmit_sequence.xmit_len = xmit_len; 6073 wqe->xmit_sequence.xmit_len = xmit_len;
6070 command_type = OTHER_COMMAND; 6074 command_type = OTHER_COMMAND;
6071 break; 6075 break;
6072 case CMD_XMIT_BCAST64_CN: 6076 case CMD_XMIT_BCAST64_CN:
6073 /* word3 iocb=iotag32 wqe=payload_len */ 6077 /* word3 iocb=iotag32 wqe=seq_payload_len */
6074 wqe->words[3] = 0; /* no definition for this in wqe */ 6078 wqe->xmit_bcast64.seq_payload_len = xmit_len;
6075 /* word4 iocb=rsvd wqe=rsvd */ 6079 /* word4 iocb=rsvd wqe=rsvd */
6076 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 6080 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
6077 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 6081 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
6078 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6082 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
6079 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6083 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6084 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
6085 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
6086 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
6087 LPFC_WQE_LENLOC_WORD3);
6088 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
6080 break; 6089 break;
6081 case CMD_FCP_IWRITE64_CR: 6090 case CMD_FCP_IWRITE64_CR:
6082 command_type = FCP_COMMAND_DATA_OUT; 6091 command_type = FCP_COMMAND_DATA_OUT;
6083 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat 6092 /* word3 iocb=iotag wqe=payload_offset_len */
6084 * confusing. 6093 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
6085 * word3 is payload_len: byte offset to the sgl entry for the 6094 wqe->fcp_iwrite.payload_offset_len =
6086 * fcp_command. 6095 xmit_len + sizeof(struct fcp_rsp);
6087 * word4 is total xfer len, same as the IOCB->ulpParameter. 6096 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
6088 * word5 is initial xfer len 0 = wait for xfer-ready 6097 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
6089 */ 6098 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
6090 6099 iocbq->iocb.ulpFCP2Rcvy);
6091 /* Always wait for xfer-ready before sending data */ 6100 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
6092 wqe->fcp_iwrite.initial_xfer_len = 0; 6101 /* Always open the exchange */
6093 /* word 4 (xfer length) should have been set on the memcpy */ 6102 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
6094 6103 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
6095 /* allow write to fall through to read */ 6104 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
6105 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
6106 LPFC_WQE_LENLOC_WORD4);
6107 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
6108 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
6109 break;
6096 case CMD_FCP_IREAD64_CR: 6110 case CMD_FCP_IREAD64_CR:
6097 /* FCP_CMD is always the 1st sgl entry */ 6111 /* word3 iocb=iotag wqe=payload_offset_len */
6098 wqe->fcp_iread.payload_len = 6112 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
6113 wqe->fcp_iread.payload_offset_len =
6099 xmit_len + sizeof(struct fcp_rsp); 6114 xmit_len + sizeof(struct fcp_rsp);
6100 6115 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
6101 /* word 4 (xfer length) should have been set on the memcpy */ 6116 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
6102 6117 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
6103 bf_set(lpfc_wqe_gen_erp, &wqe->generic, 6118 iocbq->iocb.ulpFCP2Rcvy);
6104 iocbq->iocb.ulpFCP2Rcvy); 6119 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
6105 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
6106 /* The XC bit and the XS bit are similar. The driver never
6107 * tracked whether or not the exchange was previouslly open.
6108 * XC = Exchange create, 0 is create. 1 is already open.
6109 * XS = link cmd: 1 do not close the exchange after command.
6110 * XS = 0 close exchange when command completes.
6111 * The only time we would not set the XC bit is when the XS bit
6112 * is set and we are sending our 2nd or greater command on
6113 * this exchange.
6114 */
6115 /* Always open the exchange */ 6120 /* Always open the exchange */
6116 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 6121 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
6117 6122 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
6118 wqe->words[10] &= 0xffff0000; /* zero out ebde count */ 6123 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
6119 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 6124 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
6120 break; 6125 LPFC_WQE_LENLOC_WORD4);
6126 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
6127 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
6128 break;
6121 case CMD_FCP_ICMND64_CR: 6129 case CMD_FCP_ICMND64_CR:
6130 /* word3 iocb=IO_TAG wqe=reserved */
6131 wqe->fcp_icmd.rsrvd3 = 0;
6132 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
6122 /* Always open the exchange */ 6133 /* Always open the exchange */
6123 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 6134 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
6124 6135 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
6125 wqe->words[4] = 0; 6136 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
6126 wqe->words[10] &= 0xffff0000; /* zero out ebde count */ 6137 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
6127 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 6138 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
6139 LPFC_WQE_LENLOC_NONE);
6140 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
6128 break; 6141 break;
6129 case CMD_GEN_REQUEST64_CR: 6142 case CMD_GEN_REQUEST64_CR:
6130 /* word3 command length is described as byte offset to the 6143 /* word3 iocb=IO_TAG wqe=request_payload_len */
6131 * rsp_data. Would always be 16, sizeof(struct sli4_sge) 6144 wqe->gen_req.request_payload_len = xmit_len;
6132 * sgl[0] = cmnd 6145 /* word4 iocb=parameter wqe=relative_offset memcpy */
6133 * sgl[1] = rsp. 6146 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
6134 *
6135 */
6136 wqe->gen_req.command_len = xmit_len;
6137 /* Word4 parameter copied in the memcpy */
6138 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
6139 /* word6 context tag copied in memcpy */ 6147 /* word6 context tag copied in memcpy */
6140 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 6148 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
6141 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6149 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
@@ -6144,31 +6152,39 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6144 ct, iocbq->iocb.ulpCommand); 6152 ct, iocbq->iocb.ulpCommand);
6145 return IOCB_ERROR; 6153 return IOCB_ERROR;
6146 } 6154 }
6147 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0); 6155 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
6148 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, 6156 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
6149 iocbq->iocb.ulpTimeout); 6157 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
6150 6158 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
6151 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 6159 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
6160 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
6161 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
6162 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
6152 command_type = OTHER_COMMAND; 6163 command_type = OTHER_COMMAND;
6153 break; 6164 break;
6154 case CMD_XMIT_ELS_RSP64_CX: 6165 case CMD_XMIT_ELS_RSP64_CX:
6155 /* words0-2 BDE memcpy */ 6166 /* words0-2 BDE memcpy */
6156 /* word3 iocb=iotag32 wqe=rsvd */ 6167 /* word3 iocb=iotag32 wqe=response_payload_len */
6157 wqe->words[3] = 0; 6168 wqe->xmit_els_rsp.response_payload_len = xmit_len;
6158 /* word4 iocb=did wge=rsvd. */ 6169 /* word4 iocb=did wge=rsvd. */
6159 wqe->words[4] = 0; 6170 wqe->xmit_els_rsp.rsvd4 = 0;
6160 /* word5 iocb=rsvd wge=did */ 6171 /* word5 iocb=rsvd wge=did */
6161 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 6172 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
6162 iocbq->iocb.un.elsreq64.remoteID); 6173 iocbq->iocb.un.elsreq64.remoteID);
6163 6174 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
6164 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6175 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6165 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6176 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
6166 6177 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6167 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 6178 iocbq->iocb.ulpContext);
6168 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
6169 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 6179 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
6170 bf_set(lpfc_wqe_gen_context, &wqe->generic, 6180 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6171 iocbq->vport->vpi + phba->vpi_base); 6181 iocbq->vport->vpi + phba->vpi_base);
6182 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
6183 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
6184 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
6185 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
6186 LPFC_WQE_LENLOC_WORD3);
6187 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6172 command_type = OTHER_COMMAND; 6188 command_type = OTHER_COMMAND;
6173 break; 6189 break;
6174 case CMD_CLOSE_XRI_CN: 6190 case CMD_CLOSE_XRI_CN:
@@ -6193,15 +6209,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6193 else 6209 else
6194 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 6210 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
6195 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 6211 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
6196 wqe->words[5] = 0; 6212 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
6197 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 6213 wqe->abort_cmd.rsrvd5 = 0;
6214 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
6198 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6215 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6199 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 6216 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6200 /* 6217 /*
6201 * The abort handler will send us CMD_ABORT_XRI_CN or 6218 * The abort handler will send us CMD_ABORT_XRI_CN or
6202 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 6219 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
6203 */ 6220 */
6204 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX); 6221 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
6222 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
6223 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
6224 LPFC_WQE_LENLOC_NONE);
6205 cmnd = CMD_ABORT_XRI_CX; 6225 cmnd = CMD_ABORT_XRI_CX;
6206 command_type = OTHER_COMMAND; 6226 command_type = OTHER_COMMAND;
6207 xritag = 0; 6227 xritag = 0;
@@ -6235,18 +6255,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6235 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 6255 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6236 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 6256 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6237 iocbq->iocb.ulpContext); 6257 iocbq->iocb.ulpContext);
6258 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
6259 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
6260 LPFC_WQE_LENLOC_NONE);
6238 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 6261 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
6239 command_type = OTHER_COMMAND; 6262 command_type = OTHER_COMMAND;
6240 break; 6263 break;
6241 case CMD_XRI_ABORTED_CX: 6264 case CMD_XRI_ABORTED_CX:
6242 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 6265 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
6243 /* words0-2 are all 0's no bde */
6244 /* word3 and word4 are rsvrd */
6245 wqe->words[3] = 0;
6246 wqe->words[4] = 0;
6247 /* word5 iocb=rsvd wge=did */
6248 /* There is no remote port id in the IOCB? */
6249 /* Let this fall through and fail */
6250 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 6266 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
6251 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 6267 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
6252 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 6268 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
@@ -6257,16 +6273,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6257 iocbq->iocb.ulpCommand); 6273 iocbq->iocb.ulpCommand);
6258 return IOCB_ERROR; 6274 return IOCB_ERROR;
6259 break; 6275 break;
6260
6261 } 6276 }
6262 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag); 6277 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
6263 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag); 6278 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
6264 wqe->generic.abort_tag = abort_tag; 6279 wqe->generic.wqe_com.abort_tag = abort_tag;
6265 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type); 6280 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
6266 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd); 6281 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
6267 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass); 6282 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
6268 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT); 6283 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
6269
6270 return 0; 6284 return 0;
6271} 6285}
6272 6286
@@ -7257,25 +7271,26 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7257} 7271}
7258 7272
7259/** 7273/**
7260 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 7274 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
7261 * @phba: Pointer to HBA context object. 7275 * @phba: Pointer to HBA context object.
7262 * @pring: Pointer to driver SLI ring object. 7276 * @pring: Pointer to driver SLI ring object.
7263 * @cmdiocb: Pointer to driver command iocb object. 7277 * @cmdiocb: Pointer to driver command iocb object.
7264 * 7278 *
7265 * This function issues an abort iocb for the provided command 7279 * This function issues an abort iocb for the provided command iocb down to
7266 * iocb. This function is called with hbalock held. 7280 * the port. Other than the case the outstanding command iocb is an abort
7267 * The function returns 0 when it fails due to memory allocation 7281 * request, this function issues abort out unconditionally. This function is
7268 * failure or when the command iocb is an abort request. 7282 * called with hbalock held. The function returns 0 when it fails due to
7283 * memory allocation failure or when the command iocb is an abort request.
7269 **/ 7284 **/
7270int 7285static int
7271lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7286lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7272 struct lpfc_iocbq *cmdiocb) 7287 struct lpfc_iocbq *cmdiocb)
7273{ 7288{
7274 struct lpfc_vport *vport = cmdiocb->vport; 7289 struct lpfc_vport *vport = cmdiocb->vport;
7275 struct lpfc_iocbq *abtsiocbp; 7290 struct lpfc_iocbq *abtsiocbp;
7276 IOCB_t *icmd = NULL; 7291 IOCB_t *icmd = NULL;
7277 IOCB_t *iabt = NULL; 7292 IOCB_t *iabt = NULL;
7278 int retval = IOCB_ERROR; 7293 int retval;
7279 7294
7280 /* 7295 /*
7281 * There are certain command types we don't want to abort. And we 7296 * There are certain command types we don't want to abort. And we
@@ -7288,18 +7303,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7288 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7303 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
7289 return 0; 7304 return 0;
7290 7305
7291 /* If we're unloading, don't abort iocb on the ELS ring, but change the
7292 * callback so that nothing happens when it finishes.
7293 */
7294 if ((vport->load_flag & FC_UNLOADING) &&
7295 (pring->ringno == LPFC_ELS_RING)) {
7296 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
7297 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
7298 else
7299 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
7300 goto abort_iotag_exit;
7301 }
7302
7303 /* issue ABTS for this IOCB based on iotag */ 7306 /* issue ABTS for this IOCB based on iotag */
7304 abtsiocbp = __lpfc_sli_get_iocbq(phba); 7307 abtsiocbp = __lpfc_sli_get_iocbq(phba);
7305 if (abtsiocbp == NULL) 7308 if (abtsiocbp == NULL)
@@ -7344,6 +7347,63 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7344 7347
7345 if (retval) 7348 if (retval)
7346 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7349 __lpfc_sli_release_iocbq(phba, abtsiocbp);
7350
7351 /*
7352 * Caller to this routine should check for IOCB_ERROR
7353 * and handle it properly. This routine no longer removes
7354 * iocb off txcmplq and call compl in case of IOCB_ERROR.
7355 */
7356 return retval;
7357}
7358
7359/**
7360 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
7361 * @phba: Pointer to HBA context object.
7362 * @pring: Pointer to driver SLI ring object.
7363 * @cmdiocb: Pointer to driver command iocb object.
7364 *
7365 * This function issues an abort iocb for the provided command iocb. In case
7366 * of unloading, the abort iocb will not be issued to commands on the ELS
7367 * ring. Instead, the callback function shall be changed to those commands
7368 * so that nothing happens when them finishes. This function is called with
7369 * hbalock held. The function returns 0 when the command iocb is an abort
7370 * request.
7371 **/
7372int
7373lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7374 struct lpfc_iocbq *cmdiocb)
7375{
7376 struct lpfc_vport *vport = cmdiocb->vport;
7377 int retval = IOCB_ERROR;
7378 IOCB_t *icmd = NULL;
7379
7380 /*
7381 * There are certain command types we don't want to abort. And we
7382 * don't want to abort commands that are already in the process of
7383 * being aborted.
7384 */
7385 icmd = &cmdiocb->iocb;
7386 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
7387 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
7388 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
7389 return 0;
7390
7391 /*
7392 * If we're unloading, don't abort iocb on the ELS ring, but change
7393 * the callback so that nothing happens when it finishes.
7394 */
7395 if ((vport->load_flag & FC_UNLOADING) &&
7396 (pring->ringno == LPFC_ELS_RING)) {
7397 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
7398 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
7399 else
7400 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
7401 goto abort_iotag_exit;
7402 }
7403
7404 /* Now, we try to issue the abort to the cmdiocb out */
7405 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
7406
7347abort_iotag_exit: 7407abort_iotag_exit:
7348 /* 7408 /*
7349 * Caller to this routine should check for IOCB_ERROR 7409 * Caller to this routine should check for IOCB_ERROR
@@ -7354,6 +7414,62 @@ abort_iotag_exit:
7354} 7414}
7355 7415
7356/** 7416/**
7417 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
7418 * @phba: Pointer to HBA context object.
7419 * @pring: Pointer to driver SLI ring object.
7420 *
7421 * This function aborts all iocbs in the given ring and frees all the iocb
7422 * objects in txq. This function issues abort iocbs unconditionally for all
7423 * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
7424 * to complete before the return of this function. The caller is not required
7425 * to hold any locks.
7426 **/
7427static void
7428lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
7429{
7430 LIST_HEAD(completions);
7431 struct lpfc_iocbq *iocb, *next_iocb;
7432
7433 if (pring->ringno == LPFC_ELS_RING)
7434 lpfc_fabric_abort_hba(phba);
7435
7436 spin_lock_irq(&phba->hbalock);
7437
7438 /* Take off all the iocbs on txq for cancelling */
7439 list_splice_init(&pring->txq, &completions);
7440 pring->txq_cnt = 0;
7441
7442 /* Next issue ABTS for everything on the txcmplq */
7443 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
7444 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
7445
7446 spin_unlock_irq(&phba->hbalock);
7447
7448 /* Cancel all the IOCBs from the completions list */
7449 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7450 IOERR_SLI_ABORTED);
7451}
7452
7453/**
7454 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
7455 * @phba: pointer to lpfc HBA data structure.
7456 *
7457 * This routine will abort all pending and outstanding iocbs to an HBA.
7458 **/
7459void
7460lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
7461{
7462 struct lpfc_sli *psli = &phba->sli;
7463 struct lpfc_sli_ring *pring;
7464 int i;
7465
7466 for (i = 0; i < psli->num_rings; i++) {
7467 pring = &psli->ring[i];
7468 lpfc_sli_iocb_ring_abort(phba, pring);
7469 }
7470}
7471
7472/**
7357 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 7473 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
7358 * @iocbq: Pointer to driver iocb object. 7474 * @iocbq: Pointer to driver iocb object.
7359 * @vport: Pointer to driver virtual port object. 7475 * @vport: Pointer to driver virtual port object.
@@ -12242,13 +12358,15 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12242 /* Issue the mailbox command asynchronously */ 12358 /* Issue the mailbox command asynchronously */
12243 mboxq->vport = phba->pport; 12359 mboxq->vport = phba->pport;
12244 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 12360 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
12361
12362 spin_lock_irq(&phba->hbalock);
12363 phba->hba_flag |= FCF_TS_INPROG;
12364 spin_unlock_irq(&phba->hbalock);
12365
12245 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12366 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12246 if (rc == MBX_NOT_FINISHED) 12367 if (rc == MBX_NOT_FINISHED)
12247 error = -EIO; 12368 error = -EIO;
12248 else { 12369 else {
12249 spin_lock_irq(&phba->hbalock);
12250 phba->hba_flag |= FCF_DISC_INPROGRESS;
12251 spin_unlock_irq(&phba->hbalock);
12252 /* Reset eligible FCF count for new scan */ 12370 /* Reset eligible FCF count for new scan */
12253 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 12371 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
12254 phba->fcf.eligible_fcf_cnt = 0; 12372 phba->fcf.eligible_fcf_cnt = 0;
@@ -12258,21 +12376,21 @@ fail_fcf_scan:
12258 if (error) { 12376 if (error) {
12259 if (mboxq) 12377 if (mboxq)
12260 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12378 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12261 /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */ 12379 /* FCF scan failed, clear FCF_TS_INPROG flag */
12262 spin_lock_irq(&phba->hbalock); 12380 spin_lock_irq(&phba->hbalock);
12263 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 12381 phba->hba_flag &= ~FCF_TS_INPROG;
12264 spin_unlock_irq(&phba->hbalock); 12382 spin_unlock_irq(&phba->hbalock);
12265 } 12383 }
12266 return error; 12384 return error;
12267} 12385}
12268 12386
12269/** 12387/**
12270 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. 12388 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
12271 * @phba: pointer to lpfc hba data structure. 12389 * @phba: pointer to lpfc hba data structure.
12272 * @fcf_index: FCF table entry offset. 12390 * @fcf_index: FCF table entry offset.
12273 * 12391 *
12274 * This routine is invoked to read an FCF record indicated by @fcf_index 12392 * This routine is invoked to read an FCF record indicated by @fcf_index
12275 * and to use it for FLOGI round robin FCF failover. 12393 * and to use it for FLOGI roundrobin FCF failover.
12276 * 12394 *
12277 * Return 0 if the mailbox command is submitted sucessfully, none 0 12395 * Return 0 if the mailbox command is submitted sucessfully, none 0
12278 * otherwise. 12396 * otherwise.
@@ -12318,7 +12436,7 @@ fail_fcf_read:
12318 * @fcf_index: FCF table entry offset. 12436 * @fcf_index: FCF table entry offset.
12319 * 12437 *
12320 * This routine is invoked to read an FCF record indicated by @fcf_index to 12438 * This routine is invoked to read an FCF record indicated by @fcf_index to
12321 * determine whether it's eligible for FLOGI round robin failover list. 12439 * determine whether it's eligible for FLOGI roundrobin failover list.
12322 * 12440 *
12323 * Return 0 if the mailbox command is submitted sucessfully, none 0 12441 * Return 0 if the mailbox command is submitted sucessfully, none 0
12324 * otherwise. 12442 * otherwise.
@@ -12364,7 +12482,7 @@ fail_fcf_read:
12364 * 12482 *
12365 * This routine is to get the next eligible FCF record index in a round 12483 * This routine is to get the next eligible FCF record index in a round
12366 * robin fashion. If the next eligible FCF record index equals to the 12484 * robin fashion. If the next eligible FCF record index equals to the
12367 * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12485 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
12368 * shall be returned, otherwise, the next eligible FCF record's index 12486 * shall be returned, otherwise, the next eligible FCF record's index
12369 * shall be returned. 12487 * shall be returned.
12370 **/ 12488 **/
@@ -12392,28 +12510,10 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12392 return LPFC_FCOE_FCF_NEXT_NONE; 12510 return LPFC_FCOE_FCF_NEXT_NONE;
12393 } 12511 }
12394 12512
12395 /* Check roundrobin failover index bmask stop condition */
12396 if (next_fcf_index == phba->fcf.fcf_rr_init_indx) {
12397 if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) {
12398 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
12399 "2847 Round robin failover FCF index "
12400 "search hit stop condition:x%x\n",
12401 next_fcf_index);
12402 return LPFC_FCOE_FCF_NEXT_NONE;
12403 }
12404 /* The roundrobin failover index bmask updated, start over */
12405 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12406 "2848 Round robin failover FCF index bmask "
12407 "updated, start over\n");
12408 spin_lock_irq(&phba->hbalock);
12409 phba->fcf.fcf_flag &= ~FCF_REDISC_RRU;
12410 spin_unlock_irq(&phba->hbalock);
12411 return phba->fcf.fcf_rr_init_indx;
12412 }
12413
12414 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12513 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12415 "2845 Get next round robin failover " 12514 "2845 Get next roundrobin failover FCF (x%x)\n",
12416 "FCF index x%x\n", next_fcf_index); 12515 next_fcf_index);
12516
12417 return next_fcf_index; 12517 return next_fcf_index;
12418} 12518}
12419 12519
@@ -12422,7 +12522,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12422 * @phba: pointer to lpfc hba data structure. 12522 * @phba: pointer to lpfc hba data structure.
12423 * 12523 *
12424 * This routine sets the FCF record index in to the eligible bmask for 12524 * This routine sets the FCF record index in to the eligible bmask for
12425 * round robin failover search. It checks to make sure that the index 12525 * roundrobin failover search. It checks to make sure that the index
12426 * does not go beyond the range of the driver allocated bmask dimension 12526 * does not go beyond the range of the driver allocated bmask dimension
12427 * before setting the bit. 12527 * before setting the bit.
12428 * 12528 *
@@ -12434,22 +12534,16 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12434{ 12534{
12435 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12535 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12436 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12536 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12437 "2610 HBA FCF index reached driver's " 12537 "2610 FCF (x%x) reached driver's book "
12438 "book keeping dimension: fcf_index:%d, " 12538 "keeping dimension:x%x\n",
12439 "driver_bmask_max:%d\n",
12440 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12539 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12441 return -EINVAL; 12540 return -EINVAL;
12442 } 12541 }
12443 /* Set the eligible FCF record index bmask */ 12542 /* Set the eligible FCF record index bmask */
12444 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12543 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12445 12544
12446 /* Set the roundrobin index bmask updated */
12447 spin_lock_irq(&phba->hbalock);
12448 phba->fcf.fcf_flag |= FCF_REDISC_RRU;
12449 spin_unlock_irq(&phba->hbalock);
12450
12451 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12545 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12452 "2790 Set FCF index x%x to round robin failover " 12546 "2790 Set FCF (x%x) to roundrobin FCF failover "
12453 "bmask\n", fcf_index); 12547 "bmask\n", fcf_index);
12454 12548
12455 return 0; 12549 return 0;
@@ -12460,7 +12554,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12460 * @phba: pointer to lpfc hba data structure. 12554 * @phba: pointer to lpfc hba data structure.
12461 * 12555 *
12462 * This routine clears the FCF record index from the eligible bmask for 12556 * This routine clears the FCF record index from the eligible bmask for
12463 * round robin failover search. It checks to make sure that the index 12557 * roundrobin failover search. It checks to make sure that the index
12464 * does not go beyond the range of the driver allocated bmask dimension 12558 * does not go beyond the range of the driver allocated bmask dimension
12465 * before clearing the bit. 12559 * before clearing the bit.
12466 **/ 12560 **/
@@ -12469,9 +12563,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12469{ 12563{
12470 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12564 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12471 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12565 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12472 "2762 HBA FCF index goes beyond driver's " 12566 "2762 FCF (x%x) reached driver's book "
12473 "book keeping dimension: fcf_index:%d, " 12567 "keeping dimension:x%x\n",
12474 "driver_bmask_max:%d\n",
12475 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12568 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12476 return; 12569 return;
12477 } 12570 }
@@ -12479,7 +12572,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12479 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12572 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12480 12573
12481 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12574 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12482 "2791 Clear FCF index x%x from round robin failover " 12575 "2791 Clear FCF (x%x) from roundrobin failover "
12483 "bmask\n", fcf_index); 12576 "bmask\n", fcf_index);
12484} 12577}
12485 12578
@@ -12530,8 +12623,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12530 } 12623 }
12531 } else { 12624 } else {
12532 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12625 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12533 "2775 Start FCF rediscovery quiescent period " 12626 "2775 Start FCF rediscover quiescent timer\n");
12534 "wait timer before scaning FCF table\n");
12535 /* 12627 /*
12536 * Start FCF rediscovery wait timer for pending FCF 12628 * Start FCF rediscovery wait timer for pending FCF
12537 * before rescan FCF record table. 12629 * before rescan FCF record table.
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a0ca572ec28..c4483feb8b7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -19,10 +19,16 @@
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_ACTIVE_MBOX_WAIT_CNT 100 21#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
22#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
23#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
24#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 25#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32 26#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10 27#define LPFC_RPI_LOW_WATER_MARK 10
25 28
29#define LPFC_UNREG_FCF 1
30#define LPFC_SKIP_UNREG_FCF 0
31
26/* Amount of time in seconds for waiting FCF rediscovery to complete */ 32/* Amount of time in seconds for waiting FCF rediscovery to complete */
27#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ 33#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
28 34
@@ -163,9 +169,8 @@ struct lpfc_fcf {
163#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ 169#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
164#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ 170#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
165#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ 171#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
166#define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */ 172#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
167 uint32_t addr_mode; 173 uint32_t addr_mode;
168 uint16_t fcf_rr_init_indx;
169 uint32_t eligible_fcf_cnt; 174 uint32_t eligible_fcf_cnt;
170 struct lpfc_fcf_rec current_rec; 175 struct lpfc_fcf_rec current_rec;
171 struct lpfc_fcf_rec failover_rec; 176 struct lpfc_fcf_rec failover_rec;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f93120e4c79..7a1b5b112a0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.17" 21#define LPFC_DRIVER_VERSION "8.3.18"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index d3c9cdee292..eb29d508513 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.04.17.1-rc1 13 * Version : v00.00.04.31-rc1
14 * 14 *
15 * Authors: 15 * Authors:
16 * (email-id : megaraidlinux@lsi.com) 16 * (email-id : megaraidlinux@lsi.com)
@@ -56,6 +56,15 @@ module_param_named(poll_mode_io, poll_mode_io, int, 0);
56MODULE_PARM_DESC(poll_mode_io, 56MODULE_PARM_DESC(poll_mode_io,
57 "Complete cmds from IO path, (default=0)"); 57 "Complete cmds from IO path, (default=0)");
58 58
59/*
60 * Number of sectors per IO command
61 * Will be set in megasas_init_mfi if user does not provide
62 */
63static unsigned int max_sectors;
64module_param_named(max_sectors, max_sectors, int, 0);
65MODULE_PARM_DESC(max_sectors,
66 "Maximum number of sectors per IO command");
67
59MODULE_LICENSE("GPL"); 68MODULE_LICENSE("GPL");
60MODULE_VERSION(MEGASAS_VERSION); 69MODULE_VERSION(MEGASAS_VERSION);
61MODULE_AUTHOR("megaraidlinux@lsi.com"); 70MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -103,6 +112,7 @@ static int megasas_poll_wait_aen;
103static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); 112static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
104static u32 support_poll_for_event; 113static u32 support_poll_for_event;
105static u32 megasas_dbg_lvl; 114static u32 megasas_dbg_lvl;
115static u32 support_device_change;
106 116
107/* define lock for aen poll */ 117/* define lock for aen poll */
108spinlock_t poll_aen_lock; 118spinlock_t poll_aen_lock;
@@ -718,6 +728,10 @@ static int
718megasas_check_reset_gen2(struct megasas_instance *instance, 728megasas_check_reset_gen2(struct megasas_instance *instance,
719 struct megasas_register_set __iomem *regs) 729 struct megasas_register_set __iomem *regs)
720{ 730{
731 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
732 return 1;
733 }
734
721 return 0; 735 return 0;
722} 736}
723 737
@@ -930,6 +944,7 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
930 mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); 944 mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
931 mfi_sgl->sge_skinny[i].phys_addr = 945 mfi_sgl->sge_skinny[i].phys_addr =
932 sg_dma_address(os_sgl); 946 sg_dma_address(os_sgl);
947 mfi_sgl->sge_skinny[i].flag = 0;
933 } 948 }
934 } 949 }
935 return sge_count; 950 return sge_count;
@@ -1557,6 +1572,28 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1557 } 1572 }
1558} 1573}
1559 1574
1575static void
1576megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
1577
1578static void
1579process_fw_state_change_wq(struct work_struct *work);
1580
1581void megasas_do_ocr(struct megasas_instance *instance)
1582{
1583 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
1584 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
1585 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
1586 *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN;
1587 }
1588 instance->instancet->disable_intr(instance->reg_set);
1589 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
1590 instance->issuepend_done = 0;
1591
1592 atomic_set(&instance->fw_outstanding, 0);
1593 megasas_internal_reset_defer_cmds(instance);
1594 process_fw_state_change_wq(&instance->work_init);
1595}
1596
1560/** 1597/**
1561 * megasas_wait_for_outstanding - Wait for all outstanding cmds 1598 * megasas_wait_for_outstanding - Wait for all outstanding cmds
1562 * @instance: Adapter soft state 1599 * @instance: Adapter soft state
@@ -1574,6 +1611,8 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1574 unsigned long flags; 1611 unsigned long flags;
1575 struct list_head clist_local; 1612 struct list_head clist_local;
1576 struct megasas_cmd *reset_cmd; 1613 struct megasas_cmd *reset_cmd;
1614 u32 fw_state;
1615 u8 kill_adapter_flag;
1577 1616
1578 spin_lock_irqsave(&instance->hba_lock, flags); 1617 spin_lock_irqsave(&instance->hba_lock, flags);
1579 adprecovery = instance->adprecovery; 1618 adprecovery = instance->adprecovery;
@@ -1659,7 +1698,45 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1659 msleep(1000); 1698 msleep(1000);
1660 } 1699 }
1661 1700
1662 if (atomic_read(&instance->fw_outstanding)) { 1701 i = 0;
1702 kill_adapter_flag = 0;
1703 do {
1704 fw_state = instance->instancet->read_fw_status_reg(
1705 instance->reg_set) & MFI_STATE_MASK;
1706 if ((fw_state == MFI_STATE_FAULT) &&
1707 (instance->disableOnlineCtrlReset == 0)) {
1708 if (i == 3) {
1709 kill_adapter_flag = 2;
1710 break;
1711 }
1712 megasas_do_ocr(instance);
1713 kill_adapter_flag = 1;
1714
1715 /* wait for 1 secs to let FW finish the pending cmds */
1716 msleep(1000);
1717 }
1718 i++;
1719 } while (i <= 3);
1720
1721 if (atomic_read(&instance->fw_outstanding) &&
1722 !kill_adapter_flag) {
1723 if (instance->disableOnlineCtrlReset == 0) {
1724
1725 megasas_do_ocr(instance);
1726
1727 /* wait for 5 secs to let FW finish the pending cmds */
1728 for (i = 0; i < wait_time; i++) {
1729 int outstanding =
1730 atomic_read(&instance->fw_outstanding);
1731 if (!outstanding)
1732 return SUCCESS;
1733 msleep(1000);
1734 }
1735 }
1736 }
1737
1738 if (atomic_read(&instance->fw_outstanding) ||
1739 (kill_adapter_flag == 2)) {
1663 printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n"); 1740 printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
1664 /* 1741 /*
1665 * Send signal to FW to stop processing any pending cmds. 1742 * Send signal to FW to stop processing any pending cmds.
@@ -2669,6 +2746,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
2669 return -ENOMEM; 2746 return -ENOMEM;
2670 } 2747 }
2671 2748
2749 memset(cmd->frame, 0, total_sz);
2672 cmd->frame->io.context = cmd->index; 2750 cmd->frame->io.context = cmd->index;
2673 cmd->frame->io.pad_0 = 0; 2751 cmd->frame->io.pad_0 = 0;
2674 } 2752 }
@@ -3585,6 +3663,27 @@ static int megasas_io_attach(struct megasas_instance *instance)
3585 instance->max_fw_cmds - MEGASAS_INT_CMDS; 3663 instance->max_fw_cmds - MEGASAS_INT_CMDS;
3586 host->this_id = instance->init_id; 3664 host->this_id = instance->init_id;
3587 host->sg_tablesize = instance->max_num_sge; 3665 host->sg_tablesize = instance->max_num_sge;
3666 /*
3667 * Check if the module parameter value for max_sectors can be used
3668 */
3669 if (max_sectors && max_sectors < instance->max_sectors_per_req)
3670 instance->max_sectors_per_req = max_sectors;
3671 else {
3672 if (max_sectors) {
3673 if (((instance->pdev->device ==
3674 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
3675 (instance->pdev->device ==
3676 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
3677 (max_sectors <= MEGASAS_MAX_SECTORS)) {
3678 instance->max_sectors_per_req = max_sectors;
3679 } else {
3680 printk(KERN_INFO "megasas: max_sectors should be > 0"
3681 "and <= %d (or < 1MB for GEN2 controller)\n",
3682 instance->max_sectors_per_req);
3683 }
3684 }
3685 }
3686
3588 host->max_sectors = instance->max_sectors_per_req; 3687 host->max_sectors = instance->max_sectors_per_req;
3589 host->cmd_per_lun = 128; 3688 host->cmd_per_lun = 128;
3590 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 3689 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
@@ -4658,6 +4757,15 @@ megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
4658static DRIVER_ATTR(support_poll_for_event, S_IRUGO, 4757static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
4659 megasas_sysfs_show_support_poll_for_event, NULL); 4758 megasas_sysfs_show_support_poll_for_event, NULL);
4660 4759
4760 static ssize_t
4761megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
4762{
4763 return sprintf(buf, "%u\n", support_device_change);
4764}
4765
4766static DRIVER_ATTR(support_device_change, S_IRUGO,
4767 megasas_sysfs_show_support_device_change, NULL);
4768
4661static ssize_t 4769static ssize_t
4662megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) 4770megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
4663{ 4771{
@@ -4978,6 +5086,7 @@ static int __init megasas_init(void)
4978 MEGASAS_EXT_VERSION); 5086 MEGASAS_EXT_VERSION);
4979 5087
4980 support_poll_for_event = 2; 5088 support_poll_for_event = 2;
5089 support_device_change = 1;
4981 5090
4982 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 5091 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
4983 5092
@@ -5026,8 +5135,17 @@ static int __init megasas_init(void)
5026 if (rval) 5135 if (rval)
5027 goto err_dcf_poll_mode_io; 5136 goto err_dcf_poll_mode_io;
5028 5137
5138 rval = driver_create_file(&megasas_pci_driver.driver,
5139 &driver_attr_support_device_change);
5140 if (rval)
5141 goto err_dcf_support_device_change;
5142
5029 return rval; 5143 return rval;
5030 5144
5145err_dcf_support_device_change:
5146 driver_remove_file(&megasas_pci_driver.driver,
5147 &driver_attr_poll_mode_io);
5148
5031err_dcf_poll_mode_io: 5149err_dcf_poll_mode_io:
5032 driver_remove_file(&megasas_pci_driver.driver, 5150 driver_remove_file(&megasas_pci_driver.driver,
5033 &driver_attr_dbg_lvl); 5151 &driver_attr_dbg_lvl);
@@ -5058,6 +5176,10 @@ static void __exit megasas_exit(void)
5058 driver_remove_file(&megasas_pci_driver.driver, 5176 driver_remove_file(&megasas_pci_driver.driver,
5059 &driver_attr_dbg_lvl); 5177 &driver_attr_dbg_lvl);
5060 driver_remove_file(&megasas_pci_driver.driver, 5178 driver_remove_file(&megasas_pci_driver.driver,
5179 &driver_attr_support_poll_for_event);
5180 driver_remove_file(&megasas_pci_driver.driver,
5181 &driver_attr_support_device_change);
5182 driver_remove_file(&megasas_pci_driver.driver,
5061 &driver_attr_release_date); 5183 &driver_attr_release_date);
5062 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 5184 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
5063 5185
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 16a4f68a34b..ad16f5e6004 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
18/* 18/*
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.04.17.1-rc1" 21#define MEGASAS_VERSION "00.00.04.31-rc1"
22#define MEGASAS_RELDATE "Oct. 29, 2009" 22#define MEGASAS_RELDATE "May 3, 2010"
23#define MEGASAS_EXT_VERSION "Thu. Oct. 29, 11:41:51 PST 2009" 23#define MEGASAS_EXT_VERSION "Mon. May 3, 11:41:51 PST 2010"
24 24
25/* 25/*
26 * Device IDs 26 * Device IDs
@@ -706,6 +706,7 @@ struct megasas_ctrl_info {
706#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ 706#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
707 MEGASAS_MAX_DEV_PER_CHANNEL) 707 MEGASAS_MAX_DEV_PER_CHANNEL)
708 708
709#define MEGASAS_MAX_SECTORS (2*1024)
709#define MEGASAS_DBG_LVL 1 710#define MEGASAS_DBG_LVL 1
710 711
711#define MEGASAS_FW_BUSY 1 712#define MEGASAS_FW_BUSY 1
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index e88bbdde49c..0433ea6f27c 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -452,10 +452,6 @@ void osd_end_request(struct osd_request *or)
452{ 452{
453 struct request *rq = or->request; 453 struct request *rq = or->request;
454 454
455 _osd_free_seg(or, &or->set_attr);
456 _osd_free_seg(or, &or->enc_get_attr);
457 _osd_free_seg(or, &or->get_attr);
458
459 if (rq) { 455 if (rq) {
460 if (rq->next_rq) { 456 if (rq->next_rq) {
461 _put_request(rq->next_rq); 457 _put_request(rq->next_rq);
@@ -464,6 +460,12 @@ void osd_end_request(struct osd_request *or)
464 460
465 _put_request(rq); 461 _put_request(rq);
466 } 462 }
463
464 _osd_free_seg(or, &or->get_attr);
465 _osd_free_seg(or, &or->enc_get_attr);
466 _osd_free_seg(or, &or->set_attr);
467 _osd_free_seg(or, &or->cdb_cont);
468
467 _osd_request_free(or); 469 _osd_request_free(or);
468} 470}
469EXPORT_SYMBOL(osd_end_request); 471EXPORT_SYMBOL(osd_end_request);
@@ -547,6 +549,12 @@ static int _osd_realloc_seg(struct osd_request *or,
547 return 0; 549 return 0;
548} 550}
549 551
552static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
553{
554 OSD_DEBUG("total_bytes=%d\n", total_bytes);
555 return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
556}
557
550static int _alloc_set_attr_list(struct osd_request *or, 558static int _alloc_set_attr_list(struct osd_request *or,
551 const struct osd_attr *oa, unsigned nelem, unsigned add_bytes) 559 const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
552{ 560{
@@ -885,6 +893,199 @@ int osd_req_read_kern(struct osd_request *or,
885} 893}
886EXPORT_SYMBOL(osd_req_read_kern); 894EXPORT_SYMBOL(osd_req_read_kern);
887 895
896static int _add_sg_continuation_descriptor(struct osd_request *or,
897 const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
898{
899 struct osd_sg_continuation_descriptor *oscd;
900 u32 oscd_size;
901 unsigned i;
902 int ret;
903
904 oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
905
906 if (!or->cdb_cont.total_bytes) {
907 /* First time, jump over the header, we will write to:
908 * cdb_cont.buff + cdb_cont.total_bytes
909 */
910 or->cdb_cont.total_bytes =
911 sizeof(struct osd_continuation_segment_header);
912 }
913
914 ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
915 if (unlikely(ret))
916 return ret;
917
918 oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
919 oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
920 oscd->hdr.pad_length = 0;
921 oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
922
923 *len = 0;
924 /* copy the sg entries and convert to network byte order */
925 for (i = 0; i < numentries; i++) {
926 oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
927 oscd->entries[i].len = cpu_to_be64(sglist[i].len);
928 *len += sglist[i].len;
929 }
930
931 or->cdb_cont.total_bytes += oscd_size;
932 OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
933 or->cdb_cont.total_bytes, oscd_size, numentries);
934 return 0;
935}
936
937static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
938{
939 struct request_queue *req_q = osd_request_queue(or->osd_dev);
940 struct bio *bio;
941 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
942 struct osd_continuation_segment_header *cont_seg_hdr;
943
944 if (!or->cdb_cont.total_bytes)
945 return 0;
946
947 cont_seg_hdr = or->cdb_cont.buff;
948 cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
949 cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
950
951 /* create a bio for continuation segment */
952 bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
953 GFP_KERNEL);
954 if (unlikely(!bio))
955 return -ENOMEM;
956
957 bio->bi_rw |= REQ_WRITE;
958
959 /* integrity check the continuation before the bio is linked
960 * with the other data segments since the continuation
961 * integrity is separate from the other data segments.
962 */
963 osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
964
965 cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
966
967 /* we can't use _req_append_segment, because we need to link in the
968 * continuation bio to the head of the bio list - the
969 * continuation segment (if it exists) is always the first segment in
970 * the out data buffer.
971 */
972 bio->bi_next = or->out.bio;
973 or->out.bio = bio;
974 or->out.total_bytes += or->cdb_cont.total_bytes;
975
976 return 0;
977}
978
979/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
980 * @sglist that has the scatter gather entries. Scatter-gather enables a write
981 * of multiple none-contiguous areas of an object, in a single call. The extents
982 * may overlap and/or be in any order. The only constrain is that:
983 * total_bytes(sglist) >= total_bytes(bio)
984 */
985int osd_req_write_sg(struct osd_request *or,
986 const struct osd_obj_id *obj, struct bio *bio,
987 const struct osd_sg_entry *sglist, unsigned numentries)
988{
989 u64 len;
990 int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
991
992 if (ret)
993 return ret;
994 osd_req_write(or, obj, 0, bio, len);
995
996 return 0;
997}
998EXPORT_SYMBOL(osd_req_write_sg);
999
1000/* osd_req_read_sg: Read multiple extents of an object into @bio
1001 * See osd_req_write_sg
1002 */
1003int osd_req_read_sg(struct osd_request *or,
1004 const struct osd_obj_id *obj, struct bio *bio,
1005 const struct osd_sg_entry *sglist, unsigned numentries)
1006{
1007 u64 len;
1008 int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
1009
1010 if (ret)
1011 return ret;
1012 osd_req_read(or, obj, 0, bio, len);
1013
1014 return 0;
1015}
1016EXPORT_SYMBOL(osd_req_read_sg);
1017
1018/* SG-list write/read Kern API
1019 *
1020 * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
1021 * of sg_entries. @numentries indicates how many pointers and sg_entries there
1022 * are. By requiring an array of buff pointers. This allows a caller to do a
1023 * single write/read and scatter into multiple buffers.
1024 * NOTE: Each buffer + len should not cross a page boundary.
1025 */
1026static struct bio *_create_sg_bios(struct osd_request *or,
1027 void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
1028{
1029 struct request_queue *q = osd_request_queue(or->osd_dev);
1030 struct bio *bio;
1031 unsigned i;
1032
1033 bio = bio_kmalloc(GFP_KERNEL, numentries);
1034 if (unlikely(!bio)) {
1035 OSD_DEBUG("Faild to allocate BIO size=%u\n", numentries);
1036 return ERR_PTR(-ENOMEM);
1037 }
1038
1039 for (i = 0; i < numentries; i++) {
1040 unsigned offset = offset_in_page(buff[i]);
1041 struct page *page = virt_to_page(buff[i]);
1042 unsigned len = sglist[i].len;
1043 unsigned added_len;
1044
1045 BUG_ON(offset + len > PAGE_SIZE);
1046 added_len = bio_add_pc_page(q, bio, page, len, offset);
1047 if (unlikely(len != added_len)) {
1048 OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
1049 len, added_len);
1050 bio_put(bio);
1051 return ERR_PTR(-ENOMEM);
1052 }
1053 }
1054
1055 return bio;
1056}
1057
1058int osd_req_write_sg_kern(struct osd_request *or,
1059 const struct osd_obj_id *obj, void **buff,
1060 const struct osd_sg_entry *sglist, unsigned numentries)
1061{
1062 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1063 if (IS_ERR(bio))
1064 return PTR_ERR(bio);
1065
1066 bio->bi_rw |= REQ_WRITE;
1067 osd_req_write_sg(or, obj, bio, sglist, numentries);
1068
1069 return 0;
1070}
1071EXPORT_SYMBOL(osd_req_write_sg_kern);
1072
1073int osd_req_read_sg_kern(struct osd_request *or,
1074 const struct osd_obj_id *obj, void **buff,
1075 const struct osd_sg_entry *sglist, unsigned numentries)
1076{
1077 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1078 if (IS_ERR(bio))
1079 return PTR_ERR(bio);
1080
1081 osd_req_read_sg(or, obj, bio, sglist, numentries);
1082
1083 return 0;
1084}
1085EXPORT_SYMBOL(osd_req_read_sg_kern);
1086
1087
1088
888void osd_req_get_attributes(struct osd_request *or, 1089void osd_req_get_attributes(struct osd_request *or,
889 const struct osd_obj_id *obj) 1090 const struct osd_obj_id *obj)
890{ 1091{
@@ -1218,17 +1419,18 @@ int osd_req_add_get_attr_page(struct osd_request *or,
1218 or->get_attr.buff = attar_page; 1419 or->get_attr.buff = attar_page;
1219 or->get_attr.total_bytes = max_page_len; 1420 or->get_attr.total_bytes = max_page_len;
1220 1421
1221 or->set_attr.buff = set_one_attr->val_ptr;
1222 or->set_attr.total_bytes = set_one_attr->len;
1223
1224 cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id); 1422 cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
1225 cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len); 1423 cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
1226 /* ocdb->attrs_page.get_attr_offset; */ 1424
1425 if (!set_one_attr || !set_one_attr->attr_page)
1426 return 0; /* The set is optional */
1427
1428 or->set_attr.buff = set_one_attr->val_ptr;
1429 or->set_attr.total_bytes = set_one_attr->len;
1227 1430
1228 cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page); 1431 cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
1229 cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id); 1432 cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
1230 cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len); 1433 cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
1231 /* ocdb->attrs_page.set_attr_offset; */
1232 return 0; 1434 return 0;
1233} 1435}
1234EXPORT_SYMBOL(osd_req_add_get_attr_page); 1436EXPORT_SYMBOL(osd_req_add_get_attr_page);
@@ -1248,11 +1450,14 @@ static int _osd_req_finalize_attr_page(struct osd_request *or)
1248 if (ret) 1450 if (ret)
1249 return ret; 1451 return ret;
1250 1452
1453 if (or->set_attr.total_bytes == 0)
1454 return 0;
1455
1251 /* set one value */ 1456 /* set one value */
1252 cdbh->attrs_page.set_attr_offset = 1457 cdbh->attrs_page.set_attr_offset =
1253 osd_req_encode_offset(or, or->out.total_bytes, &out_padding); 1458 osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1254 1459
1255 ret = _req_append_segment(or, out_padding, &or->enc_get_attr, NULL, 1460 ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
1256 &or->out); 1461 &or->out);
1257 return ret; 1462 return ret;
1258} 1463}
@@ -1276,7 +1481,8 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
1276} 1481}
1277 1482
1278static int _osd_req_finalize_data_integrity(struct osd_request *or, 1483static int _osd_req_finalize_data_integrity(struct osd_request *or,
1279 bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key) 1484 bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
1485 const u8 *cap_key)
1280{ 1486{
1281 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); 1487 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1282 int ret; 1488 int ret;
@@ -1307,7 +1513,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1307 or->out.last_seg = NULL; 1513 or->out.last_seg = NULL;
1308 1514
1309 /* they are now all chained to request sign them all together */ 1515 /* they are now all chained to request sign them all together */
1310 osd_sec_sign_data(&or->out_data_integ, or->out.req->bio, 1516 osd_sec_sign_data(&or->out_data_integ, out_data_bio,
1311 cap_key); 1517 cap_key);
1312 } 1518 }
1313 1519
@@ -1403,6 +1609,8 @@ int osd_finalize_request(struct osd_request *or,
1403{ 1609{
1404 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); 1610 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1405 bool has_in, has_out; 1611 bool has_in, has_out;
1612 /* Save for data_integrity without the cdb_continuation */
1613 struct bio *out_data_bio = or->out.bio;
1406 u64 out_data_bytes = or->out.total_bytes; 1614 u64 out_data_bytes = or->out.total_bytes;
1407 int ret; 1615 int ret;
1408 1616
@@ -1418,9 +1626,14 @@ int osd_finalize_request(struct osd_request *or,
1418 osd_set_caps(&or->cdb, cap); 1626 osd_set_caps(&or->cdb, cap);
1419 1627
1420 has_in = or->in.bio || or->get_attr.total_bytes; 1628 has_in = or->in.bio || or->get_attr.total_bytes;
1421 has_out = or->out.bio || or->set_attr.total_bytes || 1629 has_out = or->out.bio || or->cdb_cont.total_bytes ||
1422 or->enc_get_attr.total_bytes; 1630 or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
1423 1631
1632 ret = _osd_req_finalize_cdb_cont(or, cap_key);
1633 if (ret) {
1634 OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
1635 return ret;
1636 }
1424 ret = _init_blk_request(or, has_in, has_out); 1637 ret = _init_blk_request(or, has_in, has_out);
1425 if (ret) { 1638 if (ret) {
1426 OSD_DEBUG("_init_blk_request failed\n"); 1639 OSD_DEBUG("_init_blk_request failed\n");
@@ -1458,7 +1671,8 @@ int osd_finalize_request(struct osd_request *or,
1458 } 1671 }
1459 1672
1460 ret = _osd_req_finalize_data_integrity(or, has_in, has_out, 1673 ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1461 out_data_bytes, cap_key); 1674 out_data_bio, out_data_bytes,
1675 cap_key);
1462 if (ret) 1676 if (ret)
1463 return ret; 1677 return ret;
1464 1678
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4b8765785ae..cf89091e4c3 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1594,10 +1594,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1594 cfg_entry = &ccn_hcam->cfg_entry; 1594 cfg_entry = &ccn_hcam->cfg_entry;
1595 fw_version = be16_to_cpu(pinstance->inq_data->fw_version); 1595 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
1596 1596
1597 pmcraid_info 1597 pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \
1598 ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n", 1598 res: %x:%x:%x:%x\n",
1599 pinstance->ccn.hcam->ilid, 1599 pinstance->ccn.hcam->ilid,
1600 pinstance->ccn.hcam->op_code, 1600 pinstance->ccn.hcam->op_code,
1601 ((pinstance->ccn.hcam->timestamp1) |
1602 ((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
1601 pinstance->ccn.hcam->notification_type, 1603 pinstance->ccn.hcam->notification_type,
1602 pinstance->ccn.hcam->notification_lost, 1604 pinstance->ccn.hcam->notification_lost,
1603 pinstance->ccn.hcam->flags, 1605 pinstance->ccn.hcam->flags,
@@ -1850,6 +1852,7 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
1850 * none 1852 * none
1851 */ 1853 */
1852static void pmcraid_initiate_reset(struct pmcraid_instance *); 1854static void pmcraid_initiate_reset(struct pmcraid_instance *);
1855static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
1853 1856
1854static void pmcraid_process_ldn(struct pmcraid_cmd *cmd) 1857static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
1855{ 1858{
@@ -1881,6 +1884,10 @@ static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
1881 lock_flags); 1884 lock_flags);
1882 return; 1885 return;
1883 } 1886 }
1887 if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
1888 pinstance->timestamp_error = 1;
1889 pmcraid_set_timestamp(cmd);
1890 }
1884 } else { 1891 } else {
1885 dev_info(&pinstance->pdev->dev, 1892 dev_info(&pinstance->pdev->dev,
1886 "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc); 1893 "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
@@ -3363,7 +3370,7 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
3363 sg_size = buflen; 3370 sg_size = buflen;
3364 3371
3365 for (i = 0; i < num_elem; i++) { 3372 for (i = 0; i < num_elem; i++) {
3366 page = alloc_pages(GFP_KERNEL|GFP_DMA, order); 3373 page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
3367 if (!page) { 3374 if (!page) {
3368 for (j = i - 1; j >= 0; j--) 3375 for (j = i - 1; j >= 0; j--)
3369 __free_pages(sg_page(&scatterlist[j]), order); 3376 __free_pages(sg_page(&scatterlist[j]), order);
@@ -3739,6 +3746,7 @@ static long pmcraid_ioctl_passthrough(
3739 unsigned long request_buffer; 3746 unsigned long request_buffer;
3740 unsigned long request_offset; 3747 unsigned long request_offset;
3741 unsigned long lock_flags; 3748 unsigned long lock_flags;
3749 void *ioasa;
3742 u32 ioasc; 3750 u32 ioasc;
3743 int request_size; 3751 int request_size;
3744 int buffer_size; 3752 int buffer_size;
@@ -3780,6 +3788,11 @@ static long pmcraid_ioctl_passthrough(
3780 rc = __copy_from_user(buffer, 3788 rc = __copy_from_user(buffer,
3781 (struct pmcraid_passthrough_ioctl_buffer *) arg, 3789 (struct pmcraid_passthrough_ioctl_buffer *) arg,
3782 sizeof(struct pmcraid_passthrough_ioctl_buffer)); 3790 sizeof(struct pmcraid_passthrough_ioctl_buffer));
3791
3792 ioasa =
3793 (void *)(arg +
3794 offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
3795
3783 if (rc) { 3796 if (rc) {
3784 pmcraid_err("ioctl: can't copy passthrough buffer\n"); 3797 pmcraid_err("ioctl: can't copy passthrough buffer\n");
3785 rc = -EFAULT; 3798 rc = -EFAULT;
@@ -3947,22 +3960,14 @@ static long pmcraid_ioctl_passthrough(
3947 } 3960 }
3948 3961
3949out_handle_response: 3962out_handle_response:
3950 /* If the command failed for any reason, copy entire IOASA buffer and 3963 /* copy entire IOASA buffer and return IOCTL success.
3951 * return IOCTL success. If copying IOASA to user-buffer fails, return 3964 * If copying IOASA to user-buffer fails, return
3952 * EFAULT 3965 * EFAULT
3953 */ 3966 */
3954 if (PMCRAID_IOASC_SENSE_KEY(le32_to_cpu(cmd->ioa_cb->ioasa.ioasc))) { 3967 if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
3955 void *ioasa = 3968 sizeof(struct pmcraid_ioasa))) {
3956 (void *)(arg + 3969 pmcraid_err("failed to copy ioasa buffer to user\n");
3957 offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa)); 3970 rc = -EFAULT;
3958
3959 pmcraid_info("command failed with %x\n",
3960 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
3961 if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
3962 sizeof(struct pmcraid_ioasa))) {
3963 pmcraid_err("failed to copy ioasa buffer to user\n");
3964 rc = -EFAULT;
3965 }
3966 } 3971 }
3967 3972
3968 /* If the data transfer was from device, copy the data onto user 3973 /* If the data transfer was from device, copy the data onto user
@@ -5147,6 +5152,16 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
5147 pinstance->inq_data = NULL; 5152 pinstance->inq_data = NULL;
5148 pinstance->inq_data_baddr = 0; 5153 pinstance->inq_data_baddr = 0;
5149 } 5154 }
5155
5156 if (pinstance->timestamp_data != NULL) {
5157 pci_free_consistent(pinstance->pdev,
5158 sizeof(struct pmcraid_timestamp_data),
5159 pinstance->timestamp_data,
5160 pinstance->timestamp_data_baddr);
5161
5162 pinstance->timestamp_data = NULL;
5163 pinstance->timestamp_data_baddr = 0;
5164 }
5150} 5165}
5151 5166
5152/** 5167/**
@@ -5205,6 +5220,20 @@ static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
5205 return -ENOMEM; 5220 return -ENOMEM;
5206 } 5221 }
5207 5222
5223 /* allocate DMAable memory for set timestamp data buffer */
5224 pinstance->timestamp_data = pci_alloc_consistent(
5225 pinstance->pdev,
5226 sizeof(struct pmcraid_timestamp_data),
5227 &pinstance->timestamp_data_baddr);
5228
5229 if (pinstance->timestamp_data == NULL) {
5230 pmcraid_err("couldn't allocate DMA memory for \
5231 set time_stamp \n");
5232 pmcraid_release_buffers(pinstance);
5233 return -ENOMEM;
5234 }
5235
5236
5208 /* Initialize all the command blocks and add them to free pool. No 5237 /* Initialize all the command blocks and add them to free pool. No
5209 * need to lock (free_pool_lock) as this is done in initialization 5238 * need to lock (free_pool_lock) as this is done in initialization
5210 * itself 5239 * itself
@@ -5610,6 +5639,68 @@ static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
5610} 5639}
5611 5640
5612/** 5641/**
5642 * pmcraid_set_timestamp - set the timestamp to IOAFP
5643 *
5644 * @cmd: pointer to pmcraid_cmd structure
5645 *
5646 * Return Value
5647 * 0 for success or non-zero for failure cases
5648 */
5649static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
5650{
5651 struct pmcraid_instance *pinstance = cmd->drv_inst;
5652 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5653 __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
5654 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
5655
5656 struct timeval tv;
5657 __le64 timestamp;
5658
5659 do_gettimeofday(&tv);
5660 timestamp = tv.tv_sec * 1000;
5661
5662 pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
5663 pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
5664 pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
5665 pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
5666 pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
5667 pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40);
5668
5669 pmcraid_reinit_cmdblk(cmd);
5670 ioarcb->request_type = REQ_TYPE_SCSI;
5671 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5672 ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
5673 ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
5674 memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
5675
5676 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
5677 offsetof(struct pmcraid_ioarcb,
5678 add_data.u.ioadl[0]));
5679 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
5680 ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
5681
5682 ioarcb->request_flags0 |= NO_LINK_DESCS;
5683 ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
5684 ioarcb->data_transfer_length =
5685 cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
5686 ioadl = &(ioarcb->add_data.u.ioadl[0]);
5687 ioadl->flags = IOADL_FLAGS_LAST_DESC;
5688 ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
5689 ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
5690
5691 if (!pinstance->timestamp_error) {
5692 pinstance->timestamp_error = 0;
5693 pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
5694 PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5695 } else {
5696 pmcraid_send_cmd(cmd, pmcraid_return_cmd,
5697 PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5698 return;
5699 }
5700}
5701
5702
5703/**
5613 * pmcraid_init_res_table - Initialize the resource table 5704 * pmcraid_init_res_table - Initialize the resource table
5614 * @cmd: pointer to pmcraid command struct 5705 * @cmd: pointer to pmcraid command struct
5615 * 5706 *
@@ -5720,7 +5811,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5720 5811
5721 /* release the resource list lock */ 5812 /* release the resource list lock */
5722 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); 5813 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
5723 pmcraid_set_supported_devs(cmd); 5814 pmcraid_set_timestamp(cmd);
5724} 5815}
5725 5816
5726/** 5817/**
@@ -6054,10 +6145,10 @@ out_init:
6054static void __exit pmcraid_exit(void) 6145static void __exit pmcraid_exit(void)
6055{ 6146{
6056 pmcraid_netlink_release(); 6147 pmcraid_netlink_release();
6057 class_destroy(pmcraid_class);
6058 unregister_chrdev_region(MKDEV(pmcraid_major, 0), 6148 unregister_chrdev_region(MKDEV(pmcraid_major, 0),
6059 PMCRAID_MAX_ADAPTERS); 6149 PMCRAID_MAX_ADAPTERS);
6060 pci_unregister_driver(&pmcraid_driver); 6150 pci_unregister_driver(&pmcraid_driver);
6151 class_destroy(pmcraid_class);
6061} 6152}
6062 6153
6063module_init(pmcraid_init); 6154module_init(pmcraid_init);
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 6cfa0145a1d..1134279604e 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -42,7 +42,7 @@
42 */ 42 */
43#define PMCRAID_DRIVER_NAME "PMC MaxRAID" 43#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
44#define PMCRAID_DEVFILE "pmcsas" 44#define PMCRAID_DEVFILE "pmcsas"
45#define PMCRAID_DRIVER_VERSION "2.0.2" 45#define PMCRAID_DRIVER_VERSION "2.0.3"
46#define PMCRAID_DRIVER_DATE __DATE__ 46#define PMCRAID_DRIVER_DATE __DATE__
47 47
48#define PMCRAID_FW_VERSION_1 0x002 48#define PMCRAID_FW_VERSION_1 0x002
@@ -184,6 +184,7 @@
184#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000 184#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000
185#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000 185#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000
186#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000 186#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000
187#define PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC 0x06908B00
187#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000 188#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000
188 189
189/* Driver defined IOASCs */ 190/* Driver defined IOASCs */
@@ -561,6 +562,17 @@ struct pmcraid_inquiry_data {
561 __u8 reserved3[16]; 562 __u8 reserved3[16];
562}; 563};
563 564
565#define PMCRAID_TIMESTAMP_LEN 12
566#define PMCRAID_REQ_TM_STR_LEN 6
567#define PMCRAID_SCSI_SET_TIMESTAMP 0xA4
568#define PMCRAID_SCSI_SERVICE_ACTION 0x0F
569
570struct pmcraid_timestamp_data {
571 __u8 reserved1[4];
572 __u8 timestamp[PMCRAID_REQ_TM_STR_LEN]; /* current time value */
573 __u8 reserved2[2];
574};
575
564/* pmcraid_cmd - LLD representation of SCSI command */ 576/* pmcraid_cmd - LLD representation of SCSI command */
565struct pmcraid_cmd { 577struct pmcraid_cmd {
566 578
@@ -568,7 +580,6 @@ struct pmcraid_cmd {
568 struct pmcraid_control_block *ioa_cb; 580 struct pmcraid_control_block *ioa_cb;
569 dma_addr_t ioa_cb_bus_addr; 581 dma_addr_t ioa_cb_bus_addr;
570 dma_addr_t dma_handle; 582 dma_addr_t dma_handle;
571 u8 *sense_buffer;
572 583
573 /* pointer to mid layer structure of SCSI commands */ 584 /* pointer to mid layer structure of SCSI commands */
574 struct scsi_cmnd *scsi_cmd; 585 struct scsi_cmnd *scsi_cmd;
@@ -705,6 +716,9 @@ struct pmcraid_instance {
705 struct pmcraid_inquiry_data *inq_data; 716 struct pmcraid_inquiry_data *inq_data;
706 dma_addr_t inq_data_baddr; 717 dma_addr_t inq_data_baddr;
707 718
719 struct pmcraid_timestamp_data *timestamp_data;
720 dma_addr_t timestamp_data_baddr;
721
708 /* size of configuration table entry, varies based on the firmware */ 722 /* size of configuration table entry, varies based on the firmware */
709 u32 config_table_entry_size; 723 u32 config_table_entry_size;
710 724
@@ -791,6 +805,7 @@ struct pmcraid_instance {
791#define SHUTDOWN_NONE 0x0 805#define SHUTDOWN_NONE 0x0
792#define SHUTDOWN_NORMAL 0x1 806#define SHUTDOWN_NORMAL 0x1
793#define SHUTDOWN_ABBREV 0x2 807#define SHUTDOWN_ABBREV 0x2
808 u32 timestamp_error:1; /* indicate set timestamp for out of sync */
794 809
795}; 810};
796 811
@@ -1056,10 +1071,10 @@ struct pmcraid_passthrough_ioctl_buffer {
1056#define PMCRAID_PASSTHROUGH_IOCTL 'F' 1071#define PMCRAID_PASSTHROUGH_IOCTL 'F'
1057 1072
1058#define DRV_IOCTL(n, size) \ 1073#define DRV_IOCTL(n, size) \
1059 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) 1074 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
1060 1075
1061#define FMW_IOCTL(n, size) \ 1076#define FMW_IOCTL(n, size) \
1062 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) 1077 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
1063 1078
1064/* 1079/*
1065 * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd. 1080 * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 2ff4342ae36..bc8194f7462 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1538,6 +1538,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1538 if (!fcport) 1538 if (!fcport)
1539 return; 1539 return;
1540 1540
1541 /* Now that the rport has been deleted, set the fcport state to
1542 FCS_DEVICE_DEAD */
1543 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
1544
1541 /* 1545 /*
1542 * Transport has effectively 'deleted' the rport, clear 1546 * Transport has effectively 'deleted' the rport, clear
1543 * all local references. 1547 * all local references.
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index fdfbf83a633..31a4121a2be 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1307,6 +1307,125 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1307} 1307}
1308 1308
1309static int 1309static int
1310qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1311 uint8_t is_update)
1312{
1313 uint32_t start = 0;
1314 int valid = 0;
1315
1316 bsg_job->reply->reply_payload_rcv_len = 0;
1317
1318 if (unlikely(pci_channel_offline(ha->pdev)))
1319 return -EINVAL;
1320
1321 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1322 if (start > ha->optrom_size)
1323 return -EINVAL;
1324
1325 if (ha->optrom_state != QLA_SWAITING)
1326 return -EBUSY;
1327
1328 ha->optrom_region_start = start;
1329
1330 if (is_update) {
1331 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1332 valid = 1;
1333 else if (start == (ha->flt_region_boot * 4) ||
1334 start == (ha->flt_region_fw * 4))
1335 valid = 1;
1336 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1337 IS_QLA8XXX_TYPE(ha))
1338 valid = 1;
1339 if (!valid) {
1340 qla_printk(KERN_WARNING, ha,
1341 "Invalid start region 0x%x/0x%x.\n",
1342 start, bsg_job->request_payload.payload_len);
1343 return -EINVAL;
1344 }
1345
1346 ha->optrom_region_size = start +
1347 bsg_job->request_payload.payload_len > ha->optrom_size ?
1348 ha->optrom_size - start :
1349 bsg_job->request_payload.payload_len;
1350 ha->optrom_state = QLA_SWRITING;
1351 } else {
1352 ha->optrom_region_size = start +
1353 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1354 ha->optrom_size - start :
1355 bsg_job->reply_payload.payload_len;
1356 ha->optrom_state = QLA_SREADING;
1357 }
1358
1359 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1360 if (!ha->optrom_buffer) {
1361 qla_printk(KERN_WARNING, ha,
1362 "Read: Unable to allocate memory for optrom retrieval "
1363 "(%x).\n", ha->optrom_region_size);
1364
1365 ha->optrom_state = QLA_SWAITING;
1366 return -ENOMEM;
1367 }
1368
1369 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1370 return 0;
1371}
1372
1373static int
1374qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1375{
1376 struct Scsi_Host *host = bsg_job->shost;
1377 scsi_qla_host_t *vha = shost_priv(host);
1378 struct qla_hw_data *ha = vha->hw;
1379 int rval = 0;
1380
1381 rval = qla2x00_optrom_setup(bsg_job, ha, 0);
1382 if (rval)
1383 return rval;
1384
1385 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1386 ha->optrom_region_start, ha->optrom_region_size);
1387
1388 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1389 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1390 ha->optrom_region_size);
1391
1392 bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1393 bsg_job->reply->result = DID_OK;
1394 vfree(ha->optrom_buffer);
1395 ha->optrom_buffer = NULL;
1396 ha->optrom_state = QLA_SWAITING;
1397 bsg_job->job_done(bsg_job);
1398 return rval;
1399}
1400
1401static int
1402qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1403{
1404 struct Scsi_Host *host = bsg_job->shost;
1405 scsi_qla_host_t *vha = shost_priv(host);
1406 struct qla_hw_data *ha = vha->hw;
1407 int rval = 0;
1408
1409 rval = qla2x00_optrom_setup(bsg_job, ha, 1);
1410 if (rval)
1411 return rval;
1412
1413 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1414 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1415 ha->optrom_region_size);
1416
1417 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1418 ha->optrom_region_start, ha->optrom_region_size);
1419
1420 bsg_job->reply->result = DID_OK;
1421 vfree(ha->optrom_buffer);
1422 ha->optrom_buffer = NULL;
1423 ha->optrom_state = QLA_SWAITING;
1424 bsg_job->job_done(bsg_job);
1425 return rval;
1426}
1427
1428static int
1310qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) 1429qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1311{ 1430{
1312 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { 1431 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1328,6 +1447,12 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1328 case QL_VND_FCP_PRIO_CFG_CMD: 1447 case QL_VND_FCP_PRIO_CFG_CMD:
1329 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); 1448 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1330 1449
1450 case QL_VND_READ_FLASH:
1451 return qla2x00_read_optrom(bsg_job);
1452
1453 case QL_VND_UPDATE_FLASH:
1454 return qla2x00_update_optrom(bsg_job);
1455
1331 default: 1456 default:
1332 bsg_job->reply->result = (DID_ERROR << 16); 1457 bsg_job->reply->result = (DID_ERROR << 16);
1333 bsg_job->job_done(bsg_job); 1458 bsg_job->job_done(bsg_job);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index cc7c52f87a1..074a999c701 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -14,6 +14,8 @@
14#define QL_VND_A84_MGMT_CMD 0x04 14#define QL_VND_A84_MGMT_CMD 0x04
15#define QL_VND_IIDMA 0x05 15#define QL_VND_IIDMA 0x05
16#define QL_VND_FCP_PRIO_CFG_CMD 0x06 16#define QL_VND_FCP_PRIO_CFG_CMD 0x06
17#define QL_VND_READ_FLASH 0x07
18#define QL_VND_UPDATE_FLASH 0x08
17 19
18/* BSG definations for interpreting CommandSent field */ 20/* BSG definations for interpreting CommandSent field */
19#define INT_DEF_LB_LOOPBACK_CMD 0 21#define INT_DEF_LB_LOOPBACK_CMD 0
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e1d3ad40a94..3a22effced5 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1700,9 +1700,7 @@ typedef struct fc_port {
1700 atomic_t state; 1700 atomic_t state;
1701 uint32_t flags; 1701 uint32_t flags;
1702 1702
1703 int port_login_retry_count;
1704 int login_retry; 1703 int login_retry;
1705 atomic_t port_down_timer;
1706 1704
1707 struct fc_rport *rport, *drport; 1705 struct fc_rport *rport, *drport;
1708 u32 supported_classes; 1706 u32 supported_classes;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c33dec827e1..9382a816c13 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -92,6 +92,7 @@ extern int ql2xshiftctondsd;
92extern int ql2xdbwr; 92extern int ql2xdbwr;
93extern int ql2xdontresethba; 93extern int ql2xdontresethba;
94extern int ql2xasynctmfenable; 94extern int ql2xasynctmfenable;
95extern int ql2xgffidenable;
95extern int ql2xenabledif; 96extern int ql2xenabledif;
96extern int ql2xenablehba_err_chk; 97extern int ql2xenablehba_err_chk;
97extern int ql2xtargetreset; 98extern int ql2xtargetreset;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3cafbef4073..259f5113749 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -71,7 +71,7 @@ qla2x00_ctx_sp_free(srb_t *sp)
71 struct srb_iocb *iocb = ctx->u.iocb_cmd; 71 struct srb_iocb *iocb = ctx->u.iocb_cmd;
72 struct scsi_qla_host *vha = sp->fcport->vha; 72 struct scsi_qla_host *vha = sp->fcport->vha;
73 73
74 del_timer_sync(&iocb->timer); 74 del_timer(&iocb->timer);
75 kfree(iocb); 75 kfree(iocb);
76 kfree(ctx); 76 kfree(ctx);
77 mempool_free(sp, sp->fcport->vha->hw->srb_mempool); 77 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
@@ -1344,6 +1344,13 @@ cont_alloc:
1344 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " 1344 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
1345 "firmware dump!!!\n", dump_size / 1024); 1345 "firmware dump!!!\n", dump_size / 1024);
1346 1346
1347 if (ha->fce) {
1348 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
1349 ha->fce_dma);
1350 ha->fce = NULL;
1351 ha->fce_dma = 0;
1352 }
1353
1347 if (ha->eft) { 1354 if (ha->eft) {
1348 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft, 1355 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
1349 ha->eft_dma); 1356 ha->eft_dma);
@@ -1818,14 +1825,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1818 qla2x00_init_response_q_entries(rsp); 1825 qla2x00_init_response_q_entries(rsp);
1819 } 1826 }
1820 1827
1821 spin_lock_irqsave(&ha->vport_slock, flags); 1828 spin_lock(&ha->vport_slock);
1822 /* Clear RSCN queue. */ 1829 /* Clear RSCN queue. */
1823 list_for_each_entry(vp, &ha->vp_list, list) { 1830 list_for_each_entry(vp, &ha->vp_list, list) {
1824 vp->rscn_in_ptr = 0; 1831 vp->rscn_in_ptr = 0;
1825 vp->rscn_out_ptr = 0; 1832 vp->rscn_out_ptr = 0;
1826 } 1833 }
1827 1834
1828 spin_unlock_irqrestore(&ha->vport_slock, flags); 1835 spin_unlock(&ha->vport_slock);
1829 1836
1830 ha->isp_ops->config_rings(vha); 1837 ha->isp_ops->config_rings(vha);
1831 1838
@@ -2916,21 +2923,13 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2916void 2923void
2917qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2924qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2918{ 2925{
2919 struct qla_hw_data *ha = vha->hw;
2920
2921 fcport->vha = vha; 2926 fcport->vha = vha;
2922 fcport->login_retry = 0; 2927 fcport->login_retry = 0;
2923 fcport->port_login_retry_count = ha->port_down_retry_count *
2924 PORT_RETRY_TIME;
2925 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
2926 PORT_RETRY_TIME);
2927 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 2928 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
2928 2929
2929 qla2x00_iidma_fcport(vha, fcport); 2930 qla2x00_iidma_fcport(vha, fcport);
2930
2931 atomic_set(&fcport->state, FCS_ONLINE);
2932
2933 qla2x00_reg_remote_port(vha, fcport); 2931 qla2x00_reg_remote_port(vha, fcport);
2932 atomic_set(&fcport->state, FCS_ONLINE);
2934} 2933}
2935 2934
2936/* 2935/*
@@ -3292,8 +3291,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3292 continue; 3291 continue;
3293 3292
3294 /* Bypass ports whose FCP-4 type is not FCP_SCSI */ 3293 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
3295 if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI && 3294 if (ql2xgffidenable &&
3296 new_fcport->fc4_type != FC4_TYPE_UNKNOWN) 3295 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3296 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
3297 continue; 3297 continue;
3298 3298
3299 /* Locate matching device in database. */ 3299 /* Locate matching device in database. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 579f0285466..5f94430b42f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -992,8 +992,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
992 ha = vha->hw; 992 ha = vha->hw;
993 993
994 DEBUG18(printk(KERN_DEBUG 994 DEBUG18(printk(KERN_DEBUG
995 "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__, 995 "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
996 vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd))); 996 vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
997 997
998 cmd_pkt->vp_index = sp->fcport->vp_idx; 998 cmd_pkt->vp_index = sp->fcport->vp_idx;
999 999
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e0e43d9e7ed..1f06ddd9bdd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1240,12 +1240,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1240 case LSC_SCODE_NPORT_USED: 1240 case LSC_SCODE_NPORT_USED:
1241 data[0] = MBS_LOOP_ID_USED; 1241 data[0] = MBS_LOOP_ID_USED;
1242 break; 1242 break;
1243 case LSC_SCODE_CMD_FAILED:
1244 if ((iop[1] & 0xff) == 0x05) {
1245 data[0] = MBS_NOT_LOGGED_IN;
1246 break;
1247 }
1248 /* Fall through. */
1249 default: 1243 default:
1250 data[0] = MBS_COMMAND_ERROR; 1244 data[0] = MBS_COMMAND_ERROR;
1251 break; 1245 break;
@@ -1431,9 +1425,8 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1431 rsp->status_srb = sp; 1425 rsp->status_srb = sp;
1432 1426
1433 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 1427 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
1434 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, 1428 "cmd=%p\n", __func__, sp->fcport->vha->host_no,
1435 cp->device->channel, cp->device->id, cp->device->lun, cp, 1429 cp->device->channel, cp->device->id, cp->device->lun, cp));
1436 cp->serial_number));
1437 if (sense_len) 1430 if (sense_len)
1438 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); 1431 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
1439} 1432}
@@ -1757,6 +1750,8 @@ check_scsi_status:
1757 case CS_INCOMPLETE: 1750 case CS_INCOMPLETE:
1758 case CS_PORT_UNAVAILABLE: 1751 case CS_PORT_UNAVAILABLE:
1759 case CS_TIMEOUT: 1752 case CS_TIMEOUT:
1753 case CS_RESET:
1754
1760 /* 1755 /*
1761 * We are going to have the fc class block the rport 1756 * We are going to have the fc class block the rport
1762 * while we try to recover so instruct the mid layer 1757 * while we try to recover so instruct the mid layer
@@ -1781,10 +1776,6 @@ check_scsi_status:
1781 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1776 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1782 break; 1777 break;
1783 1778
1784 case CS_RESET:
1785 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1786 break;
1787
1788 case CS_ABORTED: 1779 case CS_ABORTED:
1789 cp->result = DID_RESET << 16; 1780 cp->result = DID_RESET << 16;
1790 break; 1781 break;
@@ -1801,10 +1792,10 @@ out:
1801 if (logit) 1792 if (logit)
1802 DEBUG2(qla_printk(KERN_INFO, ha, 1793 DEBUG2(qla_printk(KERN_INFO, ha,
1803 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " 1794 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
1804 "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x " 1795 "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1805 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, 1796 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
1806 cp->device->id, cp->device->lun, comp_status, scsi_status, 1797 cp->device->id, cp->device->lun, comp_status, scsi_status,
1807 cp->result, ox_id, cp->serial_number, cp->cmnd[0], 1798 cp->result, ox_id, cp->cmnd[0],
1808 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, 1799 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1809 resid_len, fw_resid_len)); 1800 resid_len, fw_resid_len));
1810 1801
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 800ea926975..1830e6e9731 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -160,6 +160,11 @@ MODULE_PARM_DESC(ql2xtargetreset,
160 "Enable target reset." 160 "Enable target reset."
161 "Default is 1 - use hw defaults."); 161 "Default is 1 - use hw defaults.");
162 162
163int ql2xgffidenable;
164module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR);
165MODULE_PARM_DESC(ql2xgffidenable,
166 "Enables GFF_ID checks of port type. "
167 "Default is 0 - Do not use GFF_ID information.");
163 168
164int ql2xasynctmfenable; 169int ql2xasynctmfenable;
165module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR); 170module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
@@ -255,6 +260,7 @@ static void qla2x00_rst_aen(scsi_qla_host_t *);
255 260
256static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, 261static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
257 struct req_que **, struct rsp_que **); 262 struct req_que **, struct rsp_que **);
263static void qla2x00_free_fw_dump(struct qla_hw_data *);
258static void qla2x00_mem_free(struct qla_hw_data *); 264static void qla2x00_mem_free(struct qla_hw_data *);
259static void qla2x00_sp_free_dma(srb_t *); 265static void qla2x00_sp_free_dma(srb_t *);
260 266
@@ -539,6 +545,7 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
539 srb_t *sp; 545 srb_t *sp;
540 int rval; 546 int rval;
541 547
548 spin_unlock_irq(vha->host->host_lock);
542 if (ha->flags.eeh_busy) { 549 if (ha->flags.eeh_busy) {
543 if (ha->flags.pci_channel_io_perm_failure) 550 if (ha->flags.pci_channel_io_perm_failure)
544 cmd->result = DID_NO_CONNECT << 16; 551 cmd->result = DID_NO_CONNECT << 16;
@@ -553,10 +560,6 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
553 goto qc24_fail_command; 560 goto qc24_fail_command;
554 } 561 }
555 562
556 /* Close window on fcport/rport state-transitioning. */
557 if (fcport->drport)
558 goto qc24_target_busy;
559
560 if (!vha->flags.difdix_supported && 563 if (!vha->flags.difdix_supported &&
561 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 564 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
562 DEBUG2(qla_printk(KERN_ERR, ha, 565 DEBUG2(qla_printk(KERN_ERR, ha,
@@ -567,15 +570,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
567 } 570 }
568 if (atomic_read(&fcport->state) != FCS_ONLINE) { 571 if (atomic_read(&fcport->state) != FCS_ONLINE) {
569 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 572 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
570 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 573 atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
574 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
571 cmd->result = DID_NO_CONNECT << 16; 575 cmd->result = DID_NO_CONNECT << 16;
572 goto qc24_fail_command; 576 goto qc24_fail_command;
573 } 577 }
574 goto qc24_target_busy; 578 goto qc24_target_busy;
575 } 579 }
576 580
577 spin_unlock_irq(vha->host->host_lock);
578
579 sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done); 581 sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
580 if (!sp) 582 if (!sp)
581 goto qc24_host_busy_lock; 583 goto qc24_host_busy_lock;
@@ -597,9 +599,11 @@ qc24_host_busy_lock:
597 return SCSI_MLQUEUE_HOST_BUSY; 599 return SCSI_MLQUEUE_HOST_BUSY;
598 600
599qc24_target_busy: 601qc24_target_busy:
602 spin_lock_irq(vha->host->host_lock);
600 return SCSI_MLQUEUE_TARGET_BUSY; 603 return SCSI_MLQUEUE_TARGET_BUSY;
601 604
602qc24_fail_command: 605qc24_fail_command:
606 spin_lock_irq(vha->host->host_lock);
603 done(cmd); 607 done(cmd);
604 608
605 return 0; 609 return 0;
@@ -824,81 +828,58 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
824{ 828{
825 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 829 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
826 srb_t *sp; 830 srb_t *sp;
827 int ret, i; 831 int ret;
828 unsigned int id, lun; 832 unsigned int id, lun;
829 unsigned long serial;
830 unsigned long flags; 833 unsigned long flags;
831 int wait = 0; 834 int wait = 0;
832 struct qla_hw_data *ha = vha->hw; 835 struct qla_hw_data *ha = vha->hw;
833 struct req_que *req = vha->req;
834 srb_t *spt;
835 int got_ref = 0;
836 836
837 fc_block_scsi_eh(cmd); 837 fc_block_scsi_eh(cmd);
838 838
839 if (!CMD_SP(cmd)) 839 if (!CMD_SP(cmd))
840 return SUCCESS; 840 return SUCCESS;
841 841
842 ret = SUCCESS;
843
844 id = cmd->device->id; 842 id = cmd->device->id;
845 lun = cmd->device->lun; 843 lun = cmd->device->lun;
846 serial = cmd->serial_number;
847 spt = (srb_t *) CMD_SP(cmd);
848 if (!spt)
849 return SUCCESS;
850 844
851 /* Check active list for command command. */
852 spin_lock_irqsave(&ha->hardware_lock, flags); 845 spin_lock_irqsave(&ha->hardware_lock, flags);
853 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 846 sp = (srb_t *) CMD_SP(cmd);
854 sp = req->outstanding_cmds[i]; 847 if (!sp) {
855 848 spin_unlock_irqrestore(&ha->hardware_lock, flags);
856 if (sp == NULL) 849 return SUCCESS;
857 continue; 850 }
858 if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
859 !IS_PROT_IO(sp))
860 continue;
861 if (sp->cmd != cmd)
862 continue;
863 851
864 DEBUG2(printk("%s(%ld): aborting sp %p from RISC." 852 DEBUG2(printk("%s(%ld): aborting sp %p from RISC.",
865 " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 853 __func__, vha->host_no, sp));
866 854
867 /* Get a reference to the sp and drop the lock.*/ 855 /* Get a reference to the sp and drop the lock.*/
868 sp_get(sp); 856 sp_get(sp);
869 got_ref++;
870 857
871 spin_unlock_irqrestore(&ha->hardware_lock, flags);
872 if (ha->isp_ops->abort_command(sp)) {
873 DEBUG2(printk("%s(%ld): abort_command "
874 "mbx failed.\n", __func__, vha->host_no));
875 ret = FAILED;
876 } else {
877 DEBUG3(printk("%s(%ld): abort_command "
878 "mbx success.\n", __func__, vha->host_no));
879 wait = 1;
880 }
881 spin_lock_irqsave(&ha->hardware_lock, flags);
882 break;
883 }
884 spin_unlock_irqrestore(&ha->hardware_lock, flags); 858 spin_unlock_irqrestore(&ha->hardware_lock, flags);
859 if (ha->isp_ops->abort_command(sp)) {
860 DEBUG2(printk("%s(%ld): abort_command "
861 "mbx failed.\n", __func__, vha->host_no));
862 ret = FAILED;
863 } else {
864 DEBUG3(printk("%s(%ld): abort_command "
865 "mbx success.\n", __func__, vha->host_no));
866 wait = 1;
867 }
868 qla2x00_sp_compl(ha, sp);
885 869
886 /* Wait for the command to be returned. */ 870 /* Wait for the command to be returned. */
887 if (wait) { 871 if (wait) {
888 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { 872 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
889 qla_printk(KERN_ERR, ha, 873 qla_printk(KERN_ERR, ha,
890 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 874 "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
891 "%x.\n", vha->host_no, id, lun, serial, ret); 875 vha->host_no, id, lun, ret);
892 ret = FAILED; 876 ret = FAILED;
893 } 877 }
894 } 878 }
895 879
896 if (got_ref)
897 qla2x00_sp_compl(ha, sp);
898
899 qla_printk(KERN_INFO, ha, 880 qla_printk(KERN_INFO, ha,
900 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 881 "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
901 vha->host_no, id, lun, wait, serial, ret); 882 vha->host_no, id, lun, wait, ret);
902 883
903 return ret; 884 return ret;
904} 885}
@@ -1043,13 +1024,11 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1043 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 1024 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1044 int ret = FAILED; 1025 int ret = FAILED;
1045 unsigned int id, lun; 1026 unsigned int id, lun;
1046 unsigned long serial;
1047 1027
1048 fc_block_scsi_eh(cmd); 1028 fc_block_scsi_eh(cmd);
1049 1029
1050 id = cmd->device->id; 1030 id = cmd->device->id;
1051 lun = cmd->device->lun; 1031 lun = cmd->device->lun;
1052 serial = cmd->serial_number;
1053 1032
1054 if (!fcport) 1033 if (!fcport)
1055 return ret; 1034 return ret;
@@ -1104,14 +1083,12 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1104 struct qla_hw_data *ha = vha->hw; 1083 struct qla_hw_data *ha = vha->hw;
1105 int ret = FAILED; 1084 int ret = FAILED;
1106 unsigned int id, lun; 1085 unsigned int id, lun;
1107 unsigned long serial;
1108 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 1086 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1109 1087
1110 fc_block_scsi_eh(cmd); 1088 fc_block_scsi_eh(cmd);
1111 1089
1112 id = cmd->device->id; 1090 id = cmd->device->id;
1113 lun = cmd->device->lun; 1091 lun = cmd->device->lun;
1114 serial = cmd->serial_number;
1115 1092
1116 if (!fcport) 1093 if (!fcport)
1117 return ret; 1094 return ret;
@@ -1974,6 +1951,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1974 ha->bars = bars; 1951 ha->bars = bars;
1975 ha->mem_only = mem_only; 1952 ha->mem_only = mem_only;
1976 spin_lock_init(&ha->hardware_lock); 1953 spin_lock_init(&ha->hardware_lock);
1954 spin_lock_init(&ha->vport_slock);
1977 1955
1978 /* Set ISP-type information. */ 1956 /* Set ISP-type information. */
1979 qla2x00_set_isp_flags(ha); 1957 qla2x00_set_isp_flags(ha);
@@ -2342,6 +2320,42 @@ probe_out:
2342} 2320}
2343 2321
2344static void 2322static void
2323qla2x00_shutdown(struct pci_dev *pdev)
2324{
2325 scsi_qla_host_t *vha;
2326 struct qla_hw_data *ha;
2327
2328 vha = pci_get_drvdata(pdev);
2329 ha = vha->hw;
2330
2331 /* Turn-off FCE trace */
2332 if (ha->flags.fce_enabled) {
2333 qla2x00_disable_fce_trace(vha, NULL, NULL);
2334 ha->flags.fce_enabled = 0;
2335 }
2336
2337 /* Turn-off EFT trace */
2338 if (ha->eft)
2339 qla2x00_disable_eft_trace(vha);
2340
2341 /* Stop currently executing firmware. */
2342 qla2x00_try_to_stop_firmware(vha);
2343
2344 /* Turn adapter off line */
2345 vha->flags.online = 0;
2346
2347 /* turn-off interrupts on the card */
2348 if (ha->interrupts_on) {
2349 vha->flags.init_done = 0;
2350 ha->isp_ops->disable_intrs(ha);
2351 }
2352
2353 qla2x00_free_irqs(vha);
2354
2355 qla2x00_free_fw_dump(ha);
2356}
2357
2358static void
2345qla2x00_remove_one(struct pci_dev *pdev) 2359qla2x00_remove_one(struct pci_dev *pdev)
2346{ 2360{
2347 scsi_qla_host_t *base_vha, *vha; 2361 scsi_qla_host_t *base_vha, *vha;
@@ -2597,12 +2611,12 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2597 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 2611 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2598 continue; 2612 continue;
2599 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2613 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2614 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2600 if (defer) 2615 if (defer)
2601 qla2x00_schedule_rport_del(vha, fcport, defer); 2616 qla2x00_schedule_rport_del(vha, fcport, defer);
2602 else if (vha->vp_idx == fcport->vp_idx) 2617 else if (vha->vp_idx == fcport->vp_idx)
2603 qla2x00_schedule_rport_del(vha, fcport, defer); 2618 qla2x00_schedule_rport_del(vha, fcport, defer);
2604 } 2619 }
2605 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2606 } 2620 }
2607} 2621}
2608 2622
@@ -2830,28 +2844,48 @@ fail:
2830} 2844}
2831 2845
2832/* 2846/*
2833* qla2x00_mem_free 2847* qla2x00_free_fw_dump
2834* Frees all adapter allocated memory. 2848* Frees fw dump stuff.
2835* 2849*
2836* Input: 2850* Input:
2837* ha = adapter block pointer. 2851* ha = adapter block pointer.
2838*/ 2852*/
2839static void 2853static void
2840qla2x00_mem_free(struct qla_hw_data *ha) 2854qla2x00_free_fw_dump(struct qla_hw_data *ha)
2841{ 2855{
2842 if (ha->srb_mempool)
2843 mempool_destroy(ha->srb_mempool);
2844
2845 if (ha->fce) 2856 if (ha->fce)
2846 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2857 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2847 ha->fce_dma); 2858 ha->fce_dma);
2848 2859
2849 if (ha->fw_dump) { 2860 if (ha->fw_dump) {
2850 if (ha->eft) 2861 if (ha->eft)
2851 dma_free_coherent(&ha->pdev->dev, 2862 dma_free_coherent(&ha->pdev->dev,
2852 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 2863 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2853 vfree(ha->fw_dump); 2864 vfree(ha->fw_dump);
2854 } 2865 }
2866 ha->fce = NULL;
2867 ha->fce_dma = 0;
2868 ha->eft = NULL;
2869 ha->eft_dma = 0;
2870 ha->fw_dump = NULL;
2871 ha->fw_dumped = 0;
2872 ha->fw_dump_reading = 0;
2873}
2874
2875/*
2876* qla2x00_mem_free
2877* Frees all adapter allocated memory.
2878*
2879* Input:
2880* ha = adapter block pointer.
2881*/
2882static void
2883qla2x00_mem_free(struct qla_hw_data *ha)
2884{
2885 qla2x00_free_fw_dump(ha);
2886
2887 if (ha->srb_mempool)
2888 mempool_destroy(ha->srb_mempool);
2855 2889
2856 if (ha->dcbx_tlv) 2890 if (ha->dcbx_tlv)
2857 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 2891 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
@@ -2925,8 +2959,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2925 2959
2926 ha->srb_mempool = NULL; 2960 ha->srb_mempool = NULL;
2927 ha->ctx_mempool = NULL; 2961 ha->ctx_mempool = NULL;
2928 ha->eft = NULL;
2929 ha->eft_dma = 0;
2930 ha->sns_cmd = NULL; 2962 ha->sns_cmd = NULL;
2931 ha->sns_cmd_dma = 0; 2963 ha->sns_cmd_dma = 0;
2932 ha->ct_sns = NULL; 2964 ha->ct_sns = NULL;
@@ -2946,10 +2978,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2946 2978
2947 ha->gid_list = NULL; 2979 ha->gid_list = NULL;
2948 ha->gid_list_dma = 0; 2980 ha->gid_list_dma = 0;
2949
2950 ha->fw_dump = NULL;
2951 ha->fw_dumped = 0;
2952 ha->fw_dump_reading = 0;
2953} 2981}
2954 2982
2955struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 2983struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3547,11 +3575,9 @@ void
3547qla2x00_timer(scsi_qla_host_t *vha) 3575qla2x00_timer(scsi_qla_host_t *vha)
3548{ 3576{
3549 unsigned long cpu_flags = 0; 3577 unsigned long cpu_flags = 0;
3550 fc_port_t *fcport;
3551 int start_dpc = 0; 3578 int start_dpc = 0;
3552 int index; 3579 int index;
3553 srb_t *sp; 3580 srb_t *sp;
3554 int t;
3555 uint16_t w; 3581 uint16_t w;
3556 struct qla_hw_data *ha = vha->hw; 3582 struct qla_hw_data *ha = vha->hw;
3557 struct req_que *req; 3583 struct req_que *req;
@@ -3567,34 +3593,6 @@ qla2x00_timer(scsi_qla_host_t *vha)
3567 /* Hardware read to raise pending EEH errors during mailbox waits. */ 3593 /* Hardware read to raise pending EEH errors during mailbox waits. */
3568 if (!pci_channel_offline(ha->pdev)) 3594 if (!pci_channel_offline(ha->pdev))
3569 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 3595 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3570 /*
3571 * Ports - Port down timer.
3572 *
3573 * Whenever, a port is in the LOST state we start decrementing its port
3574 * down timer every second until it reaches zero. Once it reaches zero
3575 * the port it marked DEAD.
3576 */
3577 t = 0;
3578 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3579 if (fcport->port_type != FCT_TARGET)
3580 continue;
3581
3582 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
3583
3584 if (atomic_read(&fcport->port_down_timer) == 0)
3585 continue;
3586
3587 if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
3588 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
3589
3590 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
3591 "%d remaining\n",
3592 vha->host_no,
3593 t, atomic_read(&fcport->port_down_timer)));
3594 }
3595 t++;
3596 } /* End of for fcport */
3597
3598 3596
3599 /* Loop down handler. */ 3597 /* Loop down handler. */
3600 if (atomic_read(&vha->loop_down_timer) > 0 && 3598 if (atomic_read(&vha->loop_down_timer) > 0 &&
@@ -4079,6 +4077,7 @@ static struct pci_driver qla2xxx_pci_driver = {
4079 .id_table = qla2xxx_pci_tbl, 4077 .id_table = qla2xxx_pci_tbl,
4080 .probe = qla2x00_probe_one, 4078 .probe = qla2x00_probe_one,
4081 .remove = qla2x00_remove_one, 4079 .remove = qla2x00_remove_one,
4080 .shutdown = qla2x00_shutdown,
4082 .err_handler = &qla2xxx_err_handler, 4081 .err_handler = &qla2xxx_err_handler,
4083}; 4082};
4084 4083
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index cbceb0ebabf..edcf048215d 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -30,3 +30,104 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
30 printk(KERN_INFO "\n"); 30 printk(KERN_INFO "\n");
31} 31}
32 32
33void qla4xxx_dump_registers(struct scsi_qla_host *ha)
34{
35 uint8_t i;
36
37 if (is_qla8022(ha)) {
38 for (i = 1; i < MBOX_REG_COUNT; i++)
39 printk(KERN_INFO "mailbox[%d] = 0x%08X\n",
40 i, readl(&ha->qla4_8xxx_reg->mailbox_in[i]));
41 return;
42 }
43
44 for (i = 0; i < MBOX_REG_COUNT; i++) {
45 printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
46 (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
47 readw(&ha->reg->mailbox[i]));
48 }
49
50 printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
51 (uint8_t) offsetof(struct isp_reg, flash_address),
52 readw(&ha->reg->flash_address));
53 printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
54 (uint8_t) offsetof(struct isp_reg, flash_data),
55 readw(&ha->reg->flash_data));
56 printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
57 (uint8_t) offsetof(struct isp_reg, ctrl_status),
58 readw(&ha->reg->ctrl_status));
59
60 if (is_qla4010(ha)) {
61 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
62 (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
63 readw(&ha->reg->u1.isp4010.nvram));
64 } else if (is_qla4022(ha) | is_qla4032(ha)) {
65 printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
66 (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask),
67 readw(&ha->reg->u1.isp4022.intr_mask));
68 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
69 (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
70 readw(&ha->reg->u1.isp4022.nvram));
71 printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
72 (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore),
73 readw(&ha->reg->u1.isp4022.semaphore));
74 }
75 printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
76 (uint8_t) offsetof(struct isp_reg, req_q_in),
77 readw(&ha->reg->req_q_in));
78 printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
79 (uint8_t) offsetof(struct isp_reg, rsp_q_out),
80 readw(&ha->reg->rsp_q_out));
81
82 if (is_qla4010(ha)) {
83 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
84 (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf),
85 readw(&ha->reg->u2.isp4010.ext_hw_conf));
86 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
87 (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl),
88 readw(&ha->reg->u2.isp4010.port_ctrl));
89 printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
90 (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status),
91 readw(&ha->reg->u2.isp4010.port_status));
92 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
93 (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out),
94 readw(&ha->reg->u2.isp4010.req_q_out));
95 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
96 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
97 readw(&ha->reg->u2.isp4010.gp_out));
98 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
99 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
100 readw(&ha->reg->u2.isp4010.gp_in));
101 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
102 offsetof(struct isp_reg, u2.isp4010.port_err_status),
103 readw(&ha->reg->u2.isp4010.port_err_status));
104 } else if (is_qla4022(ha) | is_qla4032(ha)) {
105 printk(KERN_INFO "Page 0 Registers:\n");
106 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t)
107 offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),
108 readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
109 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t)
110 offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),
111 readw(&ha->reg->u2.isp4022.p0.port_ctrl));
112 printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t)
113 offsetof(struct isp_reg, u2.isp4022.p0.port_status),
114 readw(&ha->reg->u2.isp4022.p0.port_status));
115 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
116 (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),
117 readw(&ha->reg->u2.isp4022.p0.gp_out));
118 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
119 (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
120 readw(&ha->reg->u2.isp4022.p0.gp_in));
121 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
122 offsetof(struct isp_reg, u2.isp4022.p0.port_err_status),
123 readw(&ha->reg->u2.isp4022.p0.port_err_status));
124 printk(KERN_INFO "Page 1 Registers:\n");
125 writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
126 &ha->reg->ctrl_status);
127 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
128 (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out),
129 readw(&ha->reg->u2.isp4022.p1.req_q_out));
130 writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
131 &ha->reg->ctrl_status);
132 }
133}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 9dc0a6616ed..0f3bfc3da5c 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -24,6 +24,7 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/aer.h>
27 28
28#include <net/tcp.h> 29#include <net/tcp.h>
29#include <scsi/scsi.h> 30#include <scsi/scsi.h>
@@ -36,24 +37,6 @@
36#include "ql4_dbg.h" 37#include "ql4_dbg.h"
37#include "ql4_nx.h" 38#include "ql4_nx.h"
38 39
39#if defined(CONFIG_PCIEAER)
40#include <linux/aer.h>
41#else
42/* AER releated */
43static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
44{
45 return -EINVAL;
46}
47static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
48{
49 return -EINVAL;
50}
51static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
52{
53 return -EINVAL;
54}
55#endif
56
57#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010 40#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
58#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010 41#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
59#endif 42#endif
@@ -179,6 +162,7 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
179#define IOCB_TOV_MARGIN 10 162#define IOCB_TOV_MARGIN 10
180#define RELOGIN_TOV 18 163#define RELOGIN_TOV 18
181#define ISNS_DEREG_TOV 5 164#define ISNS_DEREG_TOV 5
165#define HBA_ONLINE_TOV 30
182 166
183#define MAX_RESET_HA_RETRIES 2 167#define MAX_RESET_HA_RETRIES 2
184 168
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 0336c6db8cb..5e757d7fff7 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -416,6 +416,8 @@ struct qla_flt_region {
416#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C 416#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
417#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D 417#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
418#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E 418#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
419#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
420#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
419 421
420#define ISNS_EVENT_DATA_RECEIVED 0x0000 422#define ISNS_EVENT_DATA_RECEIVED 0x0000
421#define ISNS_EVENT_CONNECTION_OPENED 0x0001 423#define ISNS_EVENT_CONNECTION_OPENED 0x0001
@@ -446,6 +448,7 @@ struct addr_ctrl_blk {
446#define FWOPT_SESSION_MODE 0x0040 448#define FWOPT_SESSION_MODE 0x0040
447#define FWOPT_INITIATOR_MODE 0x0020 449#define FWOPT_INITIATOR_MODE 0x0020
448#define FWOPT_TARGET_MODE 0x0010 450#define FWOPT_TARGET_MODE 0x0010
451#define FWOPT_ENABLE_CRBDB 0x8000
449 452
450 uint16_t exec_throttle; /* 04-05 */ 453 uint16_t exec_throttle; /* 04-05 */
451 uint8_t zio_count; /* 06 */ 454 uint8_t zio_count; /* 06 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 95a26fb1626..6575a47501e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -94,6 +94,7 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
94void qla4xxx_wake_dpc(struct scsi_qla_host *ha); 94void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
95void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha); 95void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
96void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha); 96void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
97void qla4xxx_dump_registers(struct scsi_qla_host *ha);
97 98
98void qla4_8xxx_pci_config(struct scsi_qla_host *); 99void qla4_8xxx_pci_config(struct scsi_qla_host *);
99int qla4_8xxx_iospace_config(struct scsi_qla_host *ha); 100int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 4c9be77ee70..dc01fa3da5d 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1207,8 +1207,8 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
1207 break; 1207 break;
1208 1208
1209 DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot " 1209 DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
1210 "firmware to complete... ctrl_sts=0x%x\n", 1210 "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n",
1211 ha->host_no, __func__, ctrl_status)); 1211 ha->host_no, __func__, ctrl_status, max_wait_time));
1212 1212
1213 msleep_interruptible(250); 1213 msleep_interruptible(250);
1214 } while (!time_after_eq(jiffies, max_wait_time)); 1214 } while (!time_after_eq(jiffies, max_wait_time));
@@ -1459,6 +1459,12 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1459exit_init_online: 1459exit_init_online:
1460 set_bit(AF_ONLINE, &ha->flags); 1460 set_bit(AF_ONLINE, &ha->flags);
1461exit_init_hba: 1461exit_init_hba:
1462 if (is_qla8022(ha) && (status == QLA_ERROR)) {
1463 /* Since interrupts are registered in start_firmware for
1464 * 82xx, release them here if initialize_adapter fails */
1465 qla4xxx_free_irqs(ha);
1466 }
1467
1462 DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no, 1468 DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
1463 status == QLA_ERROR ? "FAILED" : "SUCCEDED")); 1469 status == QLA_ERROR ? "FAILED" : "SUCCEDED"));
1464 return status; 1470 return status;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 4ef9ba112ee..5ae49fd8784 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -202,19 +202,11 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
202void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha) 202void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
203{ 203{
204 uint32_t dbval = 0; 204 uint32_t dbval = 0;
205 unsigned long wtime;
206 205
207 dbval = 0x14 | (ha->func_num << 5); 206 dbval = 0x14 | (ha->func_num << 5);
208 dbval = dbval | (0 << 8) | (ha->request_in << 16); 207 dbval = dbval | (0 << 8) | (ha->request_in << 16);
209 writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
210 wmb();
211 208
212 wtime = jiffies + (2 * HZ); 209 qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
213 while (readl((void __iomem *)ha->nx_db_rd_ptr) != dbval &&
214 !time_after_eq(jiffies, wtime)) {
215 writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
216 wmb();
217 }
218} 210}
219 211
220/** 212/**
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2a1ab63f3eb..7c33fd5943d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -72,7 +72,7 @@ qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
72{ 72{
73 struct srb *srb = ha->status_srb; 73 struct srb *srb = ha->status_srb;
74 struct scsi_cmnd *cmd; 74 struct scsi_cmnd *cmd;
75 uint8_t sense_len; 75 uint16_t sense_len;
76 76
77 if (srb == NULL) 77 if (srb == NULL)
78 return; 78 return;
@@ -487,6 +487,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
487 case MBOX_ASTS_SYSTEM_ERROR: 487 case MBOX_ASTS_SYSTEM_ERROR:
488 /* Log Mailbox registers */ 488 /* Log Mailbox registers */
489 ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__); 489 ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
490 qla4xxx_dump_registers(ha);
491
490 if (ql4xdontresethba) { 492 if (ql4xdontresethba) {
491 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", 493 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
492 ha->host_no, __func__)); 494 ha->host_no, __func__));
@@ -621,6 +623,18 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
621 } 623 }
622 break; 624 break;
623 625
626 case MBOX_ASTS_TXSCVR_INSERTED:
627 DEBUG2(printk(KERN_WARNING
628 "scsi%ld: AEN %04x Transceiver"
629 " inserted\n", ha->host_no, mbox_sts[0]));
630 break;
631
632 case MBOX_ASTS_TXSCVR_REMOVED:
633 DEBUG2(printk(KERN_WARNING
634 "scsi%ld: AEN %04x Transceiver"
635 " removed\n", ha->host_no, mbox_sts[0]));
636 break;
637
624 default: 638 default:
625 DEBUG2(printk(KERN_WARNING 639 DEBUG2(printk(KERN_WARNING
626 "scsi%ld: AEN %04x UNKNOWN\n", 640 "scsi%ld: AEN %04x UNKNOWN\n",
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 90021704d8c..2d2f9c879bf 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -299,6 +299,10 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
299{ 299{
300 memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); 300 memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
301 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); 301 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
302
303 if (is_qla8022(ha))
304 qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0);
305
302 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; 306 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
303 mbox_cmd[1] = 0; 307 mbox_cmd[1] = 0;
304 mbox_cmd[2] = LSDW(init_fw_cb_dma); 308 mbox_cmd[2] = LSDW(init_fw_cb_dma);
@@ -472,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
472 init_fw_cb->fw_options |= 476 init_fw_cb->fw_options |=
473 __constant_cpu_to_le16(FWOPT_SESSION_MODE | 477 __constant_cpu_to_le16(FWOPT_SESSION_MODE |
474 FWOPT_INITIATOR_MODE); 478 FWOPT_INITIATOR_MODE);
479
480 if (is_qla8022(ha))
481 init_fw_cb->fw_options |=
482 __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
483
475 init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); 484 init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
476 485
477 if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) 486 if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
@@ -592,7 +601,7 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
592 } 601 }
593 602
594 ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n", 603 ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n",
595 ha->host_no, mbox_cmd[2]); 604 ha->host_no, mbox_sts[2]);
596 605
597 return QLA_SUCCESS; 606 return QLA_SUCCESS;
598} 607}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 449256f2c5f..474b10d7136 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -839,8 +839,11 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
839 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); 839 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
840 if (done == 1) 840 if (done == 1)
841 break; 841 break;
842 if (timeout >= qla4_8xxx_rom_lock_timeout) 842 if (timeout >= qla4_8xxx_rom_lock_timeout) {
843 ql4_printk(KERN_WARNING, ha,
844 "%s: Failed to acquire rom lock", __func__);
843 return -1; 845 return -1;
846 }
844 847
845 timeout++; 848 timeout++;
846 849
@@ -1078,21 +1081,6 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1078 return 0; 1081 return 0;
1079} 1082}
1080 1083
1081static int qla4_8xxx_check_for_bad_spd(struct scsi_qla_host *ha)
1082{
1083 u32 val = 0;
1084 val = qla4_8xxx_rd_32(ha, BOOT_LOADER_DIMM_STATUS) ;
1085 val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
1086 if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
1087 printk("Memory DIMM SPD not programmed. Assumed valid.\n");
1088 return 1;
1089 } else if (val) {
1090 printk("Memory DIMM type incorrect. Info:%08X.\n", val);
1091 return 2;
1092 }
1093 return 0;
1094}
1095
1096static int 1084static int
1097qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) 1085qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1098{ 1086{
@@ -1377,8 +1365,6 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
1377 1365
1378 } while (--retries); 1366 } while (--retries);
1379 1367
1380 qla4_8xxx_check_for_bad_spd(ha);
1381
1382 if (!retries) { 1368 if (!retries) {
1383 pegtune_val = qla4_8xxx_rd_32(ha, 1369 pegtune_val = qla4_8xxx_rd_32(ha,
1384 QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); 1370 QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1540,14 +1526,31 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
1540 ql4_printk(KERN_INFO, ha, 1526 ql4_printk(KERN_INFO, ha,
1541 "FW: Attempting to load firmware from flash...\n"); 1527 "FW: Attempting to load firmware from flash...\n");
1542 rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw); 1528 rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw);
1543 if (rval == QLA_SUCCESS)
1544 return rval;
1545 1529
1546 ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash FAILED...\n"); 1530 if (rval != QLA_SUCCESS) {
1531 ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
1532 " FAILED...\n");
1533 return rval;
1534 }
1547 1535
1548 return rval; 1536 return rval;
1549} 1537}
1550 1538
1539static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
1540{
1541 if (qla4_8xxx_rom_lock(ha)) {
1542 /* Someone else is holding the lock. */
1543 dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
1544 }
1545
1546 /*
1547 * Either we got the lock, or someone
1548 * else died while holding it.
1549 * In either case, unlock.
1550 */
1551 qla4_8xxx_rom_unlock(ha);
1552}
1553
1551/** 1554/**
1552 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw 1555 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
1553 * @ha: pointer to adapter structure 1556 * @ha: pointer to adapter structure
@@ -1557,11 +1560,12 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
1557static int 1560static int
1558qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha) 1561qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
1559{ 1562{
1560 int rval, i, timeout; 1563 int rval = QLA_ERROR;
1564 int i, timeout;
1561 uint32_t old_count, count; 1565 uint32_t old_count, count;
1566 int need_reset = 0, peg_stuck = 1;
1562 1567
1563 if (qla4_8xxx_need_reset(ha)) 1568 need_reset = qla4_8xxx_need_reset(ha);
1564 goto dev_initialize;
1565 1569
1566 old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 1570 old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1567 1571
@@ -1570,12 +1574,30 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
1570 if (timeout) { 1574 if (timeout) {
1571 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 1575 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
1572 QLA82XX_DEV_FAILED); 1576 QLA82XX_DEV_FAILED);
1573 return QLA_ERROR; 1577 return rval;
1574 } 1578 }
1575 1579
1576 count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 1580 count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1577 if (count != old_count) 1581 if (count != old_count)
1582 peg_stuck = 0;
1583 }
1584
1585 if (need_reset) {
1586 /* We are trying to perform a recovery here. */
1587 if (peg_stuck)
1588 qla4_8xxx_rom_lock_recovery(ha);
1589 goto dev_initialize;
1590 } else {
1591 /* Start of day for this ha context. */
1592 if (peg_stuck) {
1593 /* Either we are the first or recovery in progress. */
1594 qla4_8xxx_rom_lock_recovery(ha);
1595 goto dev_initialize;
1596 } else {
1597 /* Firmware already running. */
1598 rval = QLA_SUCCESS;
1578 goto dev_ready; 1599 goto dev_ready;
1600 }
1579 } 1601 }
1580 1602
1581dev_initialize: 1603dev_initialize:
@@ -1601,7 +1623,7 @@ dev_ready:
1601 ql4_printk(KERN_INFO, ha, "HW State: READY\n"); 1623 ql4_printk(KERN_INFO, ha, "HW State: READY\n");
1602 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 1624 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
1603 1625
1604 return QLA_SUCCESS; 1626 return rval;
1605} 1627}
1606 1628
1607/** 1629/**
@@ -1764,20 +1786,9 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
1764 int retval; 1786 int retval;
1765 retval = qla4_8xxx_device_state_handler(ha); 1787 retval = qla4_8xxx_device_state_handler(ha);
1766 1788
1767 if (retval == QLA_SUCCESS && 1789 if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
1768 !test_bit(AF_INIT_DONE, &ha->flags)) {
1769 retval = qla4xxx_request_irqs(ha); 1790 retval = qla4xxx_request_irqs(ha);
1770 if (retval != QLA_SUCCESS) { 1791
1771 ql4_printk(KERN_WARNING, ha,
1772 "Failed to reserve interrupt %d already in use.\n",
1773 ha->pdev->irq);
1774 } else {
1775 set_bit(AF_IRQ_ATTACHED, &ha->flags);
1776 ha->host->irq = ha->pdev->irq;
1777 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1778 __func__, ha->pdev->irq);
1779 }
1780 }
1781 return retval; 1792 return retval;
1782} 1793}
1783 1794
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 931ad3f1e91..ff689bf5300 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -24,7 +24,6 @@
24 24
25#define CRB_CMDPEG_STATE QLA82XX_REG(0x50) 25#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 27#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
29 28
30#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 29#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
@@ -529,12 +528,12 @@
529# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) 528# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
530# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) 529# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
531 530
532#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
533#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff
534#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24)) 531#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
535#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8)) 532#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
536#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac)) 533#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
537#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0)) 534#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
535#define QLA82XX_CAM_RAM_DB1 (QLA82XX_CAM_RAM(0x1b0))
536#define QLA82XX_CAM_RAM_DB2 (QLA82XX_CAM_RAM(0x1b4))
538 537
539#define HALT_STATUS_UNRECOVERABLE 0x80000000 538#define HALT_STATUS_UNRECOVERABLE 0x80000000
540#define HALT_STATUS_RECOVERABLE 0x40000000 539#define HALT_STATUS_RECOVERABLE 0x40000000
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 370d40ff152..f4cd846abf6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -167,8 +167,6 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
167 "of (%d) secs exhausted, marking device DEAD.\n", 167 "of (%d) secs exhausted, marking device DEAD.\n",
168 ha->host_no, __func__, ddb_entry->fw_ddb_index, 168 ha->host_no, __func__, ddb_entry->fw_ddb_index,
169 QL4_SESS_RECOVERY_TMO)); 169 QL4_SESS_RECOVERY_TMO));
170
171 qla4xxx_wake_dpc(ha);
172 } 170 }
173} 171}
174 172
@@ -573,10 +571,6 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
573 if (ha->nx_pcibase) 571 if (ha->nx_pcibase)
574 iounmap( 572 iounmap(
575 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 573 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
576
577 if (ha->nx_db_wr_ptr)
578 iounmap(
579 (struct device_reg_82xx __iomem *)ha->nx_db_wr_ptr);
580 } else if (ha->reg) 574 } else if (ha->reg)
581 iounmap(ha->reg); 575 iounmap(ha->reg);
582 pci_release_regions(ha->pdev); 576 pci_release_regions(ha->pdev);
@@ -692,7 +686,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
692 qla4xxx_wake_dpc(ha); 686 qla4xxx_wake_dpc(ha);
693 qla4xxx_mailbox_premature_completion(ha); 687 qla4xxx_mailbox_premature_completion(ha);
694 } 688 }
695 } 689 } else
690 ha->seconds_since_last_heartbeat = 0;
691
696 ha->fw_heartbeat_counter = fw_heartbeat_counter; 692 ha->fw_heartbeat_counter = fw_heartbeat_counter;
697} 693}
698 694
@@ -885,7 +881,13 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
885 /* Find a command that hasn't completed. */ 881 /* Find a command that hasn't completed. */
886 for (index = 0; index < ha->host->can_queue; index++) { 882 for (index = 0; index < ha->host->can_queue; index++) {
887 cmd = scsi_host_find_tag(ha->host, index); 883 cmd = scsi_host_find_tag(ha->host, index);
888 if (cmd != NULL) 884 /*
885 * We cannot just check if the index is valid,
886 * becase if we are run from the scsi eh, then
887 * the scsi/block layer is going to prevent
888 * the tag from being released.
889 */
890 if (cmd != NULL && CMD_SP(cmd))
889 break; 891 break;
890 } 892 }
891 spin_unlock_irqrestore(&ha->hardware_lock, flags); 893 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -937,11 +939,14 @@ int qla4xxx_soft_reset(struct scsi_qla_host *ha)
937{ 939{
938 uint32_t max_wait_time; 940 uint32_t max_wait_time;
939 unsigned long flags = 0; 941 unsigned long flags = 0;
940 int status = QLA_ERROR; 942 int status;
941 uint32_t ctrl_status; 943 uint32_t ctrl_status;
942 944
943 qla4xxx_hw_reset(ha); 945 status = qla4xxx_hw_reset(ha);
946 if (status != QLA_SUCCESS)
947 return status;
944 948
949 status = QLA_ERROR;
945 /* Wait until the Network Reset Intr bit is cleared */ 950 /* Wait until the Network Reset Intr bit is cleared */
946 max_wait_time = RESET_INTR_TOV; 951 max_wait_time = RESET_INTR_TOV;
947 do { 952 do {
@@ -1101,7 +1106,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
1101 ha->host_no, __func__)); 1106 ha->host_no, __func__));
1102 status = ha->isp_ops->reset_firmware(ha); 1107 status = ha->isp_ops->reset_firmware(ha);
1103 if (status == QLA_SUCCESS) { 1108 if (status == QLA_SUCCESS) {
1104 qla4xxx_cmd_wait(ha); 1109 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
1110 qla4xxx_cmd_wait(ha);
1105 ha->isp_ops->disable_intrs(ha); 1111 ha->isp_ops->disable_intrs(ha);
1106 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 1112 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1107 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 1113 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -1118,7 +1124,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
1118 * or if stop_firmware fails for ISP-82xx. 1124 * or if stop_firmware fails for ISP-82xx.
1119 * This is the default case for ISP-4xxx */ 1125 * This is the default case for ISP-4xxx */
1120 if (!is_qla8022(ha) || reset_chip) { 1126 if (!is_qla8022(ha) || reset_chip) {
1121 qla4xxx_cmd_wait(ha); 1127 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
1128 qla4xxx_cmd_wait(ha);
1122 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 1129 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1123 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 1130 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
1124 DEBUG2(ql4_printk(KERN_INFO, ha, 1131 DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -1471,24 +1478,10 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
1471 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ 1478 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
1472 db_len = pci_resource_len(pdev, 4); 1479 db_len = pci_resource_len(pdev, 4);
1473 1480
1474 /* mapping of doorbell write pointer */ 1481 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
1475 ha->nx_db_wr_ptr = (unsigned long)ioremap(db_base + 1482 QLA82XX_CAM_RAM_DB2);
1476 (ha->pdev->devfn << 12), 4);
1477 if (!ha->nx_db_wr_ptr) {
1478 printk(KERN_ERR
1479 "cannot remap MMIO doorbell-write (%s), aborting\n",
1480 pci_name(pdev));
1481 goto iospace_error_exit;
1482 }
1483 /* mapping of doorbell read pointer */
1484 ha->nx_db_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1485 (ha->pdev->devfn * 8);
1486 if (!ha->nx_db_rd_ptr)
1487 printk(KERN_ERR
1488 "cannot remap MMIO doorbell-read (%s), aborting\n",
1489 pci_name(pdev));
1490 return 0;
1491 1483
1484 return 0;
1492iospace_error_exit: 1485iospace_error_exit:
1493 return -ENOMEM; 1486 return -ENOMEM;
1494} 1487}
@@ -1960,13 +1953,11 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
1960{ 1953{
1961 unsigned long wait_online; 1954 unsigned long wait_online;
1962 1955
1963 wait_online = jiffies + (30 * HZ); 1956 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
1964 while (time_before(jiffies, wait_online)) { 1957 while (time_before(jiffies, wait_online)) {
1965 1958
1966 if (adapter_up(ha)) 1959 if (adapter_up(ha))
1967 return QLA_SUCCESS; 1960 return QLA_SUCCESS;
1968 else if (ha->retry_reset_ha_cnt == 0)
1969 return QLA_ERROR;
1970 1961
1971 msleep(2000); 1962 msleep(2000);
1972 } 1963 }
@@ -2021,6 +2012,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
2021 unsigned int id = cmd->device->id; 2012 unsigned int id = cmd->device->id;
2022 unsigned int lun = cmd->device->lun; 2013 unsigned int lun = cmd->device->lun;
2023 unsigned long serial = cmd->serial_number; 2014 unsigned long serial = cmd->serial_number;
2015 unsigned long flags;
2024 struct srb *srb = NULL; 2016 struct srb *srb = NULL;
2025 int ret = SUCCESS; 2017 int ret = SUCCESS;
2026 int wait = 0; 2018 int wait = 0;
@@ -2029,12 +2021,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
2029 "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n", 2021 "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
2030 ha->host_no, id, lun, cmd, serial); 2022 ha->host_no, id, lun, cmd, serial);
2031 2023
2024 spin_lock_irqsave(&ha->hardware_lock, flags);
2032 srb = (struct srb *) CMD_SP(cmd); 2025 srb = (struct srb *) CMD_SP(cmd);
2033 2026 if (!srb) {
2034 if (!srb) 2027 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2035 return SUCCESS; 2028 return SUCCESS;
2036 2029 }
2037 kref_get(&srb->srb_ref); 2030 kref_get(&srb->srb_ref);
2031 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2038 2032
2039 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { 2033 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
2040 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n", 2034 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
@@ -2267,6 +2261,8 @@ qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2267 qla4xxx_mailbox_premature_completion(ha); 2261 qla4xxx_mailbox_premature_completion(ha);
2268 qla4xxx_free_irqs(ha); 2262 qla4xxx_free_irqs(ha);
2269 pci_disable_device(pdev); 2263 pci_disable_device(pdev);
2264 /* Return back all IOs */
2265 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2270 return PCI_ERS_RESULT_NEED_RESET; 2266 return PCI_ERS_RESULT_NEED_RESET;
2271 case pci_channel_io_perm_failure: 2267 case pci_channel_io_perm_failure:
2272 set_bit(AF_EEH_BUSY, &ha->flags); 2268 set_bit(AF_EEH_BUSY, &ha->flags);
@@ -2290,17 +2286,13 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
2290 if (!is_aer_supported(ha)) 2286 if (!is_aer_supported(ha))
2291 return PCI_ERS_RESULT_NONE; 2287 return PCI_ERS_RESULT_NONE;
2292 2288
2293 if (test_bit(AF_FW_RECOVERY, &ha->flags)) { 2289 return PCI_ERS_RESULT_RECOVERED;
2294 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang -- "
2295 "mmio_enabled\n", ha->host_no, __func__);
2296 return PCI_ERS_RESULT_NEED_RESET;
2297 } else
2298 return PCI_ERS_RESULT_RECOVERED;
2299} 2290}
2300 2291
2301uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) 2292static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2302{ 2293{
2303 uint32_t rval = QLA_ERROR; 2294 uint32_t rval = QLA_ERROR;
2295 uint32_t ret = 0;
2304 int fn; 2296 int fn;
2305 struct pci_dev *other_pdev = NULL; 2297 struct pci_dev *other_pdev = NULL;
2306 2298
@@ -2312,7 +2304,6 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2312 clear_bit(AF_ONLINE, &ha->flags); 2304 clear_bit(AF_ONLINE, &ha->flags);
2313 qla4xxx_mark_all_devices_missing(ha); 2305 qla4xxx_mark_all_devices_missing(ha);
2314 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 2306 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2315 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2316 } 2307 }
2317 2308
2318 fn = PCI_FUNC(ha->pdev->devfn); 2309 fn = PCI_FUNC(ha->pdev->devfn);
@@ -2375,7 +2366,16 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2375 /* Clear driver state register */ 2366 /* Clear driver state register */
2376 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); 2367 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
2377 qla4_8xxx_set_drv_active(ha); 2368 qla4_8xxx_set_drv_active(ha);
2378 ha->isp_ops->enable_intrs(ha); 2369 ret = qla4xxx_request_irqs(ha);
2370 if (ret) {
2371 ql4_printk(KERN_WARNING, ha, "Failed to "
2372 "reserve interrupt %d already in use.\n",
2373 ha->pdev->irq);
2374 rval = QLA_ERROR;
2375 } else {
2376 ha->isp_ops->enable_intrs(ha);
2377 rval = QLA_SUCCESS;
2378 }
2379 } 2379 }
2380 qla4_8xxx_idc_unlock(ha); 2380 qla4_8xxx_idc_unlock(ha);
2381 } else { 2381 } else {
@@ -2387,8 +2387,18 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2387 clear_bit(AF_FW_RECOVERY, &ha->flags); 2387 clear_bit(AF_FW_RECOVERY, &ha->flags);
2388 rval = qla4xxx_initialize_adapter(ha, 2388 rval = qla4xxx_initialize_adapter(ha,
2389 PRESERVE_DDB_LIST); 2389 PRESERVE_DDB_LIST);
2390 if (rval == QLA_SUCCESS) 2390 if (rval == QLA_SUCCESS) {
2391 ha->isp_ops->enable_intrs(ha); 2391 ret = qla4xxx_request_irqs(ha);
2392 if (ret) {
2393 ql4_printk(KERN_WARNING, ha, "Failed to"
2394 " reserve interrupt %d already in"
2395 " use.\n", ha->pdev->irq);
2396 rval = QLA_ERROR;
2397 } else {
2398 ha->isp_ops->enable_intrs(ha);
2399 rval = QLA_SUCCESS;
2400 }
2401 }
2392 qla4_8xxx_idc_lock(ha); 2402 qla4_8xxx_idc_lock(ha);
2393 qla4_8xxx_set_drv_active(ha); 2403 qla4_8xxx_set_drv_active(ha);
2394 qla4_8xxx_idc_unlock(ha); 2404 qla4_8xxx_idc_unlock(ha);
@@ -2430,12 +2440,7 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev)
2430 goto exit_slot_reset; 2440 goto exit_slot_reset;
2431 } 2441 }
2432 2442
2433 ret = qla4xxx_request_irqs(ha); 2443 ha->isp_ops->disable_intrs(ha);
2434 if (ret) {
2435 ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d"
2436 " already in use.\n", pdev->irq);
2437 goto exit_slot_reset;
2438 }
2439 2444
2440 if (is_qla8022(ha)) { 2445 if (is_qla8022(ha)) {
2441 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 2446 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index a77b973f2cb..9bfacf4ed13 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k3" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k4"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8041fe1ab17..eafeeda6e19 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2438,7 +2438,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
2438 sdev->sdev_state = SDEV_RUNNING; 2438 sdev->sdev_state = SDEV_RUNNING;
2439 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) 2439 else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2440 sdev->sdev_state = SDEV_CREATED; 2440 sdev->sdev_state = SDEV_CREATED;
2441 else 2441 else if (sdev->sdev_state != SDEV_CANCEL &&
2442 sdev->sdev_state != SDEV_OFFLINE)
2442 return -EINVAL; 2443 return -EINVAL;
2443 2444
2444 spin_lock_irqsave(q->queue_lock, flags); 2445 spin_lock_irqsave(q->queue_lock, flags);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 20ad59dff73..76ee2e784f7 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -964,10 +964,11 @@ static void __scsi_remove_target(struct scsi_target *starget)
964 list_for_each_entry(sdev, &shost->__devices, siblings) { 964 list_for_each_entry(sdev, &shost->__devices, siblings) {
965 if (sdev->channel != starget->channel || 965 if (sdev->channel != starget->channel ||
966 sdev->id != starget->id || 966 sdev->id != starget->id ||
967 sdev->sdev_state == SDEV_DEL) 967 scsi_device_get(sdev))
968 continue; 968 continue;
969 spin_unlock_irqrestore(shost->host_lock, flags); 969 spin_unlock_irqrestore(shost->host_lock, flags);
970 scsi_remove_device(sdev); 970 scsi_remove_device(sdev);
971 scsi_device_put(sdev);
971 spin_lock_irqsave(shost->host_lock, flags); 972 spin_lock_irqsave(shost->host_lock, flags);
972 goto restart; 973 goto restart;
973 } 974 }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 57d1e3e1bd4..b9ab3a590e4 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -259,6 +259,28 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
259} 259}
260 260
261static ssize_t 261static ssize_t
262sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
263 char *buf)
264{
265 struct scsi_disk *sdkp = to_scsi_disk(dev);
266 struct scsi_device *sdp = sdkp->device;
267 unsigned int dif, dix;
268
269 dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
270 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
271
272 if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {
273 dif = 0;
274 dix = 1;
275 }
276
277 if (!dif && !dix)
278 return snprintf(buf, 20, "none\n");
279
280 return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
281}
282
283static ssize_t
262sd_show_app_tag_own(struct device *dev, struct device_attribute *attr, 284sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
263 char *buf) 285 char *buf)
264{ 286{
@@ -285,6 +307,7 @@ static struct device_attribute sd_disk_attrs[] = {
285 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop, 307 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
286 sd_store_manage_start_stop), 308 sd_store_manage_start_stop),
287 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL), 309 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
310 __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
288 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), 311 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
289 __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), 312 __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
290 __ATTR_NULL, 313 __ATTR_NULL,
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index cbb38c5197f..3cd8ffbad57 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -325,6 +325,15 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
325 } 325 }
326 326
327 /* 327 /*
328 * SK/ASC/ASCQ of 2/4/2 means "initialization required"
329 * Using CD_TRAY_OPEN results in an START_STOP_UNIT to close
330 * the tray, which resolves the initialization requirement.
331 */
332 if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
333 && sshdr.asc == 0x04 && sshdr.ascq == 0x02)
334 return CDS_TRAY_OPEN;
335
336 /*
328 * 0x04 is format in progress .. but there must be a disc present! 337 * 0x04 is format in progress .. but there must be a disc present!
329 */ 338 */
330 if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04) 339 if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04)
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 14be49b44e8..f986ab7ffe6 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -721,7 +721,7 @@ struct libfc_function_template {
721 * struct fc_disc - Discovery context 721 * struct fc_disc - Discovery context
722 * @retry_count: Number of retries 722 * @retry_count: Number of retries
723 * @pending: 1 if discovery is pending, 0 if not 723 * @pending: 1 if discovery is pending, 0 if not
724 * @requesting: 1 if discovery has been requested, 0 if not 724 * @requested: 1 if discovery has been requested, 0 if not
725 * @seq_count: Number of sequences used for discovery 725 * @seq_count: Number of sequences used for discovery
726 * @buf_len: Length of the discovery buffer 726 * @buf_len: Length of the discovery buffer
727 * @disc_id: Discovery ID 727 * @disc_id: Discovery ID
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index a8f37012663..53a9e886612 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -137,7 +137,7 @@ struct osd_request {
137 void *buff; 137 void *buff;
138 unsigned alloc_size; /* 0 here means: don't call kfree */ 138 unsigned alloc_size; /* 0 here means: don't call kfree */
139 unsigned total_bytes; 139 unsigned total_bytes;
140 } set_attr, enc_get_attr, get_attr; 140 } cdb_cont, set_attr, enc_get_attr, get_attr;
141 141
142 struct _osd_io_info { 142 struct _osd_io_info {
143 struct bio *bio; 143 struct bio *bio;
@@ -448,6 +448,20 @@ void osd_req_read(struct osd_request *or,
448int osd_req_read_kern(struct osd_request *or, 448int osd_req_read_kern(struct osd_request *or,
449 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len); 449 const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
450 450
451/* Scatter/Gather write/read commands */
452int osd_req_write_sg(struct osd_request *or,
453 const struct osd_obj_id *obj, struct bio *bio,
454 const struct osd_sg_entry *sglist, unsigned numentries);
455int osd_req_read_sg(struct osd_request *or,
456 const struct osd_obj_id *obj, struct bio *bio,
457 const struct osd_sg_entry *sglist, unsigned numentries);
458int osd_req_write_sg_kern(struct osd_request *or,
459 const struct osd_obj_id *obj, void **buff,
460 const struct osd_sg_entry *sglist, unsigned numentries);
461int osd_req_read_sg_kern(struct osd_request *or,
462 const struct osd_obj_id *obj, void **buff,
463 const struct osd_sg_entry *sglist, unsigned numentries);
464
451/* 465/*
452 * Root/Partition/Collection/Object Attributes commands 466 * Root/Partition/Collection/Object Attributes commands
453 */ 467 */
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
index 68566128354..a6026da25f3 100644
--- a/include/scsi/osd_protocol.h
+++ b/include/scsi/osd_protocol.h
@@ -631,4 +631,46 @@ static inline void osd_sec_set_caps(struct osd_capability_head *cap,
631 put_unaligned_le16(bit_mask, &cap->permissions_bit_mask); 631 put_unaligned_le16(bit_mask, &cap->permissions_bit_mask);
632} 632}
633 633
634/* osd2r05a sec 5.3: CDB continuation segment formats */
635enum osd_continuation_segment_format {
636 CDB_CONTINUATION_FORMAT_V2 = 0x01,
637};
638
639struct osd_continuation_segment_header {
640 u8 format;
641 u8 reserved1;
642 __be16 service_action;
643 __be32 reserved2;
644 u8 integrity_check[OSDv2_CRYPTO_KEYID_SIZE];
645} __packed;
646
647/* osd2r05a sec 5.4.1: CDB continuation descriptors */
648enum osd_continuation_descriptor_type {
649 NO_MORE_DESCRIPTORS = 0x0000,
650 SCATTER_GATHER_LIST = 0x0001,
651 QUERY_LIST = 0x0002,
652 USER_OBJECT = 0x0003,
653 COPY_USER_OBJECT_SOURCE = 0x0101,
654 EXTENSION_CAPABILITIES = 0xFFEE
655};
656
657struct osd_continuation_descriptor_header {
658 __be16 type;
659 u8 reserved;
660 u8 pad_length;
661 __be32 length;
662} __packed;
663
664
665/* osd2r05a sec 5.4.2: Scatter/gather list */
666struct osd_sg_list_entry {
667 __be64 offset;
668 __be64 len;
669};
670
671struct osd_sg_continuation_descriptor {
672 struct osd_continuation_descriptor_header hdr;
673 struct osd_sg_list_entry entries[];
674};
675
634#endif /* ndef __OSD_PROTOCOL_H__ */ 676#endif /* ndef __OSD_PROTOCOL_H__ */
diff --git a/include/scsi/osd_types.h b/include/scsi/osd_types.h
index 3f5e88cc75c..bd0be7ed4bc 100644
--- a/include/scsi/osd_types.h
+++ b/include/scsi/osd_types.h
@@ -37,4 +37,9 @@ struct osd_attr {
37 void *val_ptr; /* in network order */ 37 void *val_ptr; /* in network order */
38}; 38};
39 39
40struct osd_sg_entry {
41 u64 offset;
42 u64 len;
43};
44
40#endif /* ndef __OSD_TYPES_H__ */ 45#endif /* ndef __OSD_TYPES_H__ */