author     Jing Huang <huangj@brocade.com>            2010-10-18 20:17:23 -0400
committer  James Bottomley <James.Bottomley@suse.de>  2010-10-25 17:28:09 -0400
commit     5fbe25c7a664601666895e8f95eaa59bd9741392
tree       f2d452938ba5ba924d0e8cc4f173f963df13018e   /drivers/scsi
parent     acdc79a60cb3cbbc9f07bb5032d890e9cf94f0ff
[SCSI] bfa: fix comments for c files
This patch addresses the comments from Randy Dunlap (Randy.Dunlap@oracle.com) regarding comment blocks that begin with "/**". The bfa driver comments do not follow the kernel-doc convention, so all occurrences of "/**" are replaced with "/*", and "**/" with "*/".

Signed-off-by: Jing Huang <huangj@brocade.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
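For context: kernel-doc treats only comment blocks that open with a double asterisk ("/**") as documentation and expects such blocks to describe the function and its parameters in a fixed layout, while ordinary explanatory comments should open with "/*". The sketch below is illustrative only; the plain form reuses the bfa_iocfc_start() comment text from the diff that follows, and the kernel-doc form is a hypothetical counter-example that is not part of this patch.

struct bfa_s;   /* opaque BFA instance, defined in the driver headers */

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 *
 * Plain comment: single-asterisk opener, ignored by kernel-doc. This
 * is the style the patch converts the bfa driver to.
 */
void bfa_iocfc_start(struct bfa_s *bfa);

/**
 * bfa_iocfc_start - start IOC operations for a BFA instance
 * @bfa: BFA instance to be started
 *
 * Hypothetical kernel-doc form: the double-asterisk opener marks the
 * block as documentation, which must then name the function and each
 * parameter. The bfa comments never did this, which is why their
 * double-asterisk openers were misleading and are dropped by the patch.
 */
void bfa_iocfc_start(struct bfa_s *bfa);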
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/bfa/bfa_core.c       | 112
-rw-r--r--  drivers/scsi/bfa/bfa_drv.c        |   6
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.c    |   2
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c      | 300
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c        | 134
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_fcpim.c  |  30
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c  | 368
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_rport.c  | 170
-rw-r--r--  drivers/scsi/bfa/bfa_hw_cb.c      |   6
-rw-r--r--  drivers/scsi/bfa/bfa_hw_ct.c      |   6
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c        | 238
-rw-r--r--  drivers/scsi/bfa/bfa_ioc_cb.c     |  22
-rw-r--r--  drivers/scsi/bfa/bfa_ioc_ct.c     |  42
-rw-r--r--  drivers/scsi/bfa/bfa_port.c       |  32
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c        | 378
-rw-r--r--  drivers/scsi/bfa/bfad.c           |  32
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c      |  30
-rw-r--r--  drivers/scsi/bfa/bfad_im.c        |  38
18 files changed, 943 insertions(+), 1003 deletions(-)
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index f75c6be7b843..2345f48dc57f 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -21,11 +21,11 @@
21 21
22BFA_TRC_FILE(HAL, CORE); 22BFA_TRC_FILE(HAL, CORE);
23 23
24/** 24/*
25 * BFA IOC FC related definitions 25 * BFA IOC FC related definitions
26 */ 26 */
27 27
28/** 28/*
29 * IOC local definitions 29 * IOC local definitions
30 */ 30 */
31#define BFA_IOCFC_TOV 5000 /* msecs */ 31#define BFA_IOCFC_TOV 5000 /* msecs */
@@ -54,7 +54,7 @@ enum {
54#define DEF_CFG_NUM_SBOOT_TGTS 16 54#define DEF_CFG_NUM_SBOOT_TGTS 16
55#define DEF_CFG_NUM_SBOOT_LUNS 16 55#define DEF_CFG_NUM_SBOOT_LUNS 16
56 56
57/** 57/*
58 * forward declaration for IOC FC functions 58 * forward declaration for IOC FC functions
59 */ 59 */
60static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); 60static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
@@ -63,7 +63,7 @@ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
63static void bfa_iocfc_reset_cbfn(void *bfa_arg); 63static void bfa_iocfc_reset_cbfn(void *bfa_arg);
64static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; 64static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
65 65
66/** 66/*
67 * BFA Interrupt handling functions 67 * BFA Interrupt handling functions
68 */ 68 */
69static void 69static void
@@ -86,7 +86,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
86 86
87 waitq = bfa_reqq(bfa, qid); 87 waitq = bfa_reqq(bfa, qid);
88 list_for_each_safe(qe, qen, waitq) { 88 list_for_each_safe(qe, qen, waitq) {
89 /** 89 /*
90 * Callback only as long as there is room in request queue 90 * Callback only as long as there is room in request queue
91 */ 91 */
92 if (bfa_reqq_full(bfa, qid)) 92 if (bfa_reqq_full(bfa, qid))
@@ -104,7 +104,7 @@ bfa_msix_all(struct bfa_s *bfa, int vec)
104 bfa_intx(bfa); 104 bfa_intx(bfa);
105} 105}
106 106
107/** 107/*
108 * hal_intr_api 108 * hal_intr_api
109 */ 109 */
110bfa_boolean_t 110bfa_boolean_t
@@ -117,7 +117,7 @@ bfa_intx(struct bfa_s *bfa)
117 if (!intr) 117 if (!intr)
118 return BFA_FALSE; 118 return BFA_FALSE;
119 119
120 /** 120 /*
121 * RME completion queue interrupt 121 * RME completion queue interrupt
122 */ 122 */
123 qintr = intr & __HFN_INT_RME_MASK; 123 qintr = intr & __HFN_INT_RME_MASK;
@@ -131,7 +131,7 @@ bfa_intx(struct bfa_s *bfa)
131 if (!intr) 131 if (!intr)
132 return BFA_TRUE; 132 return BFA_TRUE;
133 133
134 /** 134 /*
135 * CPE completion queue interrupt 135 * CPE completion queue interrupt
136 */ 136 */
137 qintr = intr & __HFN_INT_CPE_MASK; 137 qintr = intr & __HFN_INT_CPE_MASK;
@@ -211,7 +211,7 @@ bfa_msix_reqq(struct bfa_s *bfa, int qid)
211 211
212 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); 212 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
213 213
214 /** 214 /*
215 * Resume any pending requests in the corresponding reqq. 215 * Resume any pending requests in the corresponding reqq.
216 */ 216 */
217 waitq = bfa_reqq(bfa, qid); 217 waitq = bfa_reqq(bfa, qid);
@@ -259,14 +259,14 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
259 } 259 }
260 } 260 }
261 261
262 /** 262 /*
263 * update CI 263 * update CI
264 */ 264 */
265 bfa_rspq_ci(bfa, qid) = pi; 265 bfa_rspq_ci(bfa, qid) = pi;
266 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]); 266 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
267 mmiowb(); 267 mmiowb();
268 268
269 /** 269 /*
270 * Resume any pending requests in the corresponding reqq. 270 * Resume any pending requests in the corresponding reqq.
271 */ 271 */
272 waitq = bfa_reqq(bfa, qid); 272 waitq = bfa_reqq(bfa, qid);
@@ -289,7 +289,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
289 289
290 if (intr) { 290 if (intr) {
291 if (intr & __HFN_INT_LL_HALT) { 291 if (intr & __HFN_INT_LL_HALT) {
292 /** 292 /*
293 * If LL_HALT bit is set then FW Init Halt LL Port 293 * If LL_HALT bit is set then FW Init Halt LL Port
294 * Register needs to be cleared as well so Interrupt 294 * Register needs to be cleared as well so Interrupt
295 * Status Register will be cleared. 295 * Status Register will be cleared.
@@ -300,7 +300,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
300 } 300 }
301 301
302 if (intr & __HFN_INT_ERR_PSS) { 302 if (intr & __HFN_INT_ERR_PSS) {
303 /** 303 /*
304 * ERR_PSS bit needs to be cleared as well in case 304 * ERR_PSS bit needs to be cleared as well in case
305 * interrups are shared so driver's interrupt handler is 305 * interrups are shared so driver's interrupt handler is
306 * still called eventhough it is already masked out. 306 * still called eventhough it is already masked out.
@@ -323,11 +323,11 @@ bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
323 bfa_isrs[mc] = isr_func; 323 bfa_isrs[mc] = isr_func;
324} 324}
325 325
326/** 326/*
327 * BFA IOC FC related functions 327 * BFA IOC FC related functions
328 */ 328 */
329 329
330/** 330/*
331 * hal_ioc_pvt BFA IOC private functions 331 * hal_ioc_pvt BFA IOC private functions
332 */ 332 */
333 333
@@ -366,7 +366,7 @@ bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
366 BFA_CACHELINE_SZ); 366 BFA_CACHELINE_SZ);
367} 367}
368 368
369/** 369/*
370 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ 370 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
371 */ 371 */
372static void 372static void
@@ -384,14 +384,14 @@ bfa_iocfc_send_cfg(void *bfa_arg)
384 384
385 bfa_iocfc_reset_queues(bfa); 385 bfa_iocfc_reset_queues(bfa);
386 386
387 /** 387 /*
388 * initialize IOC configuration info 388 * initialize IOC configuration info
389 */ 389 */
390 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 390 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
391 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 391 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
392 392
393 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 393 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
394 /** 394 /*
395 * dma map REQ and RSP circular queues and shadow pointers 395 * dma map REQ and RSP circular queues and shadow pointers
396 */ 396 */
397 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 397 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
@@ -410,7 +410,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
410 cpu_to_be16(cfg->drvcfg.num_rspq_elems); 410 cpu_to_be16(cfg->drvcfg.num_rspq_elems);
411 } 411 }
412 412
413 /** 413 /*
414 * Enable interrupt coalescing if it is driver init path 414 * Enable interrupt coalescing if it is driver init path
415 * and not ioc disable/enable path. 415 * and not ioc disable/enable path.
416 */ 416 */
@@ -419,7 +419,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
419 419
420 iocfc->cfgdone = BFA_FALSE; 420 iocfc->cfgdone = BFA_FALSE;
421 421
422 /** 422 /*
423 * dma map IOC configuration itself 423 * dma map IOC configuration itself
424 */ 424 */
425 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, 425 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
@@ -442,7 +442,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
442 442
443 iocfc->cfg = *cfg; 443 iocfc->cfg = *cfg;
444 444
445 /** 445 /*
446 * Initialize chip specific handlers. 446 * Initialize chip specific handlers.
447 */ 447 */
448 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { 448 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
@@ -559,7 +559,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
559 } 559 }
560} 560}
561 561
562/** 562/*
563 * Start BFA submodules. 563 * Start BFA submodules.
564 */ 564 */
565static void 565static void
@@ -573,7 +573,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
573 hal_mods[i]->start(bfa); 573 hal_mods[i]->start(bfa);
574} 574}
575 575
576/** 576/*
577 * Disable BFA submodules. 577 * Disable BFA submodules.
578 */ 578 */
579static void 579static void
@@ -623,7 +623,7 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
623 complete(&bfad->disable_comp); 623 complete(&bfad->disable_comp);
624} 624}
625 625
626/** 626/*
627 * Update BFA configuration from firmware configuration. 627 * Update BFA configuration from firmware configuration.
628 */ 628 */
629static void 629static void
@@ -642,7 +642,7 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
642 642
643 iocfc->cfgdone = BFA_TRUE; 643 iocfc->cfgdone = BFA_TRUE;
644 644
645 /** 645 /*
646 * Configuration is complete - initialize/start submodules 646 * Configuration is complete - initialize/start submodules
647 */ 647 */
648 bfa_fcport_init(bfa); 648 bfa_fcport_init(bfa);
@@ -665,7 +665,7 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
665 } 665 }
666} 666}
667 667
668/** 668/*
669 * IOC enable request is complete 669 * IOC enable request is complete
670 */ 670 */
671static void 671static void
@@ -684,7 +684,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
684 bfa_iocfc_send_cfg(bfa); 684 bfa_iocfc_send_cfg(bfa);
685} 685}
686 686
687/** 687/*
688 * IOC disable request is complete 688 * IOC disable request is complete
689 */ 689 */
690static void 690static void
@@ -705,7 +705,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
705 } 705 }
706} 706}
707 707
708/** 708/*
709 * Notify sub-modules of hardware failure. 709 * Notify sub-modules of hardware failure.
710 */ 710 */
711static void 711static void
@@ -723,7 +723,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
723 bfa); 723 bfa);
724} 724}
725 725
726/** 726/*
727 * Actions on chip-reset completion. 727 * Actions on chip-reset completion.
728 */ 728 */
729static void 729static void
@@ -735,11 +735,11 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
735 bfa_isr_enable(bfa); 735 bfa_isr_enable(bfa);
736} 736}
737 737
738/** 738/*
739 * hal_ioc_public 739 * hal_ioc_public
740 */ 740 */
741 741
742/** 742/*
743 * Query IOC memory requirement information. 743 * Query IOC memory requirement information.
744 */ 744 */
745void 745void
@@ -754,7 +754,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); 754 *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
755} 755}
756 756
757/** 757/*
758 * Query IOC memory requirement information. 758 * Query IOC memory requirement information.
759 */ 759 */
760void 760void
@@ -772,7 +772,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
772 ioc->trcmod = bfa->trcmod; 772 ioc->trcmod = bfa->trcmod;
773 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); 773 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
774 774
775 /** 775 /*
776 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. 776 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
777 */ 777 */
778 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) 778 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
@@ -790,7 +790,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
790 INIT_LIST_HEAD(&bfa->reqq_waitq[i]); 790 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
791} 791}
792 792
793/** 793/*
794 * Query IOC memory requirement information. 794 * Query IOC memory requirement information.
795 */ 795 */
796void 796void
@@ -799,7 +799,7 @@ bfa_iocfc_detach(struct bfa_s *bfa)
799 bfa_ioc_detach(&bfa->ioc); 799 bfa_ioc_detach(&bfa->ioc);
800} 800}
801 801
802/** 802/*
803 * Query IOC memory requirement information. 803 * Query IOC memory requirement information.
804 */ 804 */
805void 805void
@@ -809,7 +809,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
809 bfa_ioc_enable(&bfa->ioc); 809 bfa_ioc_enable(&bfa->ioc);
810} 810}
811 811
812/** 812/*
813 * IOC start called from bfa_start(). Called to start IOC operations 813 * IOC start called from bfa_start(). Called to start IOC operations
814 * at driver instantiation for this instance. 814 * at driver instantiation for this instance.
815 */ 815 */
@@ -820,7 +820,7 @@ bfa_iocfc_start(struct bfa_s *bfa)
820 bfa_iocfc_start_submod(bfa); 820 bfa_iocfc_start_submod(bfa);
821} 821}
822 822
823/** 823/*
824 * IOC stop called from bfa_stop(). Called only when driver is unloaded 824 * IOC stop called from bfa_stop(). Called only when driver is unloaded
825 * for this instance. 825 * for this instance.
826 */ 826 */
@@ -924,7 +924,7 @@ bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
924 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); 924 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
925 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); 925 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
926} 926}
927/** 927/*
928 * Enable IOC after it is disabled. 928 * Enable IOC after it is disabled.
929 */ 929 */
930void 930void
@@ -953,7 +953,7 @@ bfa_iocfc_is_operational(struct bfa_s *bfa)
953 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; 953 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
954} 954}
955 955
956/** 956/*
957 * Return boot target port wwns -- read from boot information in flash. 957 * Return boot target port wwns -- read from boot information in flash.
958 */ 958 */
959void 959void
@@ -998,11 +998,11 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
998 return cfgrsp->pbc_cfg.nvports; 998 return cfgrsp->pbc_cfg.nvports;
999} 999}
1000 1000
1001/** 1001/*
1002 * hal_api 1002 * hal_api
1003 */ 1003 */
1004 1004
1005/** 1005/*
1006 * Use this function query the memory requirement of the BFA library. 1006 * Use this function query the memory requirement of the BFA library.
1007 * This function needs to be called before bfa_attach() to get the 1007 * This function needs to be called before bfa_attach() to get the
1008 * memory required of the BFA layer for a given driver configuration. 1008 * memory required of the BFA layer for a given driver configuration.
@@ -1055,7 +1055,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
1055 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 1055 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
1056} 1056}
1057 1057
1058/** 1058/*
1059 * Use this function to do attach the driver instance with the BFA 1059 * Use this function to do attach the driver instance with the BFA
1060 * library. This function will not trigger any HW initialization 1060 * library. This function will not trigger any HW initialization
1061 * process (which will be done in bfa_init() call) 1061 * process (which will be done in bfa_init() call)
@@ -1092,7 +1092,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1092 1092
1093 bfa_assert((cfg != NULL) && (meminfo != NULL)); 1093 bfa_assert((cfg != NULL) && (meminfo != NULL));
1094 1094
1095 /** 1095 /*
1096 * initialize all memory pointers for iterative allocation 1096 * initialize all memory pointers for iterative allocation
1097 */ 1097 */
1098 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { 1098 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
@@ -1109,7 +1109,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1109 bfa_com_port_attach(bfa, meminfo); 1109 bfa_com_port_attach(bfa, meminfo);
1110} 1110}
1111 1111
1112/** 1112/*
1113 * Use this function to delete a BFA IOC. IOC should be stopped (by 1113 * Use this function to delete a BFA IOC. IOC should be stopped (by
1114 * calling bfa_stop()) before this function call. 1114 * calling bfa_stop()) before this function call.
1115 * 1115 *
@@ -1146,7 +1146,7 @@ bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
1146 bfa->plog = plog; 1146 bfa->plog = plog;
1147} 1147}
1148 1148
1149/** 1149/*
1150 * Initialize IOC. 1150 * Initialize IOC.
1151 * 1151 *
1152 * This function will return immediately, when the IOC initialization is 1152 * This function will return immediately, when the IOC initialization is
@@ -1169,7 +1169,7 @@ bfa_init(struct bfa_s *bfa)
1169 bfa_iocfc_init(bfa); 1169 bfa_iocfc_init(bfa);
1170} 1170}
1171 1171
1172/** 1172/*
1173 * Use this function initiate the IOC configuration setup. This function 1173 * Use this function initiate the IOC configuration setup. This function
1174 * will return immediately. 1174 * will return immediately.
1175 * 1175 *
@@ -1183,7 +1183,7 @@ bfa_start(struct bfa_s *bfa)
1183 bfa_iocfc_start(bfa); 1183 bfa_iocfc_start(bfa);
1184} 1184}
1185 1185
1186/** 1186/*
1187 * Use this function quiese the IOC. This function will return immediately, 1187 * Use this function quiese the IOC. This function will return immediately,
1188 * when the IOC is actually stopped, the bfad->comp will be set. 1188 * when the IOC is actually stopped, the bfad->comp will be set.
1189 * 1189 *
@@ -1243,7 +1243,7 @@ bfa_attach_fcs(struct bfa_s *bfa)
1243 bfa->fcs = BFA_TRUE; 1243 bfa->fcs = BFA_TRUE;
1244} 1244}
1245 1245
1246/** 1246/*
1247 * Periodic timer heart beat from driver 1247 * Periodic timer heart beat from driver
1248 */ 1248 */
1249void 1249void
@@ -1252,7 +1252,7 @@ bfa_timer_tick(struct bfa_s *bfa)
1252 bfa_timer_beat(&bfa->timer_mod); 1252 bfa_timer_beat(&bfa->timer_mod);
1253} 1253}
1254 1254
1255/** 1255/*
1256 * Return the list of PCI vendor/device id lists supported by this 1256 * Return the list of PCI vendor/device id lists supported by this
1257 * BFA instance. 1257 * BFA instance.
1258 */ 1258 */
@@ -1270,7 +1270,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1270 *pciids = __pciids; 1270 *pciids = __pciids;
1271} 1271}
1272 1272
1273/** 1273/*
1274 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled 1274 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
1275 * into BFA layer). The OS driver can then turn back and overwrite entries that 1275 * into BFA layer). The OS driver can then turn back and overwrite entries that
1276 * have been configured by the user. 1276 * have been configured by the user.
@@ -1328,7 +1328,7 @@ bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
1328 bfa_ioc_get_attr(&bfa->ioc, ioc_attr); 1328 bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
1329} 1329}
1330 1330
1331/** 1331/*
1332 * Retrieve firmware trace information on IOC failure. 1332 * Retrieve firmware trace information on IOC failure.
1333 */ 1333 */
1334bfa_status_t 1334bfa_status_t
@@ -1337,7 +1337,7 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
1337 return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen); 1337 return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
1338} 1338}
1339 1339
1340/** 1340/*
1341 * Clear the saved firmware trace information of an IOC. 1341 * Clear the saved firmware trace information of an IOC.
1342 */ 1342 */
1343void 1343void
@@ -1346,7 +1346,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa)
1346 bfa_ioc_debug_fwsave_clear(&bfa->ioc); 1346 bfa_ioc_debug_fwsave_clear(&bfa->ioc);
1347} 1347}
1348 1348
1349/** 1349/*
1350 * Fetch firmware trace data. 1350 * Fetch firmware trace data.
1351 * 1351 *
1352 * @param[in] bfa BFA instance 1352 * @param[in] bfa BFA instance
@@ -1362,7 +1362,7 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
1362 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); 1362 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
1363} 1363}
1364 1364
1365/** 1365/*
1366 * Dump firmware memory. 1366 * Dump firmware memory.
1367 * 1367 *
1368 * @param[in] bfa BFA instance 1368 * @param[in] bfa BFA instance
@@ -1378,7 +1378,7 @@ bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
1378{ 1378{
1379 return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen); 1379 return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
1380} 1380}
1381/** 1381/*
1382 * Reset hw semaphore & usage cnt regs and initialize. 1382 * Reset hw semaphore & usage cnt regs and initialize.
1383 */ 1383 */
1384void 1384void
@@ -1388,7 +1388,7 @@ bfa_chip_reset(struct bfa_s *bfa)
1388 bfa_ioc_pll_init(&bfa->ioc); 1388 bfa_ioc_pll_init(&bfa->ioc);
1389} 1389}
1390 1390
1391/** 1391/*
1392 * Fetch firmware statistics data. 1392 * Fetch firmware statistics data.
1393 * 1393 *
1394 * @param[in] bfa BFA instance 1394 * @param[in] bfa BFA instance
diff --git a/drivers/scsi/bfa/bfa_drv.c b/drivers/scsi/bfa/bfa_drv.c
index 14127646dc54..0222d7c88a9a 100644
--- a/drivers/scsi/bfa/bfa_drv.c
+++ b/drivers/scsi/bfa/bfa_drv.c
@@ -17,7 +17,7 @@
17 17
18#include "bfa_modules.h" 18#include "bfa_modules.h"
19 19
20/** 20/*
21 * BFA module list terminated by NULL 21 * BFA module list terminated by NULL
22 */ 22 */
23struct bfa_module_s *hal_mods[] = { 23struct bfa_module_s *hal_mods[] = {
@@ -31,7 +31,7 @@ struct bfa_module_s *hal_mods[] = {
31 NULL 31 NULL
32}; 32};
33 33
34/** 34/*
35 * Message handlers for various modules. 35 * Message handlers for various modules.
36 */ 36 */
37bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { 37bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
@@ -70,7 +70,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
70}; 70};
71 71
72 72
73/** 73/*
74 * Message handlers for mailbox command classes 74 * Message handlers for mailbox command classes
75 */ 75 */
76bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { 76bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 85b005093da9..9c725314b513 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -150,7 +150,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
150 fchs->s_id = (s_id); 150 fchs->s_id = (s_id);
151 fchs->ox_id = cpu_to_be16(ox_id); 151 fchs->ox_id = cpu_to_be16(ox_id);
152 152
153 /** 153 /*
154 * @todo no need to set ox_id for request 154 * @todo no need to set ox_id for request
155 * no need to set rx_id for response 155 * no need to set rx_id for response
156 */ 156 */
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 8c585bd855ed..135c4427801c 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -26,7 +26,7 @@ BFA_MODULE(fcpim);
26 (__l->__stats += __r->__stats) 26 (__l->__stats += __r->__stats)
27 27
28 28
29/** 29/*
30 * BFA ITNIM Related definitions 30 * BFA ITNIM Related definitions
31 */ 31 */
32static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); 32static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
@@ -72,7 +72,7 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
72 } \ 72 } \
73} while (0) 73} while (0)
74 74
75/** 75/*
76 * bfa_itnim_sm BFA itnim state machine 76 * bfa_itnim_sm BFA itnim state machine
77 */ 77 */
78 78
@@ -89,7 +89,7 @@ enum bfa_itnim_event {
89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ 89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
90}; 90};
91 91
92/** 92/*
93 * BFA IOIM related definitions 93 * BFA IOIM related definitions
94 */ 94 */
95#define bfa_ioim_move_to_comp_q(__ioim) do { \ 95#define bfa_ioim_move_to_comp_q(__ioim) do { \
@@ -107,11 +107,11 @@ enum bfa_itnim_event {
107 if ((__fcpim)->profile_start) \ 107 if ((__fcpim)->profile_start) \
108 (__fcpim)->profile_start(__ioim); \ 108 (__fcpim)->profile_start(__ioim); \
109} while (0) 109} while (0)
110/** 110/*
111 * hal_ioim_sm 111 * hal_ioim_sm
112 */ 112 */
113 113
114/** 114/*
115 * IO state machine events 115 * IO state machine events
116 */ 116 */
117enum bfa_ioim_event { 117enum bfa_ioim_event {
@@ -136,11 +136,11 @@ enum bfa_ioim_event {
136}; 136};
137 137
138 138
139/** 139/*
140 * BFA TSKIM related definitions 140 * BFA TSKIM related definitions
141 */ 141 */
142 142
143/** 143/*
144 * task management completion handling 144 * task management completion handling
145 */ 145 */
146#define bfa_tskim_qcomp(__tskim, __cbfn) do { \ 146#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
@@ -165,7 +165,7 @@ enum bfa_tskim_event {
165 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ 165 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
166}; 166};
167 167
168/** 168/*
169 * forward declaration for BFA ITNIM functions 169 * forward declaration for BFA ITNIM functions
170 */ 170 */
171static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); 171static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
@@ -183,7 +183,7 @@ static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
183static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); 183static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
184static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); 184static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
185 185
186/** 186/*
187 * forward declaration of ITNIM state machine 187 * forward declaration of ITNIM state machine
188 */ 188 */
189static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, 189static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
@@ -217,7 +217,7 @@ static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
217static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, 217static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
218 enum bfa_itnim_event event); 218 enum bfa_itnim_event event);
219 219
220/** 220/*
221 * forward declaration for BFA IOIM functions 221 * forward declaration for BFA IOIM functions
222 */ 222 */
223static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); 223static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
@@ -233,7 +233,7 @@ static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
233static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 233static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
234 234
235 235
236/** 236/*
237 * forward declaration of BFA IO state machine 237 * forward declaration of BFA IO state machine
238 */ 238 */
239static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, 239static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
@@ -261,7 +261,7 @@ static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
261static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, 261static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
262 enum bfa_ioim_event event); 262 enum bfa_ioim_event event);
263 263
264/** 264/*
265 * forward declaration for BFA TSKIM functions 265 * forward declaration for BFA TSKIM functions
266 */ 266 */
267static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); 267static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
@@ -276,7 +276,7 @@ static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
276static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); 276static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
277 277
278 278
279/** 279/*
280 * forward declaration of BFA TSKIM state machine 280 * forward declaration of BFA TSKIM state machine
281 */ 281 */
282static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, 282static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
@@ -294,11 +294,11 @@ static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
294static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, 294static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
295 enum bfa_tskim_event event); 295 enum bfa_tskim_event event);
296 296
297/** 297/*
298 * hal_fcpim_mod BFA FCP Initiator Mode module 298 * hal_fcpim_mod BFA FCP Initiator Mode module
299 */ 299 */
300 300
301/** 301/*
302 * Compute and return memory needed by FCP(im) module. 302 * Compute and return memory needed by FCP(im) module.
303 */ 303 */
304static void 304static void
@@ -307,7 +307,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
307{ 307{
308 bfa_itnim_meminfo(cfg, km_len, dm_len); 308 bfa_itnim_meminfo(cfg, km_len, dm_len);
309 309
310 /** 310 /*
311 * IO memory 311 * IO memory
312 */ 312 */
313 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) 313 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
@@ -320,7 +320,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
320 320
321 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN; 321 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
322 322
323 /** 323 /*
324 * task management command memory 324 * task management command memory
325 */ 325 */
326 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) 326 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
@@ -604,11 +604,11 @@ bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
604 604
605 605
606 606
607/** 607/*
608 * BFA ITNIM module state machine functions 608 * BFA ITNIM module state machine functions
609 */ 609 */
610 610
611/** 611/*
612 * Beginning/unallocated state - no events expected. 612 * Beginning/unallocated state - no events expected.
613 */ 613 */
614static void 614static void
@@ -629,7 +629,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
629 } 629 }
630} 630}
631 631
632/** 632/*
633 * Beginning state, only online event expected. 633 * Beginning state, only online event expected.
634 */ 634 */
635static void 635static void
@@ -660,7 +660,7 @@ bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
660 } 660 }
661} 661}
662 662
663/** 663/*
664 * Waiting for itnim create response from firmware. 664 * Waiting for itnim create response from firmware.
665 */ 665 */
666static void 666static void
@@ -732,7 +732,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
732 } 732 }
733} 733}
734 734
735/** 735/*
736 * Waiting for itnim create response from firmware, a delete is pending. 736 * Waiting for itnim create response from firmware, a delete is pending.
737 */ 737 */
738static void 738static void
@@ -760,7 +760,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
760 } 760 }
761} 761}
762 762
763/** 763/*
764 * Online state - normal parking state. 764 * Online state - normal parking state.
765 */ 765 */
766static void 766static void
@@ -802,7 +802,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
802 } 802 }
803} 803}
804 804
805/** 805/*
806 * Second level error recovery need. 806 * Second level error recovery need.
807 */ 807 */
808static void 808static void
@@ -833,7 +833,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
833 } 833 }
834} 834}
835 835
836/** 836/*
837 * Going offline. Waiting for active IO cleanup. 837 * Going offline. Waiting for active IO cleanup.
838 */ 838 */
839static void 839static void
@@ -870,7 +870,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
870 } 870 }
871} 871}
872 872
873/** 873/*
874 * Deleting itnim. Waiting for active IO cleanup. 874 * Deleting itnim. Waiting for active IO cleanup.
875 */ 875 */
876static void 876static void
@@ -898,7 +898,7 @@ bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
898 } 898 }
899} 899}
900 900
901/** 901/*
902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response. 902 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
903 */ 903 */
904static void 904static void
@@ -955,7 +955,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
955 } 955 }
956} 956}
957 957
958/** 958/*
959 * Offline state. 959 * Offline state.
960 */ 960 */
961static void 961static void
@@ -987,7 +987,7 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
987 } 987 }
988} 988}
989 989
990/** 990/*
991 * IOC h/w failed state. 991 * IOC h/w failed state.
992 */ 992 */
993static void 993static void
@@ -1023,7 +1023,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
1023 } 1023 }
1024} 1024}
1025 1025
1026/** 1026/*
1027 * Itnim is deleted, waiting for firmware response to delete. 1027 * Itnim is deleted, waiting for firmware response to delete.
1028 */ 1028 */
1029static void 1029static void
@@ -1068,7 +1068,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
1068 } 1068 }
1069} 1069}
1070 1070
1071/** 1071/*
1072 * Initiate cleanup of all IOs on an IOC failure. 1072 * Initiate cleanup of all IOs on an IOC failure.
1073 */ 1073 */
1074static void 1074static void
@@ -1088,7 +1088,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
1088 bfa_ioim_iocdisable(ioim); 1088 bfa_ioim_iocdisable(ioim);
1089 } 1089 }
1090 1090
1091 /** 1091 /*
1092 * For IO request in pending queue, we pretend an early timeout. 1092 * For IO request in pending queue, we pretend an early timeout.
1093 */ 1093 */
1094 list_for_each_safe(qe, qen, &itnim->pending_q) { 1094 list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -1102,7 +1102,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
1102 } 1102 }
1103} 1103}
1104 1104
1105/** 1105/*
1106 * IO cleanup completion 1106 * IO cleanup completion
1107 */ 1107 */
1108static void 1108static void
@@ -1114,7 +1114,7 @@ bfa_itnim_cleanp_comp(void *itnim_cbarg)
1114 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); 1114 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
1115} 1115}
1116 1116
1117/** 1117/*
1118 * Initiate cleanup of all IOs. 1118 * Initiate cleanup of all IOs.
1119 */ 1119 */
1120static void 1120static void
@@ -1129,7 +1129,7 @@ bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
1129 list_for_each_safe(qe, qen, &itnim->io_q) { 1129 list_for_each_safe(qe, qen, &itnim->io_q) {
1130 ioim = (struct bfa_ioim_s *) qe; 1130 ioim = (struct bfa_ioim_s *) qe;
1131 1131
1132 /** 1132 /*
1133 * Move IO to a cleanup queue from active queue so that a later 1133 * Move IO to a cleanup queue from active queue so that a later
1134 * TM will not pickup this IO. 1134 * TM will not pickup this IO.
1135 */ 1135 */
@@ -1176,7 +1176,7 @@ __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
1176 bfa_cb_itnim_sler(itnim->ditn); 1176 bfa_cb_itnim_sler(itnim->ditn);
1177} 1177}
1178 1178
1179/** 1179/*
1180 * Call to resume any I/O requests waiting for room in request queue. 1180 * Call to resume any I/O requests waiting for room in request queue.
1181 */ 1181 */
1182static void 1182static void
@@ -1190,7 +1190,7 @@ bfa_itnim_qresume(void *cbarg)
1190 1190
1191 1191
1192 1192
1193/** 1193/*
1194 * bfa_itnim_public 1194 * bfa_itnim_public
1195 */ 1195 */
1196 1196
@@ -1210,7 +1210,7 @@ void
1210bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 1210bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
1211 u32 *dm_len) 1211 u32 *dm_len)
1212{ 1212{
1213 /** 1213 /*
1214 * ITN memory 1214 * ITN memory
1215 */ 1215 */
1216 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); 1216 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
@@ -1264,7 +1264,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1264 1264
1265 itnim->msg_no++; 1265 itnim->msg_no++;
1266 1266
1267 /** 1267 /*
1268 * check for room in queue to send request now 1268 * check for room in queue to send request now
1269 */ 1269 */
1270 m = bfa_reqq_next(itnim->bfa, itnim->reqq); 1270 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1281,7 +1281,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1281 m->msg_no = itnim->msg_no; 1281 m->msg_no = itnim->msg_no;
1282 bfa_stats(itnim, fw_create); 1282 bfa_stats(itnim, fw_create);
1283 1283
1284 /** 1284 /*
1285 * queue I/O message to firmware 1285 * queue I/O message to firmware
1286 */ 1286 */
1287 bfa_reqq_produce(itnim->bfa, itnim->reqq); 1287 bfa_reqq_produce(itnim->bfa, itnim->reqq);
@@ -1293,7 +1293,7 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1293{ 1293{
1294 struct bfi_itnim_delete_req_s *m; 1294 struct bfi_itnim_delete_req_s *m;
1295 1295
1296 /** 1296 /*
1297 * check for room in queue to send request now 1297 * check for room in queue to send request now
1298 */ 1298 */
1299 m = bfa_reqq_next(itnim->bfa, itnim->reqq); 1299 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1307,14 +1307,14 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1307 m->fw_handle = itnim->rport->fw_handle; 1307 m->fw_handle = itnim->rport->fw_handle;
1308 bfa_stats(itnim, fw_delete); 1308 bfa_stats(itnim, fw_delete);
1309 1309
1310 /** 1310 /*
1311 * queue I/O message to firmware 1311 * queue I/O message to firmware
1312 */ 1312 */
1313 bfa_reqq_produce(itnim->bfa, itnim->reqq); 1313 bfa_reqq_produce(itnim->bfa, itnim->reqq);
1314 return BFA_TRUE; 1314 return BFA_TRUE;
1315} 1315}
1316 1316
1317/** 1317/*
1318 * Cleanup all pending failed inflight requests. 1318 * Cleanup all pending failed inflight requests.
1319 */ 1319 */
1320static void 1320static void
@@ -1329,7 +1329,7 @@ bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
1329 } 1329 }
1330} 1330}
1331 1331
1332/** 1332/*
1333 * Start all pending IO requests. 1333 * Start all pending IO requests.
1334 */ 1334 */
1335static void 1335static void
@@ -1339,12 +1339,12 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
1339 1339
1340 bfa_itnim_iotov_stop(itnim); 1340 bfa_itnim_iotov_stop(itnim);
1341 1341
1342 /** 1342 /*
1343 * Abort all inflight IO requests in the queue 1343 * Abort all inflight IO requests in the queue
1344 */ 1344 */
1345 bfa_itnim_delayed_comp(itnim, BFA_FALSE); 1345 bfa_itnim_delayed_comp(itnim, BFA_FALSE);
1346 1346
1347 /** 1347 /*
1348 * Start all pending IO requests. 1348 * Start all pending IO requests.
1349 */ 1349 */
1350 while (!list_empty(&itnim->pending_q)) { 1350 while (!list_empty(&itnim->pending_q)) {
@@ -1354,7 +1354,7 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
1354 } 1354 }
1355} 1355}
1356 1356
1357/** 1357/*
1358 * Fail all pending IO requests 1358 * Fail all pending IO requests
1359 */ 1359 */
1360static void 1360static void
@@ -1362,12 +1362,12 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
1362{ 1362{
1363 struct bfa_ioim_s *ioim; 1363 struct bfa_ioim_s *ioim;
1364 1364
1365 /** 1365 /*
1366 * Fail all inflight IO requests in the queue 1366 * Fail all inflight IO requests in the queue
1367 */ 1367 */
1368 bfa_itnim_delayed_comp(itnim, BFA_TRUE); 1368 bfa_itnim_delayed_comp(itnim, BFA_TRUE);
1369 1369
1370 /** 1370 /*
1371 * Fail any pending IO requests. 1371 * Fail any pending IO requests.
1372 */ 1372 */
1373 while (!list_empty(&itnim->pending_q)) { 1373 while (!list_empty(&itnim->pending_q)) {
@@ -1377,7 +1377,7 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
1377 } 1377 }
1378} 1378}
1379 1379
1380/** 1380/*
1381 * IO TOV timer callback. Fail any pending IO requests. 1381 * IO TOV timer callback. Fail any pending IO requests.
1382 */ 1382 */
1383static void 1383static void
@@ -1392,7 +1392,7 @@ bfa_itnim_iotov(void *itnim_arg)
1392 bfa_cb_itnim_tov(itnim->ditn); 1392 bfa_cb_itnim_tov(itnim->ditn);
1393} 1393}
1394 1394
1395/** 1395/*
1396 * Start IO TOV timer for failing back pending IO requests in offline state. 1396 * Start IO TOV timer for failing back pending IO requests in offline state.
1397 */ 1397 */
1398static void 1398static void
@@ -1407,7 +1407,7 @@ bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
1407 } 1407 }
1408} 1408}
1409 1409
1410/** 1410/*
1411 * Stop IO TOV timer. 1411 * Stop IO TOV timer.
1412 */ 1412 */
1413static void 1413static void
@@ -1419,7 +1419,7 @@ bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1419 } 1419 }
1420} 1420}
1421 1421
1422/** 1422/*
1423 * Stop IO TOV timer. 1423 * Stop IO TOV timer.
1424 */ 1424 */
1425static void 1425static void
@@ -1459,11 +1459,11 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1459 1459
1460 1460
1461 1461
1462/** 1462/*
1463 * bfa_itnim_public 1463 * bfa_itnim_public
1464 */ 1464 */
1465 1465
1466/** 1466/*
1467 * Itnim interrupt processing. 1467 * Itnim interrupt processing.
1468 */ 1468 */
1469void 1469void
@@ -1509,7 +1509,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1509 1509
1510 1510
1511 1511
1512/** 1512/*
1513 * bfa_itnim_api 1513 * bfa_itnim_api
1514 */ 1514 */
1515 1515
@@ -1552,7 +1552,7 @@ bfa_itnim_offline(struct bfa_itnim_s *itnim)
1552 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); 1552 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
1553} 1553}
1554 1554
1555/** 1555/*
1556 * Return true if itnim is considered offline for holding off IO request. 1556 * Return true if itnim is considered offline for holding off IO request.
1557 * IO is not held if itnim is being deleted. 1557 * IO is not held if itnim is being deleted.
1558 */ 1558 */
@@ -1603,11 +1603,11 @@ bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1603 itnim->ioprofile.io_latency.min[j] = ~0; 1603 itnim->ioprofile.io_latency.min[j] = ~0;
1604} 1604}
1605 1605
1606/** 1606/*
1607 * BFA IO module state machine functions 1607 * BFA IO module state machine functions
1608 */ 1608 */
1609 1609
1610/** 1610/*
1611 * IO is not started (unallocated). 1611 * IO is not started (unallocated).
1612 */ 1612 */
1613static void 1613static void
@@ -1657,7 +1657,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1657 break; 1657 break;
1658 1658
1659 case BFA_IOIM_SM_ABORT: 1659 case BFA_IOIM_SM_ABORT:
1660 /** 1660 /*
1661 * IO in pending queue can get abort requests. Complete abort 1661 * IO in pending queue can get abort requests. Complete abort
1662 * requests immediately. 1662 * requests immediately.
1663 */ 1663 */
@@ -1672,7 +1672,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1672 } 1672 }
1673} 1673}
1674 1674
1675/** 1675/*
1676 * IO is waiting for SG pages. 1676 * IO is waiting for SG pages.
1677 */ 1677 */
1678static void 1678static void
@@ -1719,7 +1719,7 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1719 } 1719 }
1720} 1720}
1721 1721
1722/** 1722/*
1723 * IO is active. 1723 * IO is active.
1724 */ 1724 */
1725static void 1725static void
@@ -1803,7 +1803,7 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1803 } 1803 }
1804} 1804}
1805 1805
1806/** 1806/*
1807* IO is retried with new tag. 1807* IO is retried with new tag.
1808*/ 1808*/
1809static void 1809static void
@@ -1844,7 +1844,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1844 break; 1844 break;
1845 1845
1846 case BFA_IOIM_SM_ABORT: 1846 case BFA_IOIM_SM_ABORT:
1847 /** in this state IO abort is done. 1847 /* in this state IO abort is done.
1848 * Waiting for IO tag resource free. 1848 * Waiting for IO tag resource free.
1849 */ 1849 */
1850 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); 1850 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
@@ -1857,7 +1857,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1857 } 1857 }
1858} 1858}
1859 1859
1860/** 1860/*
1861 * IO is being aborted, waiting for completion from firmware. 1861 * IO is being aborted, waiting for completion from firmware.
1862 */ 1862 */
1863static void 1863static void
@@ -1919,7 +1919,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1919 } 1919 }
1920} 1920}
1921 1921
1922/** 1922/*
1923 * IO is being cleaned up (implicit abort), waiting for completion from 1923 * IO is being cleaned up (implicit abort), waiting for completion from
1924 * firmware. 1924 * firmware.
1925 */ 1925 */
@@ -1937,7 +1937,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1937 break; 1937 break;
1938 1938
1939 case BFA_IOIM_SM_ABORT: 1939 case BFA_IOIM_SM_ABORT:
1940 /** 1940 /*
1941 * IO is already being aborted implicitly 1941 * IO is already being aborted implicitly
1942 */ 1942 */
1943 ioim->io_cbfn = __bfa_cb_ioim_abort; 1943 ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -1969,7 +1969,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1969 break; 1969 break;
1970 1970
1971 case BFA_IOIM_SM_CLEANUP: 1971 case BFA_IOIM_SM_CLEANUP:
1972 /** 1972 /*
1973 * IO can be in cleanup state already due to TM command. 1973 * IO can be in cleanup state already due to TM command.
1974 * 2nd cleanup request comes from ITN offline event. 1974 * 2nd cleanup request comes from ITN offline event.
1975 */ 1975 */
@@ -1980,7 +1980,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1980 } 1980 }
1981} 1981}
1982 1982
1983/** 1983/*
1984 * IO is waiting for room in request CQ 1984 * IO is waiting for room in request CQ
1985 */ 1985 */
1986static void 1986static void
@@ -2024,7 +2024,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2024 } 2024 }
2025} 2025}
2026 2026
2027/** 2027/*
2028 * Active IO is being aborted, waiting for room in request CQ. 2028 * Active IO is being aborted, waiting for room in request CQ.
2029 */ 2029 */
2030static void 2030static void
@@ -2075,7 +2075,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2075 } 2075 }
2076} 2076}
2077 2077
2078/** 2078/*
2079 * Active IO is being cleaned up, waiting for room in request CQ. 2079 * Active IO is being cleaned up, waiting for room in request CQ.
2080 */ 2080 */
2081static void 2081static void
@@ -2091,7 +2091,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2091 break; 2091 break;
2092 2092
2093 case BFA_IOIM_SM_ABORT: 2093 case BFA_IOIM_SM_ABORT:
2094 /** 2094 /*
2095 * IO is alraedy being cleaned up implicitly 2095 * IO is alraedy being cleaned up implicitly
2096 */ 2096 */
2097 ioim->io_cbfn = __bfa_cb_ioim_abort; 2097 ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -2125,7 +2125,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2125 } 2125 }
2126} 2126}
2127 2127
2128/** 2128/*
2129 * IO bfa callback is pending. 2129 * IO bfa callback is pending.
2130 */ 2130 */
2131static void 2131static void
@@ -2152,7 +2152,7 @@ bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2152 } 2152 }
2153} 2153}
2154 2154
2155/** 2155/*
2156 * IO bfa callback is pending. IO resource cannot be freed. 2156 * IO bfa callback is pending. IO resource cannot be freed.
2157 */ 2157 */
2158static void 2158static void
@@ -2185,7 +2185,7 @@ bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2185 } 2185 }
2186} 2186}
2187 2187
2188/** 2188/*
2189 * IO is completed, waiting resource free from firmware. 2189 * IO is completed, waiting resource free from firmware.
2190 */ 2190 */
2191static void 2191static void
@@ -2214,7 +2214,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2214 2214
2215 2215
2216 2216
2217/** 2217/*
2218 * hal_ioim_private 2218 * hal_ioim_private
2219 */ 2219 */
2220 2220
@@ -2247,7 +2247,7 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2247 2247
2248 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; 2248 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2249 if (m->io_status == BFI_IOIM_STS_OK) { 2249 if (m->io_status == BFI_IOIM_STS_OK) {
2250 /** 2250 /*
2251 * setup sense information, if present 2251 * setup sense information, if present
2252 */ 2252 */
2253 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) && 2253 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
@@ -2256,7 +2256,7 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2256 snsinfo = ioim->iosp->snsinfo; 2256 snsinfo = ioim->iosp->snsinfo;
2257 } 2257 }
2258 2258
2259 /** 2259 /*
2260 * setup residue value correctly for normal completions 2260 * setup residue value correctly for normal completions
2261 */ 2261 */
2262 if (m->resid_flags == FCP_RESID_UNDER) { 2262 if (m->resid_flags == FCP_RESID_UNDER) {
@@ -2327,7 +2327,7 @@ bfa_ioim_sgpg_alloced(void *cbarg)
2327 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); 2327 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2328} 2328}
2329 2329
2330/** 2330/*
2331 * Send I/O request to firmware. 2331 * Send I/O request to firmware.
2332 */ 2332 */
2333static bfa_boolean_t 2333static bfa_boolean_t
@@ -2343,7 +2343,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2343 struct scatterlist *sg; 2343 struct scatterlist *sg;
2344 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; 2344 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2345 2345
2346 /** 2346 /*
2347 * check for room in queue to send request now 2347 * check for room in queue to send request now
2348 */ 2348 */
2349 m = bfa_reqq_next(ioim->bfa, ioim->reqq); 2349 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
@@ -2354,14 +2354,14 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2354 return BFA_FALSE; 2354 return BFA_FALSE;
2355 } 2355 }
2356 2356
2357 /** 2357 /*
2358 * build i/o request message next 2358 * build i/o request message next
2359 */ 2359 */
2360 m->io_tag = cpu_to_be16(ioim->iotag); 2360 m->io_tag = cpu_to_be16(ioim->iotag);
2361 m->rport_hdl = ioim->itnim->rport->fw_handle; 2361 m->rport_hdl = ioim->itnim->rport->fw_handle;
2362 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); 2362 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
2363 2363
2364 /** 2364 /*
2365 * build inline IO SG element here 2365 * build inline IO SG element here
2366 */ 2366 */
2367 sge = &m->sges[0]; 2367 sge = &m->sges[0];
@@ -2387,7 +2387,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2387 sge->flags = BFI_SGE_PGDLEN; 2387 sge->flags = BFI_SGE_PGDLEN;
2388 bfa_sge_to_be(sge); 2388 bfa_sge_to_be(sge);
2389 2389
2390 /** 2390 /*
2391 * set up I/O command parameters 2391 * set up I/O command parameters
2392 */ 2392 */
2393 m->cmnd = cmnd_z0; 2393 m->cmnd = cmnd_z0;
@@ -2397,7 +2397,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2397 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); 2397 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
2398 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); 2398 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2399 2399
2400 /** 2400 /*
2401 * set up I/O message header 2401 * set up I/O message header
2402 */ 2402 */
2403 switch (m->cmnd.iodir) { 2403 switch (m->cmnd.iodir) {
@@ -2426,7 +2426,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2426 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio); 2426 m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
2427 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio); 2427 m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
2428 2428
2429 /** 2429 /*
2430 * Handle large CDB (>16 bytes). 2430 * Handle large CDB (>16 bytes).
2431 */ 2431 */
2432 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) - 2432 m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
@@ -2440,14 +2440,14 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2440 } 2440 }
2441#endif 2441#endif
2442 2442
2443 /** 2443 /*
2444 * queue I/O message to firmware 2444 * queue I/O message to firmware
2445 */ 2445 */
2446 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2446 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2447 return BFA_TRUE; 2447 return BFA_TRUE;
2448} 2448}
2449 2449
2450/** 2450/*
2451 * Setup any additional SG pages needed.Inline SG element is setup 2451 * Setup any additional SG pages needed.Inline SG element is setup
2452 * at queuing time. 2452 * at queuing time.
2453 */ 2453 */
@@ -2458,7 +2458,7 @@ bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
2458 2458
2459 bfa_assert(ioim->nsges > BFI_SGE_INLINE); 2459 bfa_assert(ioim->nsges > BFI_SGE_INLINE);
2460 2460
2461 /** 2461 /*
2462 * allocate SG pages needed 2462 * allocate SG pages needed
2463 */ 2463 */
2464 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); 2464 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
@@ -2507,7 +2507,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
2507 sge->sg_len = sg_dma_len(sg); 2507 sge->sg_len = sg_dma_len(sg);
2508 pgcumsz += sge->sg_len; 2508 pgcumsz += sge->sg_len;
2509 2509
2510 /** 2510 /*
2511 * set flags 2511 * set flags
2512 */ 2512 */
2513 if (i < (nsges - 1)) 2513 if (i < (nsges - 1))
@@ -2522,7 +2522,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
2522 2522
2523 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); 2523 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2524 2524
2525 /** 2525 /*
2526 * set the link element of each page 2526 * set the link element of each page
2527 */ 2527 */
2528 if (sgeid == ioim->nsges) { 2528 if (sgeid == ioim->nsges) {
@@ -2539,7 +2539,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
2539 } while (sgeid < ioim->nsges); 2539 } while (sgeid < ioim->nsges);
2540} 2540}
2541 2541
2542/** 2542/*
2543 * Send I/O abort request to firmware. 2543 * Send I/O abort request to firmware.
2544 */ 2544 */
2545static bfa_boolean_t 2545static bfa_boolean_t
@@ -2548,14 +2548,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2548 struct bfi_ioim_abort_req_s *m; 2548 struct bfi_ioim_abort_req_s *m;
2549 enum bfi_ioim_h2i msgop; 2549 enum bfi_ioim_h2i msgop;
2550 2550
2551 /** 2551 /*
2552 * check for room in queue to send request now 2552 * check for room in queue to send request now
2553 */ 2553 */
2554 m = bfa_reqq_next(ioim->bfa, ioim->reqq); 2554 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2555 if (!m) 2555 if (!m)
2556 return BFA_FALSE; 2556 return BFA_FALSE;
2557 2557
2558 /** 2558 /*
2559 * build i/o request message next 2559 * build i/o request message next
2560 */ 2560 */
2561 if (ioim->iosp->abort_explicit) 2561 if (ioim->iosp->abort_explicit)
@@ -2567,14 +2567,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2567 m->io_tag = cpu_to_be16(ioim->iotag); 2567 m->io_tag = cpu_to_be16(ioim->iotag);
2568 m->abort_tag = ++ioim->abort_tag; 2568 m->abort_tag = ++ioim->abort_tag;
2569 2569
2570 /** 2570 /*
2571 * queue I/O message to firmware 2571 * queue I/O message to firmware
2572 */ 2572 */
2573 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2573 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2574 return BFA_TRUE; 2574 return BFA_TRUE;
2575} 2575}
2576 2576
2577/** 2577/*
2578 * Call to resume any I/O requests waiting for room in request queue. 2578 * Call to resume any I/O requests waiting for room in request queue.
2579 */ 2579 */
2580static void 2580static void
@@ -2590,7 +2590,7 @@ bfa_ioim_qresume(void *cbarg)
2590static void 2590static void
2591bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) 2591bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2592{ 2592{
2593 /** 2593 /*
2594 * Move IO from itnim queue to fcpim global queue since itnim will be 2594 * Move IO from itnim queue to fcpim global queue since itnim will be
2595 * freed. 2595 * freed.
2596 */ 2596 */
@@ -2623,13 +2623,13 @@ bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2623 return BFA_TRUE; 2623 return BFA_TRUE;
2624} 2624}
2625 2625
2626/** 2626/*
2627 * or after the link comes back. 2627 * or after the link comes back.
2628 */ 2628 */
2629void 2629void
2630bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) 2630bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2631{ 2631{
2632 /** 2632 /*
2633 * If path tov timer expired, failback with PATHTOV status - these 2633 * If path tov timer expired, failback with PATHTOV status - these
2634 * IO requests are not normally retried by IO stack. 2634 * IO requests are not normally retried by IO stack.
2635 * 2635 *
@@ -2644,7 +2644,7 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2644 } 2644 }
2645 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); 2645 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2646 2646
2647 /** 2647 /*
2648 * Move IO to fcpim global queue since itnim will be 2648 * Move IO to fcpim global queue since itnim will be
2649 * freed. 2649 * freed.
2650 */ 2650 */
@@ -2654,11 +2654,11 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2654 2654
2655 2655
2656 2656
2657/** 2657/*
2658 * hal_ioim_friend 2658 * hal_ioim_friend
2659 */ 2659 */
2660 2660
2661/** 2661/*
2662 * Memory allocation and initialization. 2662 * Memory allocation and initialization.
2663 */ 2663 */
2664void 2664void
@@ -2670,7 +2670,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2670 u8 *snsinfo; 2670 u8 *snsinfo;
2671 u32 snsbufsz; 2671 u32 snsbufsz;
2672 2672
2673 /** 2673 /*
2674 * claim memory first 2674 * claim memory first
2675 */ 2675 */
2676 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); 2676 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
@@ -2681,7 +2681,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2681 fcpim->ioim_sp_arr = iosp; 2681 fcpim->ioim_sp_arr = iosp;
2682 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); 2682 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
2683 2683
2684 /** 2684 /*
2685 * Claim DMA memory for per IO sense data. 2685 * Claim DMA memory for per IO sense data.
2686 */ 2686 */
2687 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN; 2687 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
@@ -2693,7 +2693,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2693 snsinfo = fcpim->snsbase.kva; 2693 snsinfo = fcpim->snsbase.kva;
2694 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa); 2694 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
2695 2695
2696 /** 2696 /*
2697 * Initialize ioim free queues 2697 * Initialize ioim free queues
2698 */ 2698 */
2699 INIT_LIST_HEAD(&fcpim->ioim_free_q); 2699 INIT_LIST_HEAD(&fcpim->ioim_free_q);
@@ -2722,7 +2722,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2722 } 2722 }
2723} 2723}
2724 2724
2725/** 2725/*
2726 * Driver detach time call. 2726 * Driver detach time call.
2727 */ 2727 */
2728void 2728void
@@ -2858,7 +2858,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
2858 io_lat->max[index] : val; 2858 io_lat->max[index] : val;
2859 io_lat->avg[index] += val; 2859 io_lat->avg[index] += val;
2860} 2860}
2861/** 2861/*
2862 * Called by itnim to clean up IO while going offline. 2862 * Called by itnim to clean up IO while going offline.
2863 */ 2863 */
2864void 2864void
@@ -2881,7 +2881,7 @@ bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2881 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); 2881 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2882} 2882}
2883 2883
2884/** 2884/*
2885 * IOC failure handling. 2885 * IOC failure handling.
2886 */ 2886 */
2887void 2887void
@@ -2892,7 +2892,7 @@ bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2892 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); 2892 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2893} 2893}
2894 2894
2895/** 2895/*
2896 * IO offline TOV popped. Fail the pending IO. 2896 * IO offline TOV popped. Fail the pending IO.
2897 */ 2897 */
2898void 2898void
@@ -2904,11 +2904,11 @@ bfa_ioim_tov(struct bfa_ioim_s *ioim)
2904 2904
2905 2905
2906 2906
2907/** 2907/*
2908 * hal_ioim_api 2908 * hal_ioim_api
2909 */ 2909 */
2910 2910
2911/** 2911/*
2912 * Allocate IOIM resource for initiator mode I/O request. 2912 * Allocate IOIM resource for initiator mode I/O request.
2913 */ 2913 */
2914struct bfa_ioim_s * 2914struct bfa_ioim_s *
@@ -2918,7 +2918,7 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2918 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2918 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2919 struct bfa_ioim_s *ioim; 2919 struct bfa_ioim_s *ioim;
2920 2920
2921 /** 2921 /*
2922 * allocate IOIM resource 2922 * allocate IOIM resource
2923 */ 2923 */
2924 bfa_q_deq(&fcpim->ioim_free_q, &ioim); 2924 bfa_q_deq(&fcpim->ioim_free_q, &ioim);
@@ -2969,7 +2969,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim)
2969 2969
2970 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 2970 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2971 2971
2972 /** 2972 /*
2973 * Obtain the queue over which this request has to be issued 2973 * Obtain the queue over which this request has to be issued
2974 */ 2974 */
2975 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? 2975 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
@@ -2979,7 +2979,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim)
2979 bfa_sm_send_event(ioim, BFA_IOIM_SM_START); 2979 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2980} 2980}
2981 2981
2982/** 2982/*
2983 * Driver I/O abort request. 2983 * Driver I/O abort request.
2984 */ 2984 */
2985bfa_status_t 2985bfa_status_t
@@ -2998,11 +2998,11 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim)
2998} 2998}
2999 2999
3000 3000
3001/** 3001/*
3002 * BFA TSKIM state machine functions 3002 * BFA TSKIM state machine functions
3003 */ 3003 */
3004 3004
3005/** 3005/*
3006 * Task management command beginning state. 3006 * Task management command beginning state.
3007 */ 3007 */
3008static void 3008static void
@@ -3015,7 +3015,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3015 bfa_sm_set_state(tskim, bfa_tskim_sm_active); 3015 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3016 bfa_tskim_gather_ios(tskim); 3016 bfa_tskim_gather_ios(tskim);
3017 3017
3018 /** 3018 /*
3019 * If device is offline, do not send TM on wire. Just cleanup 3019 * If device is offline, do not send TM on wire. Just cleanup
3020 * any pending IO requests and complete TM request. 3020 * any pending IO requests and complete TM request.
3021 */ 3021 */
@@ -3039,7 +3039,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3039 } 3039 }
3040} 3040}
3041 3041
3042/** 3042/*
3043 * brief 3043 * brief
3044 * TM command is active, awaiting completion from firmware to 3044 * TM command is active, awaiting completion from firmware to
3045 * cleanup IO requests in TM scope. 3045 * cleanup IO requests in TM scope.
@@ -3076,7 +3076,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3076 } 3076 }
3077} 3077}
3078 3078
3079/** 3079/*
3080 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup 3080 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3081 * completion event from firmware. 3081 * completion event from firmware.
3082 */ 3082 */
@@ -3087,7 +3087,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3087 3087
3088 switch (event) { 3088 switch (event) {
3089 case BFA_TSKIM_SM_DONE: 3089 case BFA_TSKIM_SM_DONE:
3090 /** 3090 /*
3091 * Ignore and wait for ABORT completion from firmware. 3091 * Ignore and wait for ABORT completion from firmware.
3092 */ 3092 */
3093 break; 3093 break;
@@ -3120,7 +3120,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3120 break; 3120 break;
3121 3121
3122 case BFA_TSKIM_SM_CLEANUP: 3122 case BFA_TSKIM_SM_CLEANUP:
3123 /** 3123 /*
3124 * Ignore, TM command completed on wire. 3124 * Ignore, TM command completed on wire.
3125 * Notify TM completion on IO cleanup completion. 3125 * Notify TM completion on IO cleanup completion.
3126 */ 3126 */
@@ -3137,7 +3137,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3137 } 3137 }
3138} 3138}
3139 3139
3140/** 3140/*
3141 * Task management command is waiting for room in request CQ 3141 * Task management command is waiting for room in request CQ
3142 */ 3142 */
3143static void 3143static void
@@ -3152,7 +3152,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3152 break; 3152 break;
3153 3153
3154 case BFA_TSKIM_SM_CLEANUP: 3154 case BFA_TSKIM_SM_CLEANUP:
3155 /** 3155 /*
3156 * No need to send TM on wire since ITN is offline. 3156 * No need to send TM on wire since ITN is offline.
3157 */ 3157 */
3158 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); 3158 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
@@ -3172,7 +3172,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3172 } 3172 }
3173} 3173}
3174 3174
3175/** 3175/*
3176 * Task management command is active, awaiting for room in request CQ 3176 * Task management command is active, awaiting for room in request CQ
3177 * to send clean up request. 3177 * to send clean up request.
3178 */ 3178 */
@@ -3185,7 +3185,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3185 switch (event) { 3185 switch (event) {
3186 case BFA_TSKIM_SM_DONE: 3186 case BFA_TSKIM_SM_DONE:
3187 bfa_reqq_wcancel(&tskim->reqq_wait); 3187 bfa_reqq_wcancel(&tskim->reqq_wait);
3188 /** 3188 /*
3189 * 3189 *
3190 * Fall through !!! 3190 * Fall through !!!
3191 */ 3191 */
@@ -3207,7 +3207,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3207 } 3207 }
3208} 3208}
3209 3209
3210/** 3210/*
3211 * BFA callback is pending 3211 * BFA callback is pending
3212 */ 3212 */
3213static void 3213static void
@@ -3235,7 +3235,7 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3235 3235
3236 3236
3237 3237
3238/** 3238/*
3239 * hal_tskim_private 3239 * hal_tskim_private
3240 */ 3240 */
3241 3241
@@ -3288,7 +3288,7 @@ bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
3288 return BFA_FALSE; 3288 return BFA_FALSE;
3289} 3289}
3290 3290
3291/** 3291/*
3292 * Gather affected IO requests and task management commands. 3292 * Gather affected IO requests and task management commands.
3293 */ 3293 */
3294static void 3294static void
@@ -3300,7 +3300,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3300 3300
3301 INIT_LIST_HEAD(&tskim->io_q); 3301 INIT_LIST_HEAD(&tskim->io_q);
3302 3302
3303 /** 3303 /*
3304 * Gather any active IO requests first. 3304 * Gather any active IO requests first.
3305 */ 3305 */
3306 list_for_each_safe(qe, qen, &itnim->io_q) { 3306 list_for_each_safe(qe, qen, &itnim->io_q) {
@@ -3312,7 +3312,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3312 } 3312 }
3313 } 3313 }
3314 3314
3315 /** 3315 /*
3316 * Failback any pending IO requests immediately. 3316 * Failback any pending IO requests immediately.
3317 */ 3317 */
3318 list_for_each_safe(qe, qen, &itnim->pending_q) { 3318 list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -3326,7 +3326,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3326 } 3326 }
3327} 3327}
3328 3328
3329/** 3329/*
3330 * IO cleanup completion 3330 * IO cleanup completion
3331 */ 3331 */
3332static void 3332static void
@@ -3338,7 +3338,7 @@ bfa_tskim_cleanp_comp(void *tskim_cbarg)
3338 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); 3338 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3339} 3339}
3340 3340
3341/** 3341/*
3342 * Gather affected IO requests and task management commands. 3342 * Gather affected IO requests and task management commands.
3343 */ 3343 */
3344static void 3344static void
@@ -3358,7 +3358,7 @@ bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3358 bfa_wc_wait(&tskim->wc); 3358 bfa_wc_wait(&tskim->wc);
3359} 3359}
3360 3360
3361/** 3361/*
3362 * Send task management request to firmware. 3362 * Send task management request to firmware.
3363 */ 3363 */
3364static bfa_boolean_t 3364static bfa_boolean_t
@@ -3367,14 +3367,14 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
3367 struct bfa_itnim_s *itnim = tskim->itnim; 3367 struct bfa_itnim_s *itnim = tskim->itnim;
3368 struct bfi_tskim_req_s *m; 3368 struct bfi_tskim_req_s *m;
3369 3369
3370 /** 3370 /*
3371 * check for room in queue to send request now 3371 * check for room in queue to send request now
3372 */ 3372 */
3373 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3373 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3374 if (!m) 3374 if (!m)
3375 return BFA_FALSE; 3375 return BFA_FALSE;
3376 3376
3377 /** 3377 /*
3378 * build i/o request message next 3378 * build i/o request message next
3379 */ 3379 */
3380 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, 3380 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
@@ -3386,14 +3386,14 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
3386 m->lun = tskim->lun; 3386 m->lun = tskim->lun;
3387 m->tm_flags = tskim->tm_cmnd; 3387 m->tm_flags = tskim->tm_cmnd;
3388 3388
3389 /** 3389 /*
3390 * queue I/O message to firmware 3390 * queue I/O message to firmware
3391 */ 3391 */
3392 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3392 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3393 return BFA_TRUE; 3393 return BFA_TRUE;
3394} 3394}
3395 3395
3396/** 3396/*
3397 * Send abort request to cleanup an active TM to firmware. 3397 * Send abort request to cleanup an active TM to firmware.
3398 */ 3398 */
3399static bfa_boolean_t 3399static bfa_boolean_t
@@ -3402,14 +3402,14 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3402 struct bfa_itnim_s *itnim = tskim->itnim; 3402 struct bfa_itnim_s *itnim = tskim->itnim;
3403 struct bfi_tskim_abortreq_s *m; 3403 struct bfi_tskim_abortreq_s *m;
3404 3404
3405 /** 3405 /*
3406 * check for room in queue to send request now 3406 * check for room in queue to send request now
3407 */ 3407 */
3408 m = bfa_reqq_next(tskim->bfa, itnim->reqq); 3408 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3409 if (!m) 3409 if (!m)
3410 return BFA_FALSE; 3410 return BFA_FALSE;
3411 3411
3412 /** 3412 /*
3413 * build i/o request message next 3413 * build i/o request message next
3414 */ 3414 */
3415 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, 3415 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
@@ -3417,14 +3417,14 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3417 3417
3418 m->tsk_tag = cpu_to_be16(tskim->tsk_tag); 3418 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3419 3419
3420 /** 3420 /*
3421 * queue I/O message to firmware 3421 * queue I/O message to firmware
3422 */ 3422 */
3423 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3423 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3424 return BFA_TRUE; 3424 return BFA_TRUE;
3425} 3425}
3426 3426
3427/** 3427/*
3428 * Call to resume task management cmnd waiting for room in request queue. 3428 * Call to resume task management cmnd waiting for room in request queue.
3429 */ 3429 */
3430static void 3430static void
@@ -3436,7 +3436,7 @@ bfa_tskim_qresume(void *cbarg)
3436 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); 3436 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3437} 3437}
3438 3438
3439/** 3439/*
3440 * Cleanup IOs associated with a task management command on IOC failures. 3440 * Cleanup IOs associated with a task management command on IOC failures.
3441 */ 3441 */
3442static void 3442static void
@@ -3453,11 +3453,11 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3453 3453
3454 3454
3455 3455
3456/** 3456/*
3457 * hal_tskim_friend 3457 * hal_tskim_friend
3458 */ 3458 */
3459 3459
3460/** 3460/*
3461 * Notification on completions from related ioim. 3461 * Notification on completions from related ioim.
3462 */ 3462 */
3463void 3463void
@@ -3466,7 +3466,7 @@ bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3466 bfa_wc_down(&tskim->wc); 3466 bfa_wc_down(&tskim->wc);
3467} 3467}
3468 3468
3469/** 3469/*
3470 * Handle IOC h/w failure notification from itnim. 3470 * Handle IOC h/w failure notification from itnim.
3471 */ 3471 */
3472void 3472void
@@ -3477,7 +3477,7 @@ bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3477 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); 3477 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3478} 3478}
3479 3479
3480/** 3480/*
3481 * Cleanup TM command and associated IOs as part of ITNIM offline. 3481 * Cleanup TM command and associated IOs as part of ITNIM offline.
3482 */ 3482 */
3483void 3483void
@@ -3488,7 +3488,7 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3488 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); 3488 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3489} 3489}
3490 3490
3491/** 3491/*
3492 * Memory allocation and initialization. 3492 * Memory allocation and initialization.
3493 */ 3493 */
3494void 3494void
@@ -3524,7 +3524,7 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3524void 3524void
3525bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim) 3525bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
3526{ 3526{
3527 /** 3527 /*
3528 * @todo 3528 * @todo
3529 */ 3529 */
3530} 3530}
@@ -3542,7 +3542,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3542 3542
3543 tskim->tsk_status = rsp->tsk_status; 3543 tskim->tsk_status = rsp->tsk_status;
3544 3544
3545 /** 3545 /*
3546 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort 3546 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3547 * requests. All other statuses are for normal completions. 3547 * requests. All other statuses are for normal completions.
3548 */ 3548 */
@@ -3557,7 +3557,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3557 3557
3558 3558
3559 3559
3560/** 3560/*
3561 * hal_tskim_api 3561 * hal_tskim_api
3562 */ 3562 */
3563 3563
@@ -3584,7 +3584,7 @@ bfa_tskim_free(struct bfa_tskim_s *tskim)
3584 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); 3584 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3585} 3585}
3586 3586
3587/** 3587/*
3588 * Start a task management command. 3588 * Start a task management command.
3589 * 3589 *
3590 * @param[in] tskim BFA task management command instance 3590 * @param[in] tskim BFA task management command instance
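
Every hunk above (and in the files that follow) makes the same one-character change: the block-comment opener with two asterisks becomes a plain one, because these blocks are ordinary comments rather than kernel-doc entries (which expect a name line and parameter annotations to follow the opener). A before/after illustration with hypothetical declarations, not lines taken from the driver (illustration only, not part of the patch):

/* Before: the double-asterisk opener tells kernel-doc tooling to parse
 * the block, which these free-form comments are not written for.
 */
/**
 * queue I/O message to firmware
 */
static int fw_queue_doc_style;

/* After: a plain block comment, the form this patch converts to. */
/*
 * queue I/O message to firmware
 */
static int fw_queue_plain_style;
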
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 88eb91a302fe..c94502dfac66 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfa_fcs.c BFA FCS main 19 * bfa_fcs.c BFA FCS main
20 */ 20 */
21 21
@@ -25,7 +25,7 @@
25 25
26BFA_TRC_FILE(FCS, FCS); 26BFA_TRC_FILE(FCS, FCS);
27 27
28/** 28/*
29 * FCS sub-modules 29 * FCS sub-modules
30 */ 30 */
31struct bfa_fcs_mod_s { 31struct bfa_fcs_mod_s {
@@ -43,7 +43,7 @@ static struct bfa_fcs_mod_s fcs_modules[] = {
43 bfa_fcs_fabric_modexit }, 43 bfa_fcs_fabric_modexit },
44}; 44};
45 45
46/** 46/*
47 * fcs_api BFA FCS API 47 * fcs_api BFA FCS API
48 */ 48 */
49 49
@@ -58,11 +58,11 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
58 58
59 59
60 60
61/** 61/*
62 * fcs_api BFA FCS API 62 * fcs_api BFA FCS API
63 */ 63 */
64 64
65/** 65/*
66 * fcs attach -- called once to initialize data structures at driver attach time 66 * fcs attach -- called once to initialize data structures at driver attach time
67 */ 67 */
68void 68void
@@ -86,7 +86,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
86 } 86 }
87} 87}
88 88
89/** 89/*
90 * fcs initialization, called once after bfa initialization is complete 90 * fcs initialization, called once after bfa initialization is complete
91 */ 91 */
92void 92void
@@ -110,7 +110,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
110 } 110 }
111} 111}
112 112
113/** 113/*
114 * Start FCS operations. 114 * Start FCS operations.
115 */ 115 */
116void 116void
@@ -119,7 +119,7 @@ bfa_fcs_start(struct bfa_fcs_s *fcs)
119 bfa_fcs_fabric_modstart(fcs); 119 bfa_fcs_fabric_modstart(fcs);
120} 120}
121 121
122/** 122/*
123 * brief 123 * brief
124 * FCS driver details initialization. 124 * FCS driver details initialization.
125 * 125 *
@@ -138,7 +138,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
138 bfa_fcs_fabric_psymb_init(&fcs->fabric); 138 bfa_fcs_fabric_psymb_init(&fcs->fabric);
139} 139}
140 140
141/** 141/*
142 * brief 142 * brief
143 * FCS FDMI Driver Parameter Initialization 143 * FCS FDMI Driver Parameter Initialization
144 * 144 *
@@ -154,7 +154,7 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
154 fcs->fdmi_enabled = fdmi_enable; 154 fcs->fdmi_enabled = fdmi_enable;
155 155
156} 156}
157/** 157/*
158 * brief 158 * brief
159 * FCS instance cleanup and exit. 159 * FCS instance cleanup and exit.
160 * 160 *
@@ -196,7 +196,7 @@ bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
196 bfa_wc_down(&fcs->wc); 196 bfa_wc_down(&fcs->wc);
197} 197}
198 198
199/** 199/*
200 * Fabric module implementation. 200 * Fabric module implementation.
201 */ 201 */
202 202
@@ -232,11 +232,11 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
232 u32 rsp_len, 232 u32 rsp_len,
233 u32 resid_len, 233 u32 resid_len,
234 struct fchs_s *rspfchs); 234 struct fchs_s *rspfchs);
235/** 235/*
236 * fcs_fabric_sm fabric state machine functions 236 * fcs_fabric_sm fabric state machine functions
237 */ 237 */
238 238
239/** 239/*
240 * Fabric state machine events 240 * Fabric state machine events
241 */ 241 */
242enum bfa_fcs_fabric_event { 242enum bfa_fcs_fabric_event {
@@ -286,7 +286,7 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
286 enum bfa_fcs_fabric_event event); 286 enum bfa_fcs_fabric_event event);
287static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, 287static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
288 enum bfa_fcs_fabric_event event); 288 enum bfa_fcs_fabric_event event);
289/** 289/*
290 * Beginning state before fabric creation. 290 * Beginning state before fabric creation.
291 */ 291 */
292static void 292static void
@@ -312,7 +312,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
312 } 312 }
313} 313}
314 314
315/** 315/*
316 * Beginning state before fabric creation. 316 * Beginning state before fabric creation.
317 */ 317 */
318static void 318static void
@@ -345,7 +345,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
345 } 345 }
346} 346}
347 347
348/** 348/*
349 * Link is down, awaiting LINK UP event from port. This is also the 349 * Link is down, awaiting LINK UP event from port. This is also the
350 * first state at fabric creation. 350 * first state at fabric creation.
351 */ 351 */
@@ -375,7 +375,7 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
375 } 375 }
376} 376}
377 377
378/** 378/*
379 * FLOGI is in progress, awaiting FLOGI reply. 379 * FLOGI is in progress, awaiting FLOGI reply.
380 */ 380 */
381static void 381static void
@@ -468,7 +468,7 @@ bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
468 } 468 }
469} 469}
470 470
471/** 471/*
472 * Authentication is in progress, awaiting authentication results. 472 * Authentication is in progress, awaiting authentication results.
473 */ 473 */
474static void 474static void
@@ -508,7 +508,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
508 } 508 }
509} 509}
510 510
511/** 511/*
512 * Authentication failed 512 * Authentication failed
513 */ 513 */
514static void 514static void
@@ -534,7 +534,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
534 } 534 }
535} 535}
536 536
537/** 537/*
538 * Port is in loopback mode. 538 * Port is in loopback mode.
539 */ 539 */
540static void 540static void
@@ -560,7 +560,7 @@ bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
560 } 560 }
561} 561}
562 562
563/** 563/*
564 * There is no attached fabric - private loop or NPort-to-NPort topology. 564 * There is no attached fabric - private loop or NPort-to-NPort topology.
565 */ 565 */
566static void 566static void
@@ -593,7 +593,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
593 } 593 }
594} 594}
595 595
596/** 596/*
597 * Fabric is online - normal operating state. 597 * Fabric is online - normal operating state.
598 */ 598 */
599static void 599static void
@@ -628,7 +628,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
628 } 628 }
629} 629}
630 630
631/** 631/*
632 * Exchanging virtual fabric parameters. 632 * Exchanging virtual fabric parameters.
633 */ 633 */
634static void 634static void
@@ -652,7 +652,7 @@ bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
652 } 652 }
653} 653}
654 654
655/** 655/*
656 * EVFP exchange complete and VFT tagging is enabled. 656 * EVFP exchange complete and VFT tagging is enabled.
657 */ 657 */
658static void 658static void
@@ -663,7 +663,7 @@ bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
663 bfa_trc(fabric->fcs, event); 663 bfa_trc(fabric->fcs, event);
664} 664}
665 665
666/** 666/*
667 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). 667 * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
668 */ 668 */
669static void 669static void
@@ -684,7 +684,7 @@ bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
684 fabric->event_arg.swp_vfid); 684 fabric->event_arg.swp_vfid);
685} 685}
686 686
687/** 687/*
688 * Fabric is being deleted, awaiting vport delete completions. 688 * Fabric is being deleted, awaiting vport delete completions.
689 */ 689 */
690static void 690static void
@@ -714,7 +714,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
714 714
715 715
716 716
717/** 717/*
718 * fcs_fabric_private fabric private functions 718 * fcs_fabric_private fabric private functions
719 */ 719 */
720 720
@@ -728,7 +728,7 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); 728 port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
729} 729}
730 730
731/** 731/*
732 * Port Symbolic Name Creation for base port. 732 * Port Symbolic Name Creation for base port.
733 */ 733 */
734void 734void
@@ -789,7 +789,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
789 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; 789 port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
790} 790}
791 791
792/** 792/*
793 * bfa lps login completion callback 793 * bfa lps login completion callback
794 */ 794 */
795void 795void
@@ -867,7 +867,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
867 bfa_trc(fabric->fcs, fabric->is_npiv); 867 bfa_trc(fabric->fcs, fabric->is_npiv);
868 bfa_trc(fabric->fcs, fabric->is_auth); 868 bfa_trc(fabric->fcs, fabric->is_auth);
869} 869}
870/** 870/*
871 * Allocate and send FLOGI. 871 * Allocate and send FLOGI.
872 */ 872 */
873static void 873static void
@@ -897,7 +897,7 @@ bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
897 bfa_fcs_fabric_set_opertype(fabric); 897 bfa_fcs_fabric_set_opertype(fabric);
898 fabric->stats.fabric_onlines++; 898 fabric->stats.fabric_onlines++;
899 899
900 /** 900 /*
901 * notify online event to base and then virtual ports 901 * notify online event to base and then virtual ports
902 */ 902 */
903 bfa_fcs_lport_online(&fabric->bport); 903 bfa_fcs_lport_online(&fabric->bport);
@@ -917,7 +917,7 @@ bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
917 bfa_trc(fabric->fcs, fabric->fabric_name); 917 bfa_trc(fabric->fcs, fabric->fabric_name);
918 fabric->stats.fabric_offlines++; 918 fabric->stats.fabric_offlines++;
919 919
920 /** 920 /*
921 * notify offline event first to vports and then base port. 921 * notify offline event first to vports and then base port.
922 */ 922 */
923 list_for_each_safe(qe, qen, &fabric->vport_q) { 923 list_for_each_safe(qe, qen, &fabric->vport_q) {
@@ -939,7 +939,7 @@ bfa_fcs_fabric_delay(void *cbarg)
939 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); 939 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
940} 940}
941 941
942/** 942/*
943 * Delete all vports and wait for vport delete completions. 943 * Delete all vports and wait for vport delete completions.
944 */ 944 */
945static void 945static void
@@ -965,11 +965,11 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
965 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); 965 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
966} 966}
967 967
968/** 968/*
969 * fcs_fabric_public fabric public functions 969 * fcs_fabric_public fabric public functions
970 */ 970 */
971 971
972/** 972/*
973 * Attach time initialization. 973 * Attach time initialization.
974 */ 974 */
975void 975void
@@ -980,7 +980,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
980 fabric = &fcs->fabric; 980 fabric = &fcs->fabric;
981 memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); 981 memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
982 982
983 /** 983 /*
984 * Initialize base fabric. 984 * Initialize base fabric.
985 */ 985 */
986 fabric->fcs = fcs; 986 fabric->fcs = fcs;
@@ -989,7 +989,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
989 fabric->lps = bfa_lps_alloc(fcs->bfa); 989 fabric->lps = bfa_lps_alloc(fcs->bfa);
990 bfa_assert(fabric->lps); 990 bfa_assert(fabric->lps);
991 991
992 /** 992 /*
993 * Initialize fabric delete completion handler. Fabric deletion is 993 * Initialize fabric delete completion handler. Fabric deletion is
994 * complete when the last vport delete is complete. 994 * complete when the last vport delete is complete.
995 */ 995 */
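
The comment just above describes a wait-counter idiom: adding a vport raises the count (bfa_wc_up() in the addvport hunk further down), each completed vport delete lowers it (bfa_wc_down()), and the registered completion handler runs only when the count drains to zero. A rough sketch of that idea, simplified and not the driver's actual bfa_wc implementation (illustration only, not part of the patch):

#include <stdio.h>

/* Simplified wait counter -- illustrative, not the driver's bfa_wc_s. */
struct wc {
	int count;
	void (*done)(void *arg);
	void *arg;
};

static void wc_init(struct wc *wc, void (*done)(void *), void *arg)
{
	wc->count = 1;		/* held by the initiator until wc_wait() */
	wc->done = done;
	wc->arg = arg;
}

static void wc_up(struct wc *wc)
{
	wc->count++;
}

static void wc_down(struct wc *wc)
{
	if (--wc->count == 0)
		wc->done(wc->arg);
}

/* Drop the initiator's reference; fires at once if nothing is pending. */
static void wc_wait(struct wc *wc)
{
	wc_down(wc);
}

static void fabric_delete_done(void *arg)
{
	printf("fabric %s deleted\n", (const char *)arg);
}

int main(void)
{
	struct wc wc;

	wc_init(&wc, fabric_delete_done, "fab0");
	wc_up(&wc);	/* one vport delete outstanding            */
	wc_wait(&wc);	/* done issuing deletes                    */
	wc_down(&wc);	/* vport delete completes -> handler runs  */
	return 0;
}
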
@@ -1007,7 +1007,7 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
1007 bfa_trc(fcs, 0); 1007 bfa_trc(fcs, 0);
1008} 1008}
1009 1009
1010/** 1010/*
1011 * Module cleanup 1011 * Module cleanup
1012 */ 1012 */
1013void 1013void
@@ -1017,7 +1017,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
1017 1017
1018 bfa_trc(fcs, 0); 1018 bfa_trc(fcs, 0);
1019 1019
1020 /** 1020 /*
1021 * Cleanup base fabric. 1021 * Cleanup base fabric.
1022 */ 1022 */
1023 fabric = &fcs->fabric; 1023 fabric = &fcs->fabric;
@@ -1025,7 +1025,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
1025 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); 1025 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
1026} 1026}
1027 1027
1028/** 1028/*
1029 * Fabric module start -- kick starts FCS actions 1029 * Fabric module start -- kick starts FCS actions
1030 */ 1030 */
1031void 1031void
@@ -1038,7 +1038,7 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); 1038 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
1039} 1039}
1040 1040
1041/** 1041/*
1042 * Suspend fabric activity as part of driver suspend. 1042 * Suspend fabric activity as part of driver suspend.
1043 */ 1043 */
1044void 1044void
@@ -1064,7 +1064,7 @@ bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
1064 return fabric->oper_type; 1064 return fabric->oper_type;
1065} 1065}
1066 1066
1067/** 1067/*
1068 * Link up notification from BFA physical port module. 1068 * Link up notification from BFA physical port module.
1069 */ 1069 */
1070void 1070void
@@ -1074,7 +1074,7 @@ bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
1074 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); 1074 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
1075} 1075}
1076 1076
1077/** 1077/*
1078 * Link down notification from BFA physical port module. 1078 * Link down notification from BFA physical port module.
1079 */ 1079 */
1080void 1080void
@@ -1084,7 +1084,7 @@ bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
1084 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); 1084 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
1085} 1085}
1086 1086
1087/** 1087/*
1088 * A child vport is being created in the fabric. 1088 * A child vport is being created in the fabric.
1089 * 1089 *
1090 * Call from vport module at vport creation. A list of base port and vports 1090 * Call from vport module at vport creation. A list of base port and vports
@@ -1099,7 +1099,7 @@ void
1099bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, 1099bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
1100 struct bfa_fcs_vport_s *vport) 1100 struct bfa_fcs_vport_s *vport)
1101{ 1101{
1102 /** 1102 /*
1103 * - add vport to fabric's vport_q 1103 * - add vport to fabric's vport_q
1104 */ 1104 */
1105 bfa_trc(fabric->fcs, fabric->vf_id); 1105 bfa_trc(fabric->fcs, fabric->vf_id);
@@ -1109,7 +1109,7 @@ bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
1109 bfa_wc_up(&fabric->wc); 1109 bfa_wc_up(&fabric->wc);
1110} 1110}
1111 1111
1112/** 1112/*
1113 * A child vport is being deleted from fabric. 1113 * A child vport is being deleted from fabric.
1114 * 1114 *
1115 * Vport is being deleted. 1115 * Vport is being deleted.
@@ -1123,7 +1123,7 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
1123 bfa_wc_down(&fabric->wc); 1123 bfa_wc_down(&fabric->wc);
1124} 1124}
1125 1125
1126/** 1126/*
1127 * Base port is deleted. 1127 * Base port is deleted.
1128 */ 1128 */
1129void 1129void
@@ -1133,7 +1133,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
1133} 1133}
1134 1134
1135 1135
1136/** 1136/*
1137 * Check if fabric is online. 1137 * Check if fabric is online.
1138 * 1138 *
1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf. 1139 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1146,7 +1146,7 @@ bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); 1146 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
1147} 1147}
1148 1148
1149/** 1149/*
1150 * brief 1150 * brief
1151 * 1151 *
1152 */ 1152 */
@@ -1158,7 +1158,7 @@ bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
1158 return BFA_STATUS_OK; 1158 return BFA_STATUS_OK;
1159} 1159}
1160 1160
1161/** 1161/*
1162 * Lookup for a vport within a fabric given its pwwn 1162 * Lookup for a vport within a fabric given its pwwn
1163 */ 1163 */
1164struct bfa_fcs_vport_s * 1164struct bfa_fcs_vport_s *
@@ -1176,7 +1176,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
1176 return NULL; 1176 return NULL;
1177} 1177}
1178 1178
1179/** 1179/*
1180 * In a given fabric, return the number of lports. 1180 * In a given fabric, return the number of lports.
1181 * 1181 *
1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf. 1182 * param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1214,7 +1214,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
1214 1214
1215 return oui; 1215 return oui;
1216} 1216}
1217/** 1217/*
1218 * Unsolicited frame receive handling. 1218 * Unsolicited frame receive handling.
1219 */ 1219 */
1220void 1220void
@@ -1230,7 +1230,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1230 bfa_trc(fabric->fcs, len); 1230 bfa_trc(fabric->fcs, len);
1231 bfa_trc(fabric->fcs, pid); 1231 bfa_trc(fabric->fcs, pid);
1232 1232
1233 /** 1233 /*
1234 * Look for our own FLOGI frames being looped back. This means an 1234 * Look for our own FLOGI frames being looped back. This means an
1235 * external loopback cable is in place. Our own FLOGI frames are 1235 * external loopback cable is in place. Our own FLOGI frames are
1236 * sometimes looped back when switch port gets temporarily bypassed. 1236 * sometimes looped back when switch port gets temporarily bypassed.
@@ -1242,7 +1242,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1242 return; 1242 return;
1243 } 1243 }
1244 1244
1245 /** 1245 /*
1246 * FLOGI/EVFP exchanges should be consumed by base fabric. 1246 * FLOGI/EVFP exchanges should be consumed by base fabric.
1247 */ 1247 */
1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { 1248 if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
@@ -1252,7 +1252,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1252 } 1252 }
1253 1253
1254 if (fabric->bport.pid == pid) { 1254 if (fabric->bport.pid == pid) {
1255 /** 1255 /*
1256 * All authentication frames should be routed to auth 1256 * All authentication frames should be routed to auth
1257 */ 1257 */
1258 bfa_trc(fabric->fcs, els_cmd->els_code); 1258 bfa_trc(fabric->fcs, els_cmd->els_code);
@@ -1266,7 +1266,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1266 return; 1266 return;
1267 } 1267 }
1268 1268
1269 /** 1269 /*
1270 * look for a matching local port ID 1270 * look for a matching local port ID
1271 */ 1271 */
1272 list_for_each(qe, &fabric->vport_q) { 1272 list_for_each(qe, &fabric->vport_q) {
@@ -1280,7 +1280,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1280 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); 1280 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
1281} 1281}
1282 1282
1283/** 1283/*
1284 * Unsolicited frames to be processed by fabric. 1284 * Unsolicited frames to be processed by fabric.
1285 */ 1285 */
1286static void 1286static void
@@ -1304,7 +1304,7 @@ bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1304 } 1304 }
1305} 1305}
1306 1306
1307/** 1307/*
1308 * Process incoming FLOGI 1308 * Process incoming FLOGI
1309 */ 1309 */
1310static void 1310static void
@@ -1351,7 +1351,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1351 struct fchs_s fchs; 1351 struct fchs_s fchs;
1352 1352
1353 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); 1353 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
1354 /** 1354 /*
1355 * Do not expect this failure -- expect remote node to retry 1355 * Do not expect this failure -- expect remote node to retry
1356 */ 1356 */
1357 if (!fcxp) 1357 if (!fcxp)
@@ -1370,7 +1370,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1370 FC_MAX_PDUSZ, 0); 1370 FC_MAX_PDUSZ, 0);
1371} 1371}
1372 1372
1373/** 1373/*
1374 * Flogi Acc completion callback. 1374 * Flogi Acc completion callback.
1375 */ 1375 */
1376static void 1376static void
@@ -1417,7 +1417,7 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1417 } 1417 }
1418} 1418}
1419 1419
1420/** 1420/*
1421 * Returns FCS vf structure for a given vf_id. 1421 * Returns FCS vf structure for a given vf_id.
1422 * 1422 *
1423 * param[in] vf_id - VF_ID 1423 * param[in] vf_id - VF_ID
@@ -1435,7 +1435,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
1435 return NULL; 1435 return NULL;
1436} 1436}
1437 1437
1438/** 1438/*
1439 * BFA FCS PPORT ( physical port) 1439 * BFA FCS PPORT ( physical port)
1440 */ 1440 */
1441static void 1441static void
@@ -1465,11 +1465,11 @@ bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
1465 bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); 1465 bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
1466} 1466}
1467 1467
1468/** 1468/*
1469 * BFA FCS UF ( Unsolicited Frames) 1469 * BFA FCS UF ( Unsolicited Frames)
1470 */ 1470 */
1471 1471
1472/** 1472/*
1473 * BFA callback for unsolicited frame receive handler. 1473 * BFA callback for unsolicited frame receive handler.
1474 * 1474 *
1475 * @param[in] cbarg callback arg for receive handler 1475 * @param[in] cbarg callback arg for receive handler
@@ -1486,7 +1486,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
1486 struct fc_vft_s *vft; 1486 struct fc_vft_s *vft;
1487 struct bfa_fcs_fabric_s *fabric; 1487 struct bfa_fcs_fabric_s *fabric;
1488 1488
1489 /** 1489 /*
1490 * check for VFT header 1490 * check for VFT header
1491 */ 1491 */
1492 if (fchs->routing == FC_RTG_EXT_HDR && 1492 if (fchs->routing == FC_RTG_EXT_HDR &&
@@ -1498,7 +1498,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
1498 else 1498 else
1499 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); 1499 fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
1500 1500
1501 /** 1501 /*
1502 * drop frame if vfid is unknown 1502 * drop frame if vfid is unknown
1503 */ 1503 */
1504 if (!fabric) { 1504 if (!fabric) {
@@ -1508,7 +1508,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
1508 return; 1508 return;
1509 } 1509 }
1510 1510
1511 /** 1511 /*
1512 * skip vft header 1512 * skip vft header
1513 */ 1513 */
1514 fchs = (struct fchs_s *) (vft + 1); 1514 fchs = (struct fchs_s *) (vft + 1);
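
The fabric handlers shown in this file (bfa_fcs_fabric_sm_uninit, _linkdown, _online and so on) follow a function-per-state pattern: the current state is the handler function itself, bfa_sm_set_state() swaps the handler, and bfa_sm_send_event() just invokes it with an event code. A compact stand-alone sketch of that pattern, with made-up states and events rather than the driver's (illustration only, not part of the patch):

#include <stdio.h>

/* Made-up events -- the driver has a much richer set. */
enum fab_event { EV_CREATE, EV_LINK_UP, EV_LINK_DOWN };

struct fabric;
typedef void (*fab_state_fn)(struct fabric *f, enum fab_event ev);

struct fabric {
	fab_state_fn state;	/* current state == current handler */
};

static void sm_uninit(struct fabric *f, enum fab_event ev);
static void sm_linkdown(struct fabric *f, enum fab_event ev);
static void sm_online(struct fabric *f, enum fab_event ev);

/* Each state switches on the event and installs the next handler,
 * like bfa_fcs_fabric_sm_linkdown() and friends in the hunks above. */
static void sm_uninit(struct fabric *f, enum fab_event ev)
{
	if (ev == EV_CREATE)
		f->state = sm_linkdown;
}

static void sm_linkdown(struct fabric *f, enum fab_event ev)
{
	if (ev == EV_LINK_UP)
		f->state = sm_online;
}

static void sm_online(struct fabric *f, enum fab_event ev)
{
	if (ev == EV_LINK_DOWN)
		f->state = sm_linkdown;
}

/* bfa_sm_send_event() reduces to calling the current handler. */
static void send_event(struct fabric *f, enum fab_event ev)
{
	f->state(f, ev);
}

int main(void)
{
	struct fabric f = { .state = sm_uninit };

	send_event(&f, EV_CREATE);
	send_event(&f, EV_LINK_UP);
	printf("online handler installed: %d\n", f.state == sm_online);
	return 0;
}
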
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index faca5f3b0b1f..9662bcdeb41d 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * fcpim.c - FCP initiator mode i-t nexus state machine 19 * fcpim.c - FCP initiator mode i-t nexus state machine
20 */ 20 */
21 21
@@ -38,7 +38,7 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
38 bfa_status_t req_status, u32 rsp_len, 38 bfa_status_t req_status, u32 rsp_len,
39 u32 resid_len, struct fchs_s *rsp_fchs); 39 u32 resid_len, struct fchs_s *rsp_fchs);
40 40
41/** 41/*
42 * fcs_itnim_sm FCS itnim state machine events 42 * fcs_itnim_sm FCS itnim state machine events
43 */ 43 */
44 44
@@ -84,7 +84,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
84 {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, 84 {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
85}; 85};
86 86
87/** 87/*
88 * fcs_itnim_sm FCS itnim state machine 88 * fcs_itnim_sm FCS itnim state machine
89 */ 89 */
90 90
@@ -494,11 +494,11 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
494 494
495 495
496 496
497/** 497/*
498 * itnim_public FCS ITNIM public interfaces 498 * itnim_public FCS ITNIM public interfaces
499 */ 499 */
500 500
501/** 501/*
502 * Called by rport when a new rport is created. 502 * Called by rport when a new rport is created.
503 * 503 *
504 * @param[in] rport - remote port. 504 * @param[in] rport - remote port.
@@ -554,7 +554,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
554 return itnim; 554 return itnim;
555} 555}
556 556
557/** 557/*
558 * Called by rport to delete the instance of FCPIM. 558 * Called by rport to delete the instance of FCPIM.
559 * 559 *
560 * @param[in] rport - remote port. 560 * @param[in] rport - remote port.
@@ -566,7 +566,7 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
566 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); 566 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
567} 567}
568 568
569/** 569/*
570 * Notification from rport that PLOGI is complete to initiate FC-4 session. 570 * Notification from rport that PLOGI is complete to initiate FC-4 session.
571 */ 571 */
572void 572void
@@ -586,7 +586,7 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
586 } 586 }
587} 587}
588 588
589/** 589/*
590 * Called by rport to handle a remote device offline. 590 * Called by rport to handle a remote device offline.
591 */ 591 */
592void 592void
@@ -596,7 +596,7 @@ bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim)
596 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); 596 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
597} 597}
598 598
599/** 599/*
600 * Called by rport when remote port is known to be an initiator from 600 * Called by rport when remote port is known to be an initiator from
601 * PRLI received. 601 * PRLI received.
602 */ 602 */
@@ -608,7 +608,7 @@ bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
608 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); 608 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
609} 609}
610 610
611/** 611/*
612 * Called by rport to check if the itnim is online. 612 * Called by rport to check if the itnim is online.
613 */ 613 */
614bfa_status_t 614bfa_status_t
@@ -625,7 +625,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
625 } 625 }
626} 626}
627 627
628/** 628/*
629 * BFA completion callback for bfa_itnim_online(). 629 * BFA completion callback for bfa_itnim_online().
630 */ 630 */
631void 631void
@@ -637,7 +637,7 @@ bfa_cb_itnim_online(void *cbarg)
637 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); 637 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
638} 638}
639 639
640/** 640/*
641 * BFA completion callback for bfa_itnim_offline(). 641 * BFA completion callback for bfa_itnim_offline().
642 */ 642 */
643void 643void
@@ -649,7 +649,7 @@ bfa_cb_itnim_offline(void *cb_arg)
649 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); 649 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
650} 650}
651 651
652/** 652/*
653 * Mark the beginning of PATH TOV handling. IO completion callbacks 653 * Mark the beginning of PATH TOV handling. IO completion callbacks
654 * are still pending. 654 * are still pending.
655 */ 655 */
@@ -661,7 +661,7 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
661 bfa_trc(itnim->fcs, itnim->rport->pwwn); 661 bfa_trc(itnim->fcs, itnim->rport->pwwn);
662} 662}
663 663
664/** 664/*
665 * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. 665 * Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
666 */ 666 */
667void 667void
@@ -674,7 +674,7 @@ bfa_cb_itnim_tov(void *cb_arg)
674 itnim_drv->state = ITNIM_STATE_TIMEOUT; 674 itnim_drv->state = ITNIM_STATE_TIMEOUT;
675} 675}
676 676
677/** 677/*
678 * BFA notification to FCS/driver for second level error recovery. 678 * BFA notification to FCS/driver for second level error recovery.
679 * 679 *
680 * At least one I/O request has timed out and target is unresponsive to 680 * At least one I/O request has timed out and target is unresponsive to
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 263a7bfa16d8..377cbfff6f2e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -15,10 +15,6 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/**
19 * bfa_fcs_lport.c BFA FCS port
20 */
21
22#include "bfa_fcs.h" 18#include "bfa_fcs.h"
23#include "bfa_fcbuild.h" 19#include "bfa_fcbuild.h"
24#include "bfa_fc.h" 20#include "bfa_fc.h"
@@ -26,10 +22,6 @@
26 22
27BFA_TRC_FILE(FCS, PORT); 23BFA_TRC_FILE(FCS, PORT);
28 24
29/**
30 * Forward declarations
31 */
32
33static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, 25static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
34 struct fchs_s *rx_fchs, u8 reason_code, 26 struct fchs_s *rx_fchs, u8 reason_code,
35 u8 reason_code_expl); 27 u8 reason_code_expl);
@@ -72,7 +64,7 @@ static struct {
72 bfa_fcs_lport_n2n_offline}, 64 bfa_fcs_lport_n2n_offline},
73 }; 65 };
74 66
75/** 67/*
76 * fcs_port_sm FCS logical port state machine 68 * fcs_port_sm FCS logical port state machine
77 */ 69 */
78 70
@@ -240,7 +232,7 @@ bfa_fcs_lport_sm_deleting(
240 } 232 }
241} 233}
242 234
243/** 235/*
244 * fcs_port_pvt 236 * fcs_port_pvt
245 */ 237 */
246 238
@@ -272,7 +264,7 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
272 FC_MAX_PDUSZ, 0); 264 FC_MAX_PDUSZ, 0);
273} 265}
274 266
275/** 267/*
276 * Process incoming plogi from a remote port. 268 * Process incoming plogi from a remote port.
277 */ 269 */
278static void 270static void
@@ -303,7 +295,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
303 return; 295 return;
304 } 296 }
305 297
306 /** 298 /*
307 * Direct Attach P2P mode : verify address assigned by the r-port. 299 * Direct Attach P2P mode : verify address assigned by the r-port.
308 */ 300 */
309 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && 301 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -319,12 +311,12 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
319 port->pid = rx_fchs->d_id; 311 port->pid = rx_fchs->d_id;
320 } 312 }
321 313
322 /** 314 /*
323 * First, check if we know the device by pwwn. 315 * First, check if we know the device by pwwn.
324 */ 316 */
325 rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name); 317 rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
326 if (rport) { 318 if (rport) {
327 /** 319 /*
328 * Direct Attach P2P mode : handle address assigned by r-port. 320 * Direct Attach P2P mode : handle address assigned by r-port.
329 */ 321 */
330 if ((!bfa_fcs_fabric_is_switched(port->fabric)) && 322 if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -337,37 +329,37 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
337 return; 329 return;
338 } 330 }
339 331
340 /** 332 /*
341 * Next, lookup rport by PID. 333 * Next, lookup rport by PID.
342 */ 334 */
343 rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id); 335 rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
344 if (!rport) { 336 if (!rport) {
345 /** 337 /*
346 * Inbound PLOGI from a new device. 338 * Inbound PLOGI from a new device.
347 */ 339 */
348 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 340 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
349 return; 341 return;
350 } 342 }
351 343
352 /** 344 /*
353 * Rport is known only by PID. 345 * Rport is known only by PID.
354 */ 346 */
355 if (rport->pwwn) { 347 if (rport->pwwn) {
356 /** 348 /*
357 * This is a different device with the same pid. Old device 349 * This is a different device with the same pid. Old device
358 * disappeared. Send implicit LOGO to old device. 350 * disappeared. Send implicit LOGO to old device.
359 */ 351 */
360 bfa_assert(rport->pwwn != plogi->port_name); 352 bfa_assert(rport->pwwn != plogi->port_name);
361 bfa_fcs_rport_logo_imp(rport); 353 bfa_fcs_rport_logo_imp(rport);
362 354
363 /** 355 /*
364 * Inbound PLOGI from a new device (with old PID). 356 * Inbound PLOGI from a new device (with old PID).
365 */ 357 */
366 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); 358 bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
367 return; 359 return;
368 } 360 }
369 361
370 /** 362 /*
371 * PLOGI crossing each other. 363 * PLOGI crossing each other.
372 */ 364 */
373 bfa_assert(rport->pwwn == WWN_NULL); 365 bfa_assert(rport->pwwn == WWN_NULL);
@@ -598,10 +590,10 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
598 590
599 591
600 592
601/** 593/*
602 * fcs_lport_api BFA FCS port API 594 * fcs_lport_api BFA FCS port API
603 */ 595 */
604/** 596/*
605 * Module initialization 597 * Module initialization
606 */ 598 */
607void 599void
@@ -610,7 +602,7 @@ bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
610 602
611} 603}
612 604
613/** 605/*
614 * Module cleanup 606 * Module cleanup
615 */ 607 */
616void 608void
@@ -619,7 +611,7 @@ bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
619 bfa_fcs_modexit_comp(fcs); 611 bfa_fcs_modexit_comp(fcs);
620} 612}
621 613
622/** 614/*
623 * Unsolicited frame receive handling. 615 * Unsolicited frame receive handling.
624 */ 616 */
625void 617void
@@ -637,7 +629,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
637 return; 629 return;
638 } 630 }
639 631
640 /** 632 /*
641 * First, handle ELSs that do not require a login. 633 * First, handle ELSs that do not require a login.
642 */ 634 */
643 /* 635 /*
@@ -673,7 +665,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
673 bfa_fcs_lport_abts_acc(lport, fchs); 665 bfa_fcs_lport_abts_acc(lport, fchs);
674 return; 666 return;
675 } 667 }
676 /** 668 /*
677 * look for a matching remote port ID 669 * look for a matching remote port ID
678 */ 670 */
679 rport = bfa_fcs_lport_get_rport_by_pid(lport, pid); 671 rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
@@ -686,7 +678,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
686 return; 678 return;
687 } 679 }
688 680
689 /** 681 /*
690 * Only handles ELS frames for now. 682 * Only handles ELS frames for now.
691 */ 683 */
692 if (fchs->type != FC_TYPE_ELS) { 684 if (fchs->type != FC_TYPE_ELS) {
@@ -702,20 +694,20 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
702 } 694 }
703 695
704 if (els_cmd->els_code == FC_ELS_LOGO) { 696 if (els_cmd->els_code == FC_ELS_LOGO) {
705 /** 697 /*
706 * @todo Handle LOGO frames received. 698 * @todo Handle LOGO frames received.
707 */ 699 */
708 return; 700 return;
709 } 701 }
710 702
711 if (els_cmd->els_code == FC_ELS_PRLI) { 703 if (els_cmd->els_code == FC_ELS_PRLI) {
712 /** 704 /*
713 * @todo Handle PRLI frames received. 705 * @todo Handle PRLI frames received.
714 */ 706 */
715 return; 707 return;
716 } 708 }
717 709
718 /** 710 /*
719 * Unhandled ELS frames. Send a LS_RJT. 711 * Unhandled ELS frames. Send a LS_RJT.
720 */ 712 */
721 bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, 713 bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
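
The uf_recv hunks just above end with a simple ELS dispatch: LOGO and PRLI are recognized (and for now left as @todo), while anything else is bounced with an LS_RJT of "command not supported". A small stand-alone sketch of that dispatch, with illustrative command codes and a stub reject routine rather than the driver's (illustration only, not part of the patch):

#include <stdio.h>

/* Illustrative ELS command codes. */
enum els_code { ELS_LOGO = 0x03, ELS_PRLI = 0x20, ELS_TEST = 0x7f };

/* Stand-in for building and sending an LS_RJT back to the sender. */
static void send_ls_rjt(enum els_code cmd)
{
	printf("LS_RJT: ELS 0x%02x not supported\n", (unsigned int)cmd);
}

/*
 * Mirrors the tail of bfa_fcs_lport_uf_recv() above: known ELS codes
 * are consumed (or deliberately ignored), everything else is rejected.
 */
static void lport_handle_els(enum els_code cmd)
{
	switch (cmd) {
	case ELS_LOGO:		/* @todo in the driver */
	case ELS_PRLI:		/* @todo in the driver */
		return;
	default:
		send_ls_rjt(cmd);
	}
}

int main(void)
{
	lport_handle_els(ELS_LOGO);
	lport_handle_els(ELS_TEST);
	return 0;
}
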
@@ -723,7 +715,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
723 715
724} 716}
725 717
726/** 718/*
727 * PID based Lookup for a R-Port in the Port R-Port Queue 719 * PID based Lookup for a R-Port in the Port R-Port Queue
728 */ 720 */
729struct bfa_fcs_rport_s * 721struct bfa_fcs_rport_s *
@@ -742,7 +734,7 @@ bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
742 return NULL; 734 return NULL;
743} 735}
744 736
745/** 737/*
746 * PWWN based Lookup for a R-Port in the Port R-Port Queue 738 * PWWN based Lookup for a R-Port in the Port R-Port Queue
747 */ 739 */
748struct bfa_fcs_rport_s * 740struct bfa_fcs_rport_s *
@@ -761,7 +753,7 @@ bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn)
761 return NULL; 753 return NULL;
762} 754}
763 755
764/** 756/*
765 * NWWN based Lookup for a R-Port in the Port R-Port Queue 757 * NWWN based Lookup for a R-Port in the Port R-Port Queue
766 */ 758 */
767struct bfa_fcs_rport_s * 759struct bfa_fcs_rport_s *
@@ -780,7 +772,7 @@ bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
780 return NULL; 772 return NULL;
781} 773}
782 774
783/** 775/*
784 * Called by rport module when new rports are discovered. 776 * Called by rport module when new rports are discovered.
785 */ 777 */
786void 778void
@@ -792,7 +784,7 @@ bfa_fcs_lport_add_rport(
792 port->num_rports++; 784 port->num_rports++;
793} 785}
794 786
795/** 787/*
796 * Called by rport module when rports are deleted. 788 * Called by rport module when rports are deleted.
797 */ 789 */
798void 790void
@@ -807,7 +799,7 @@ bfa_fcs_lport_del_rport(
807 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT); 799 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
808} 800}
809 801
810/** 802/*
811 * Called by fabric for base port when fabric login is complete. 803 * Called by fabric for base port when fabric login is complete.
812 * Called by vport for virtual ports when FDISC is complete. 804 * Called by vport for virtual ports when FDISC is complete.
813 */ 805 */
@@ -817,7 +809,7 @@ bfa_fcs_lport_online(struct bfa_fcs_lport_s *port)
817 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); 809 bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
818} 810}
819 811
820/** 812/*
821 * Called by fabric for base port when fabric goes offline. 813 * Called by fabric for base port when fabric goes offline.
822 * Called by vport for virtual ports when virtual port becomes offline. 814 * Called by vport for virtual ports when virtual port becomes offline.
823 */ 815 */
@@ -827,7 +819,7 @@ bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
827 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); 819 bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
828} 820}
829 821
830/** 822/*
831 * Called by fabric to delete base lport and associated resources. 823 * Called by fabric to delete base lport and associated resources.
832 * 824 *
833 * Called by vport to delete lport and associated resources. Should call 825 * Called by vport to delete lport and associated resources. Should call
@@ -839,7 +831,7 @@ bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port)
839 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); 831 bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
840} 832}
841 833
842/** 834/*
843 * Return TRUE if port is online, else return FALSE 835 * Return TRUE if port is online, else return FALSE
844 */ 836 */
845bfa_boolean_t 837bfa_boolean_t
@@ -848,7 +840,7 @@ bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port)
848 return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online); 840 return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
849} 841}
850 842
851/** 843/*
852 * Attach time initialization of logical ports. 844 * Attach time initialization of logical ports.
853 */ 845 */
854void 846void
@@ -865,7 +857,7 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
865 lport->num_rports = 0; 857 lport->num_rports = 0;
866} 858}
867 859
868/** 860/*
869 * Logical port initialization of base or virtual port. 861 * Logical port initialization of base or virtual port.
870 * Called by fabric for base port or by vport for virtual ports. 862 * Called by fabric for base port or by vport for virtual ports.
871 */ 863 */
@@ -894,7 +886,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
894 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 886 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
895} 887}
896 888
897/** 889/*
898 * fcs_lport_api 890 * fcs_lport_api
899 */ 891 */
900 892
@@ -934,11 +926,11 @@ bfa_fcs_lport_get_attr(
934 } 926 }
935} 927}
936 928
937/** 929/*
938 * bfa_fcs_lport_fab port fab functions 930 * bfa_fcs_lport_fab port fab functions
939 */ 931 */
940 932
941/** 933/*
942 * Called by port to initialize fabric services of the base port. 934 * Called by port to initialize fabric services of the base port.
943 */ 935 */
944static void 936static void
@@ -949,7 +941,7 @@ bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port)
949 bfa_fcs_lport_ms_init(port); 941 bfa_fcs_lport_ms_init(port);
950} 942}
951 943
952/** 944/*
953 * Called by port to notify transition to online state. 945 * Called by port to notify transition to online state.
954 */ 946 */
955static void 947static void
@@ -959,7 +951,7 @@ bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
959 bfa_fcs_lport_scn_online(port); 951 bfa_fcs_lport_scn_online(port);
960} 952}
961 953
962/** 954/*
963 * Called by port to notify transition to offline state. 955 * Called by port to notify transition to offline state.
964 */ 956 */
965static void 957static void
@@ -970,11 +962,11 @@ bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port)
970 bfa_fcs_lport_ms_offline(port); 962 bfa_fcs_lport_ms_offline(port);
971} 963}
972 964
973/** 965/*
974 * bfa_fcs_lport_n2n functions 966 * bfa_fcs_lport_n2n functions
975 */ 967 */
976 968
977/** 969/*
978 * Called by fcs/port to initialize N2N topology. 970 * Called by fcs/port to initialize N2N topology.
979 */ 971 */
980static void 972static void
@@ -982,7 +974,7 @@ bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port)
982{ 974{
983} 975}
984 976
985/** 977/*
986 * Called by fcs/port to notify transition to online state. 978 * Called by fcs/port to notify transition to online state.
987 */ 979 */
988static void 980static void
@@ -1006,7 +998,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
1006 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, 998 ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
1007 sizeof(wwn_t)) > 0) { 999 sizeof(wwn_t)) > 0) {
1008 port->pid = N2N_LOCAL_PID; 1000 port->pid = N2N_LOCAL_PID;
1009 /** 1001 /*
1010 * First, check if we know the device by pwwn. 1002 * First, check if we know the device by pwwn.
1011 */ 1003 */
1012 rport = bfa_fcs_lport_get_rport_by_pwwn(port, 1004 rport = bfa_fcs_lport_get_rport_by_pwwn(port,
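The hunk above touches the point-to-point (N2N) address assignment: the local port claims N2N_LOCAL_PID when its own port WWN compares greater than the remote port's WWN. A minimal sketch of that decision follows, for orientation only; the _SKETCH names, the PID values and the lower-WWN branch are assumptions for illustration, not the driver's actual definitions.

#include <stdint.h>
#include <string.h>

#define N2N_LOCAL_PID_SKETCH  0x010000u  /* placeholder values, not the driver's */
#define N2N_REMOTE_PID_SKETCH 0x020000u

/* WWNs are 8 bytes on the wire; the driver orders them by memcmp() on the raw bytes. */
typedef struct { uint8_t b[8]; } wwn_sketch_t;

static uint32_t n2n_pick_pid(const wwn_sketch_t *my_pwwn,
                             const wwn_sketch_t *rem_pwwn)
{
	/* The memcmp() > 0 branch mirrors the hunk: the higher WWN takes the
	 * "local" PID. The other branch is assumed to be symmetric. */
	if (memcmp(my_pwwn, rem_pwwn, sizeof(*my_pwwn)) > 0)
		return N2N_LOCAL_PID_SKETCH;
	return N2N_REMOTE_PID_SKETCH;
}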
@@ -1035,7 +1027,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
1035 } 1027 }
1036} 1028}
1037 1029
1038/** 1030/*
1039 * Called by fcs/port to notify transition to offline state. 1031 * Called by fcs/port to notify transition to offline state.
1040 */ 1032 */
1041static void 1033static void
@@ -1094,11 +1086,11 @@ static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1094 struct bfa_fcs_fdmi_hba_attr_s *hba_attr); 1086 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
1095static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, 1087static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1096 struct bfa_fcs_fdmi_port_attr_s *port_attr); 1088 struct bfa_fcs_fdmi_port_attr_s *port_attr);
1097/** 1089/*
1098 * fcs_fdmi_sm FCS FDMI state machine 1090 * fcs_fdmi_sm FCS FDMI state machine
1099 */ 1091 */
1100 1092
1101/** 1093/*
1102 * FDMI State Machine events 1094 * FDMI State Machine events
1103 */ 1095 */
1104enum port_fdmi_event { 1096enum port_fdmi_event {
@@ -1143,7 +1135,7 @@ static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
1143static void bfa_fcs_lport_fdmi_sm_disabled( 1135static void bfa_fcs_lport_fdmi_sm_disabled(
1144 struct bfa_fcs_lport_fdmi_s *fdmi, 1136 struct bfa_fcs_lport_fdmi_s *fdmi,
1145 enum port_fdmi_event event); 1137 enum port_fdmi_event event);
1146/** 1138/*
1147 * Start in offline state - awaiting MS to send start. 1139 * Start in offline state - awaiting MS to send start.
1148 */ 1140 */
1149static void 1141static void
@@ -1510,7 +1502,7 @@ bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
1510 bfa_sm_fault(port->fcs, event); 1502 bfa_sm_fault(port->fcs, event);
1511 } 1503 }
1512} 1504}
1513/** 1505/*
1514 * FDMI is in the disabled state. 1506
1515 */ 1507 */
1516static void 1508static void
@@ -1525,7 +1517,7 @@ bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi,
1525 /* No op State. It can only be enabled at Driver Init. */ 1517 /* No op State. It can only be enabled at Driver Init. */
1526} 1518}
1527 1519
1528/** 1520/*
1529* RHBA : Register HBA Attributes. 1521* RHBA : Register HBA Attributes.
1530 */ 1522 */
1531static void 1523static void
@@ -1607,8 +1599,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1607 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1599 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1608 len += attr->len; 1600 len += attr->len;
1609 count++; 1601 count++;
1610 attr->len = 1602 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1611 cpu_to_be16(attr->len + sizeof(attr->type) +
1612 sizeof(attr->len)); 1603 sizeof(attr->len));
1613 1604
1614 /* 1605 /*
@@ -1618,15 +1609,11 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1618 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); 1609 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
1619 attr->len = (u16) strlen(fcs_hba_attr->manufacturer); 1610 attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
1620 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); 1611 memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
1621 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1612 attr->len = fc_roundup(attr->len, sizeof(u32));
1622 *fields need
1623 *to be 4 byte
1624 *aligned */
1625 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1613 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1626 len += attr->len; 1614 len += attr->len;
1627 count++; 1615 count++;
1628 attr->len = 1616 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1629 cpu_to_be16(attr->len + sizeof(attr->type) +
1630 sizeof(attr->len)); 1617 sizeof(attr->len));
1631 1618
1632 /* 1619 /*
@@ -1636,15 +1623,11 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1636 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); 1623 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
1637 attr->len = (u16) strlen(fcs_hba_attr->serial_num); 1624 attr->len = (u16) strlen(fcs_hba_attr->serial_num);
1638 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); 1625 memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
1639 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1626 attr->len = fc_roundup(attr->len, sizeof(u32));
1640 *fields need
1641 *to be 4 byte
1642 *aligned */
1643 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1627 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1644 len += attr->len; 1628 len += attr->len;
1645 count++; 1629 count++;
1646 attr->len = 1630 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1647 cpu_to_be16(attr->len + sizeof(attr->type) +
1648 sizeof(attr->len)); 1631 sizeof(attr->len));
1649 1632
1650 /* 1633 /*
@@ -1654,15 +1637,11 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1654 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); 1637 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
1655 attr->len = (u16) strlen(fcs_hba_attr->model); 1638 attr->len = (u16) strlen(fcs_hba_attr->model);
1656 memcpy(attr->value, fcs_hba_attr->model, attr->len); 1639 memcpy(attr->value, fcs_hba_attr->model, attr->len);
1657 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1640 attr->len = fc_roundup(attr->len, sizeof(u32));
1658 *fields need
1659 *to be 4 byte
1660 *aligned */
1661 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1641 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1662 len += attr->len; 1642 len += attr->len;
1663 count++; 1643 count++;
1664 attr->len = 1644 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1665 cpu_to_be16(attr->len + sizeof(attr->type) +
1666 sizeof(attr->len)); 1645 sizeof(attr->len));
1667 1646
1668 /* 1647 /*
@@ -1672,15 +1651,11 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1672 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); 1651 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
1673 attr->len = (u16) strlen(fcs_hba_attr->model_desc); 1652 attr->len = (u16) strlen(fcs_hba_attr->model_desc);
1674 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); 1653 memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
1675 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1654 attr->len = fc_roundup(attr->len, sizeof(u32));
1676 *fields need
1677 *to be 4 byte
1678 *aligned */
1679 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1655 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1680 len += attr->len; 1656 len += attr->len;
1681 count++; 1657 count++;
1682 attr->len = 1658 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1683 cpu_to_be16(attr->len + sizeof(attr->type) +
1684 sizeof(attr->len)); 1659 sizeof(attr->len));
1685 1660
1686 /* 1661 /*
@@ -1691,15 +1666,11 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1691 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); 1666 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
1692 attr->len = (u16) strlen(fcs_hba_attr->hw_version); 1667 attr->len = (u16) strlen(fcs_hba_attr->hw_version);
1693 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); 1668 memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
1694 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1669 attr->len = fc_roundup(attr->len, sizeof(u32));
1695 *fields need
1696 *to be 4 byte
1697 *aligned */
1698 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1670 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1699 len += attr->len; 1671 len += attr->len;
1700 count++; 1672 count++;
1701 attr->len = 1673 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1702 cpu_to_be16(attr->len + sizeof(attr->type) +
1703 sizeof(attr->len)); 1674 sizeof(attr->len));
1704 } 1675 }
1705 1676
@@ -1710,15 +1681,11 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1710 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); 1681 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
1711 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1682 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
1712 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1683 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
1713 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1684 attr->len = fc_roundup(attr->len, sizeof(u32));
1714 *fields need
1715 *to be 4 byte
1716 *aligned */
1717 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1685 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1718 len += attr->len; 1686 len += attr->len;
1719 count++; 1687 count++;
1720 attr->len = 1688 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1721 cpu_to_be16(attr->len + sizeof(attr->type) +
1722 sizeof(attr->len)); 1689 sizeof(attr->len));
1723 1690
1724 /* 1691 /*
@@ -1729,15 +1696,11 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1729 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); 1696 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
1730 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); 1697 attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
1731 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); 1698 memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
1732 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1699 attr->len = fc_roundup(attr->len, sizeof(u32));
1733 *fields need
1734 *to be 4 byte
1735 *aligned */
1736 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1700 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1737 len += attr->len; 1701 len += attr->len;
1738 count++; 1702 count++;
1739 attr->len = 1703 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1740 cpu_to_be16(attr->len + sizeof(attr->type) +
1741 sizeof(attr->len)); 1704 sizeof(attr->len));
1742 } 1705 }
1743 1706
@@ -1748,15 +1711,11 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1748 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); 1711 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
1749 attr->len = (u16) strlen(fcs_hba_attr->driver_version); 1712 attr->len = (u16) strlen(fcs_hba_attr->driver_version);
1750 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); 1713 memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
1751 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1714 attr->len = fc_roundup(attr->len, sizeof(u32));
1752 *fields need
1753 *to be 4 byte
1754 *aligned */
1755 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1715 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1756 len += attr->len; 1716 len += attr->len;
1757 count++; 1717 count++;
1758 attr->len = 1718 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1759 cpu_to_be16(attr->len + sizeof(attr->type) +
1760 sizeof(attr->len)); 1719 sizeof(attr->len));
1761 1720
1762 /* 1721 /*
@@ -1767,15 +1726,11 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1767 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); 1726 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
1768 attr->len = (u16) strlen(fcs_hba_attr->os_name); 1727 attr->len = (u16) strlen(fcs_hba_attr->os_name);
1769 memcpy(attr->value, fcs_hba_attr->os_name, attr->len); 1728 memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
1770 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1729 attr->len = fc_roundup(attr->len, sizeof(u32));
1771 *fields need
1772 *to be 4 byte
1773 *aligned */
1774 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1730 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1775 len += attr->len; 1731 len += attr->len;
1776 count++; 1732 count++;
1777 attr->len = 1733 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1778 cpu_to_be16(attr->len + sizeof(attr->type) +
1779 sizeof(attr->len)); 1734 sizeof(attr->len));
1780 } 1735 }
1781 1736
@@ -1788,15 +1743,13 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1788 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); 1743 memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
1789 len += attr->len; 1744 len += attr->len;
1790 count++; 1745 count++;
1791 attr->len = 1746 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1792 cpu_to_be16(attr->len + sizeof(attr->type) +
1793 sizeof(attr->len)); 1747 sizeof(attr->len));
1794 1748
1795 /* 1749 /*
1796 * Update size of payload 1750 * Update size of payload
1797 */ 1751 */
1798 len += ((sizeof(attr->type) + 1752 len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
1799 sizeof(attr->len)) * count);
1800 1753
1801 rhba->hba_attr_blk.attr_count = cpu_to_be32(count); 1754 rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
1802 return len; 1755 return len;
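Most of the consolidation in the RHBA hunks above is the same type/length/value sequence repeated once per HBA attribute: set the type, copy the value, round the length up to a 4-byte boundary, advance the write pointer, and finally fold the type and length header sizes into the on-wire length. A self-contained sketch of that pattern is given below; the structure, the fixed-size value buffer and htons() are illustrative stand-ins, not the driver's own types or its cpu_to_be16() helper.

#include <arpa/inet.h>   /* htons() */
#include <stdint.h>
#include <string.h>

/* Illustrative attribute layout; the driver uses its own FDMI structures. */
struct fdmi_attr_sketch {
	uint16_t type;         /* attribute type, big-endian on the wire        */
	uint16_t len;          /* final value covers type + len + padded value  */
	uint8_t  value[256];   /* fixed size purely for this example            */
};

/* Append one string attribute and return the number of bytes it occupies.
 * Example only: no bounds checking against sizeof(attr->value). */
static size_t fdmi_add_string_attr(struct fdmi_attr_sketch *attr,
				   uint16_t type, const char *str)
{
	size_t vlen = strlen(str);
	size_t padded = (vlen + 3) & ~(size_t)3;   /* 4-byte alignment, as fc_roundup() does */

	attr->type = htons(type);
	memcpy(attr->value, str, vlen);
	memset(attr->value + vlen, 0, padded - vlen);   /* zero the padding */
	attr->len = htons((uint16_t)(sizeof(attr->type) + sizeof(attr->len) + padded));

	return sizeof(attr->type) + sizeof(attr->len) + padded;
}

The original code spells this sequence out inline for each attribute and keeps a running count so the header sizes can be added to the payload length at the end, which is why the only change in these hunks is to rejoin the wrapped cpu_to_be16() assignments and drop the wrapped alignment comments.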
@@ -1837,7 +1790,7 @@ bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
1837 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 1790 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
1838} 1791}
1839 1792
1840/** 1793/*
1841* RPRT : Register Port 1794* RPRT : Register Port
1842 */ 1795 */
1843static void 1796static void
@@ -1879,7 +1832,7 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1879 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); 1832 bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
1880} 1833}
1881 1834
1882/** 1835/*
1883 * This routine builds the Port Attribute Block that is used in the RPA and RPRT commands. 1836
1884 */ 1837 */
1885static u16 1838static u16
@@ -1943,8 +1896,7 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1943 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1896 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1944 len += attr->len; 1897 len += attr->len;
1945 ++count; 1898 ++count;
1946 attr->len = 1899 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1947 cpu_to_be16(attr->len + sizeof(attr->type) +
1948 sizeof(attr->len)); 1900 sizeof(attr->len));
1949 1901
1950 /* 1902 /*
@@ -1957,8 +1909,7 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1957 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1909 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1958 len += attr->len; 1910 len += attr->len;
1959 ++count; 1911 ++count;
1960 attr->len = 1912 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1961 cpu_to_be16(attr->len + sizeof(attr->type) +
1962 sizeof(attr->len)); 1913 sizeof(attr->len));
1963 1914
1964 /* 1915 /*
@@ -1969,15 +1920,11 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1969 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); 1920 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
1970 attr->len = (u16) strlen(fcs_port_attr.os_device_name); 1921 attr->len = (u16) strlen(fcs_port_attr.os_device_name);
1971 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); 1922 memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
1972 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1923 attr->len = fc_roundup(attr->len, sizeof(u32));
1973 *fields need
1974 *to be 4 byte
1975 *aligned */
1976 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1924 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1977 len += attr->len; 1925 len += attr->len;
1978 ++count; 1926 ++count;
1979 attr->len = 1927 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1980 cpu_to_be16(attr->len + sizeof(attr->type) +
1981 sizeof(attr->len)); 1928 sizeof(attr->len));
1982 } 1929 }
1983 /* 1930 /*
@@ -1988,15 +1935,11 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
1988 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); 1935 attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
1989 attr->len = (u16) strlen(fcs_port_attr.host_name); 1936 attr->len = (u16) strlen(fcs_port_attr.host_name);
1990 memcpy(attr->value, fcs_port_attr.host_name, attr->len); 1937 memcpy(attr->value, fcs_port_attr.host_name, attr->len);
1991 attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable 1938 attr->len = fc_roundup(attr->len, sizeof(u32));
1992 *fields need
1993 *to be 4 byte
1994 *aligned */
1995 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; 1939 curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
1996 len += attr->len; 1940 len += attr->len;
1997 ++count; 1941 ++count;
1998 attr->len = 1942 attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
1999 cpu_to_be16(attr->len + sizeof(attr->type) +
2000 sizeof(attr->len)); 1943 sizeof(attr->len));
2001 } 1944 }
2002 1945
@@ -2004,8 +1947,7 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
2004 * Update size of payload 1947 * Update size of payload
2005 */ 1948 */
2006 port_attrib->attr_count = cpu_to_be32(count); 1949 port_attrib->attr_count = cpu_to_be32(count);
2007 len += ((sizeof(attr->type) + 1950 len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
2008 sizeof(attr->len)) * count);
2009 return len; 1951 return len;
2010} 1952}
2011 1953
@@ -2062,7 +2004,7 @@ bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2062 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); 2004 bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
2063} 2005}
2064 2006
2065/** 2007/*
2066* RPA : Register Port Attributes. 2008* RPA : Register Port Attributes.
2067 */ 2009 */
2068static void 2010static void
@@ -2091,10 +2033,8 @@ bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2091 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), 2033 len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
2092 FDMI_RPA); 2034 FDMI_RPA);
2093 2035
2094 attr_len = 2036 attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
2095 bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, 2037 (u8 *) ((struct ct_hdr_s *) pyld + 1));
2096 (u8 *) ((struct ct_hdr_s *) pyld
2097 + 1));
2098 2038
2099 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 2039 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
2100 FC_CLASS_3, len + attr_len, &fchs, 2040 FC_CLASS_3, len + attr_len, &fchs,
@@ -2321,11 +2261,11 @@ static void bfa_fcs_lport_ms_gfn_response(void *fcsarg,
2321 u32 rsp_len, 2261 u32 rsp_len,
2322 u32 resid_len, 2262 u32 resid_len,
2323 struct fchs_s *rsp_fchs); 2263 struct fchs_s *rsp_fchs);
2324/** 2264/*
2325 * fcs_ms_sm FCS MS state machine 2265 * fcs_ms_sm FCS MS state machine
2326 */ 2266 */
2327 2267
2328/** 2268/*
2329 * MS State Machine events 2269 * MS State Machine events
2330 */ 2270 */
2331enum port_ms_event { 2271enum port_ms_event {
@@ -2360,7 +2300,7 @@ static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
2360 enum port_ms_event event); 2300 enum port_ms_event event);
2361static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, 2301static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
2362 enum port_ms_event event); 2302 enum port_ms_event event);
2363/** 2303/*
2364 * Start in offline state - awaiting NS to send start. 2304 * Start in offline state - awaiting NS to send start.
2365 */ 2305 */
2366static void 2306static void
@@ -2432,7 +2372,7 @@ bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
2432 */ 2372 */
2433 bfa_fcs_lport_fdmi_online(ms); 2373 bfa_fcs_lport_fdmi_online(ms);
2434 2374
2435 /** 2375 /*
2436 * if this is a Vport, go to online state. 2376 * if this is a Vport, go to online state.
2437 */ 2377 */
2438 if (ms->port->vport) { 2378 if (ms->port->vport) {
@@ -2595,7 +2535,7 @@ bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
2595 bfa_sm_fault(ms->port->fcs, event); 2535 bfa_sm_fault(ms->port->fcs, event);
2596 } 2536 }
2597} 2537}
2598/** 2538/*
2599 * ms_pvt MS local functions 2539 * ms_pvt MS local functions
2600 */ 2540 */
2601 2541
@@ -2795,7 +2735,7 @@ bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
2795 bfa_sm_fault(ms->port->fcs, event); 2735 bfa_sm_fault(ms->port->fcs, event);
2796 } 2736 }
2797} 2737}
2798/** 2738/*
2799 * ms_pvt MS local functions 2739 * ms_pvt MS local functions
2800 */ 2740 */
2801 2741
@@ -2871,7 +2811,7 @@ bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
2871 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); 2811 bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
2872} 2812}
2873 2813
2874/** 2814/*
2875 * ms_pvt MS local functions 2815 * ms_pvt MS local functions
2876 */ 2816 */
2877 2817
@@ -3017,7 +2957,7 @@ bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port)
3017 bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); 2957 bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
3018} 2958}
3019 2959
3020/** 2960/*
3021 * @page ns_sm_info VPORT NS State Machine 2961 * @page ns_sm_info VPORT NS State Machine
3022 * 2962 *
3023 * @section ns_sm_interactions VPORT NS State Machine Interactions 2963 * @section ns_sm_interactions VPORT NS State Machine Interactions
@@ -3080,11 +3020,11 @@ static void bfa_fcs_lport_ns_process_gidft_pids(
3080 u32 *pid_buf, u32 n_pids); 3020 u32 *pid_buf, u32 n_pids);
3081 3021
3082static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port); 3022static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
3083/** 3023/*
3084 * fcs_ns_sm FCS nameserver interface state machine 3024 * fcs_ns_sm FCS nameserver interface state machine
3085 */ 3025 */
3086 3026
3087/** 3027/*
3088 * VPort NS State Machine events 3028 * VPort NS State Machine events
3089 */ 3029 */
3090enum vport_ns_event { 3030enum vport_ns_event {
@@ -3139,7 +3079,7 @@ static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
3139 enum vport_ns_event event); 3079 enum vport_ns_event event);
3140static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, 3080static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
3141 enum vport_ns_event event); 3081 enum vport_ns_event event);
3142/** 3082/*
3143 * Start in offline state - awaiting linkup 3083 * Start in offline state - awaiting linkup
3144 */ 3084 */
3145static void 3085static void
@@ -3628,7 +3568,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
3628 3568
3629 3569
3630 3570
3631/** 3571/*
3632 * ns_pvt Nameserver local functions 3572 * ns_pvt Nameserver local functions
3633 */ 3573 */
3634 3574
@@ -3724,7 +3664,7 @@ bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3724 } 3664 }
3725} 3665}
3726 3666
3727/** 3667/*
3728 * Register the symbolic port name. 3668 * Register the symbolic port name.
3729 */ 3669 */
3730static void 3670static void
@@ -3755,7 +3695,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3755 * for V-Port, form a Port Symbolic Name 3695 * for V-Port, form a Port Symbolic Name
3756 */ 3696 */
3757 if (port->vport) { 3697 if (port->vport) {
3758 /** 3698 /*
3759 * For Vports, we append the vport's port symbolic name 3699 * For Vports, we append the vport's port symbolic name
3760 * to that of the base port. 3700 * to that of the base port.
3761 */ 3701 */
@@ -3829,7 +3769,7 @@ bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3829 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3769 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3830} 3770}
3831 3771
3832/** 3772/*
3833 * Register FC4-Types 3773 * Register FC4-Types
3834 */ 3774 */
3835static void 3775static void
@@ -3901,7 +3841,7 @@ bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3901 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3841 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3902} 3842}
3903 3843
3904/** 3844/*
3905 * Register FC4-Features : Should be done after RFT_ID 3845 * Register FC4-Features : Should be done after RFT_ID
3906 */ 3846 */
3907static void 3847static void
@@ -3982,7 +3922,7 @@ bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3982 } else 3922 } else
3983 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); 3923 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
3984} 3924}
3985/** 3925/*
3986 * Query Fabric for FC4-Types Devices. 3926 * Query Fabric for FC4-Types Devices.
3987 * 3927 *
3988* TBD : Need to use a local (FCS private) response buffer, since the response 3928* TBD : Need to use a local (FCS private) response buffer, since the response
@@ -4102,7 +4042,7 @@ bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
4102 } 4042 }
4103} 4043}
4104 4044
4105/** 4045/*
4106 * This routine will be called by bfa_timer on timer timeouts. 4046 * This routine will be called by bfa_timer on timer timeouts.
4107 * 4047 *
4108 * param[in] port - pointer to bfa_fcs_lport_t. 4048 * param[in] port - pointer to bfa_fcs_lport_t.
@@ -4166,7 +4106,7 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
4166 } 4106 }
4167} 4107}
4168 4108
4169/** 4109/*
4170 * fcs_ns_public FCS nameserver public interfaces 4110 * fcs_ns_public FCS nameserver public interfaces
4171 */ 4111 */
4172 4112
@@ -4227,7 +4167,7 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
4227 } 4167 }
4228} 4168}
4229 4169
4230/** 4170/*
4231 * FCS SCN 4171 * FCS SCN
4232 */ 4172 */
4233 4173
@@ -4250,11 +4190,11 @@ static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
4250 struct fchs_s *rx_fchs); 4190 struct fchs_s *rx_fchs);
4251static void bfa_fcs_lport_scn_timeout(void *arg); 4191static void bfa_fcs_lport_scn_timeout(void *arg);
4252 4192
4253/** 4193/*
4254 * fcs_scm_sm FCS SCN state machine 4194 * fcs_scm_sm FCS SCN state machine
4255 */ 4195 */
4256 4196
4257/** 4197/*
4258 * VPort SCN State Machine events 4198 * VPort SCN State Machine events
4259 */ 4199 */
4260enum port_scn_event { 4200enum port_scn_event {
@@ -4278,7 +4218,7 @@ static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
4278static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, 4218static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
4279 enum port_scn_event event); 4219 enum port_scn_event event);
4280 4220
4281/** 4221/*
4282 * Starting state - awaiting link up. 4222 * Starting state - awaiting link up.
4283 */ 4223 */
4284static void 4224static void
@@ -4382,11 +4322,11 @@ bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
4382 4322
4383 4323
4384 4324
4385/** 4325/*
4386 * fcs_scn_private FCS SCN private functions 4326 * fcs_scn_private FCS SCN private functions
4387 */ 4327 */
4388 4328
4389/** 4329/*
4390 * This routine will be called to send a SCR command. 4330 * This routine will be called to send a SCR command.
4391 */ 4331 */
4392static void 4332static void
@@ -4499,7 +4439,7 @@ bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
4499 FC_MAX_PDUSZ, 0); 4439 FC_MAX_PDUSZ, 0);
4500} 4440}
4501 4441
4502/** 4442/*
4503 * This routine will be called by bfa_timer on timer timeouts. 4443 * This routine will be called by bfa_timer on timer timeouts.
4504 * 4444 *
4505 * param[in] vport - pointer to bfa_fcs_lport_t. 4445 * param[in] vport - pointer to bfa_fcs_lport_t.
@@ -4522,7 +4462,7 @@ bfa_fcs_lport_scn_timeout(void *arg)
4522 4462
4523 4463
4524 4464
4525/** 4465/*
4526 * fcs_scn_public FCS state change notification public interfaces 4466 * fcs_scn_public FCS state change notification public interfaces
4527 */ 4467 */
4528 4468
@@ -4563,7 +4503,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
4563 4503
4564 bfa_trc(port->fcs, rpid); 4504 bfa_trc(port->fcs, rpid);
4565 4505
4566 /** 4506 /*
4567 * If this is an unknown device, then it just came online. 4507 * If this is an unknown device, then it just came online.
4568 * Otherwise let rport handle the RSCN event. 4508 * Otherwise let rport handle the RSCN event.
4569 */ 4509 */
@@ -4579,7 +4519,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
4579 bfa_fcs_rport_scn(rport); 4519 bfa_fcs_rport_scn(rport);
4580} 4520}
4581 4521
4582/** 4522/*
4583 * rscn format based PID comparison 4523 * rscn format based PID comparison
4584 */ 4524 */
4585#define __fc_pid_match(__c0, __c1, __fmt) \ 4525#define __fc_pid_match(__c0, __c1, __fmt) \
@@ -4691,18 +4631,18 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
4691 } 4631 }
4692 } 4632 }
4693 4633
4694 /** 4634 /*
4695 * If any of area, domain or fabric RSCN is received, do a fresh discovery 4635 * If any of area, domain or fabric RSCN is received, do a fresh
4696 * to find new devices. 4636 * discovery to find new devices.
4697 */ 4637 */
4698 if (nsquery) 4638 if (nsquery)
4699 bfa_fcs_lport_ns_query(port); 4639 bfa_fcs_lport_ns_query(port);
4700} 4640}
4701 4641
4702/** 4642/*
4703 * BFA FCS port 4643 * BFA FCS port
4704 */ 4644 */
4705/** 4645/*
4706 * fcs_port_api BFA FCS port API 4646 * fcs_port_api BFA FCS port API
4707 */ 4647 */
4708struct bfa_fcs_lport_s * 4648struct bfa_fcs_lport_s *
@@ -4946,7 +4886,7 @@ bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
4946 memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); 4886 memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
4947} 4887}
4948 4888
4949/** 4889/*
4950 * FCS virtual port state machine 4890 * FCS virtual port state machine
4951 */ 4891 */
4952 4892
@@ -4967,11 +4907,11 @@ static void bfa_fcs_vport_timeout(void *vport_arg);
4967static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport); 4907static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
4968static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport); 4908static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
4969 4909
4970/** 4910/*
4971 * fcs_vport_sm FCS virtual port state machine 4911 * fcs_vport_sm FCS virtual port state machine
4972 */ 4912 */
4973 4913
4974/** 4914/*
4975 * VPort State Machine events 4915 * VPort State Machine events
4976 */ 4916 */
4977enum bfa_fcs_vport_event { 4917enum bfa_fcs_vport_event {
@@ -5024,7 +4964,7 @@ static struct bfa_sm_table_s vport_sm_table[] = {
5024 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR} 4964 {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
5025}; 4965};
5026 4966
5027/** 4967/*
5028 * Beginning state. 4968 * Beginning state.
5029 */ 4969 */
5030static void 4970static void
@@ -5045,7 +4985,7 @@ bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
5045 } 4985 }
5046} 4986}
5047 4987
5048/** 4988/*
5049 * Created state - a start event is required to start up the state machine. 4989 * Created state - a start event is required to start up the state machine.
5050 */ 4990 */
5051static void 4991static void
@@ -5062,7 +5002,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
5062 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); 5002 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
5063 bfa_fcs_vport_do_fdisc(vport); 5003 bfa_fcs_vport_do_fdisc(vport);
5064 } else { 5004 } else {
5065 /** 5005 /*
5066 * Fabric is offline or not NPIV capable, stay in 5006 * Fabric is offline or not NPIV capable, stay in
5067 * offline state. 5007 * offline state.
5068 */ 5008 */
@@ -5078,7 +5018,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
5078 5018
5079 case BFA_FCS_VPORT_SM_ONLINE: 5019 case BFA_FCS_VPORT_SM_ONLINE:
5080 case BFA_FCS_VPORT_SM_OFFLINE: 5020 case BFA_FCS_VPORT_SM_OFFLINE:
5081 /** 5021 /*
5082 * Ignore ONLINE/OFFLINE events from fabric 5022 * Ignore ONLINE/OFFLINE events from fabric
5083 * till vport is started. 5023 * till vport is started.
5084 */ 5024 */
@@ -5089,7 +5029,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
5089 } 5029 }
5090} 5030}
5091 5031
5092/** 5032/*
5093 * Offline state - awaiting ONLINE event from fabric SM. 5033 * Offline state - awaiting ONLINE event from fabric SM.
5094 */ 5034 */
5095static void 5035static void
@@ -5127,7 +5067,7 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
5127} 5067}
5128 5068
5129 5069
5130/** 5070/*
5131 * FDISC is sent and awaiting reply from fabric. 5071 * FDISC is sent and awaiting reply from fabric.
5132 */ 5072 */
5133static void 5073static void
@@ -5174,7 +5114,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
5174 } 5114 }
5175} 5115}
5176 5116
5177/** 5117/*
5178 * FDISC attempt failed - a timer is active to retry FDISC. 5118 * FDISC attempt failed - a timer is active to retry FDISC.
5179 */ 5119 */
5180static void 5120static void
@@ -5208,7 +5148,7 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
5208 } 5148 }
5209} 5149}
5210 5150
5211/** 5151/*
5212 * Vport is online (FDISC is complete). 5152 * Vport is online (FDISC is complete).
5213 */ 5153 */
5214static void 5154static void
@@ -5235,7 +5175,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
5235 } 5175 }
5236} 5176}
5237 5177
5238/** 5178/*
5239 * Vport is being deleted - awaiting lport delete completion to send 5179 * Vport is being deleted - awaiting lport delete completion to send
5240 * LOGO to fabric. 5180 * LOGO to fabric.
5241 */ 5181 */
@@ -5264,7 +5204,7 @@ bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
5264 } 5204 }
5265} 5205}
5266 5206
5267/** 5207/*
5268 * Error State. 5208 * Error State.
5269 * This state will be set when the Vport Creation fails due 5209 * This state will be set when the Vport Creation fails due
5270 * to errors like Dup WWN. In this state only operation allowed 5210 * to errors like Dup WWN. In this state only operation allowed
@@ -5288,7 +5228,7 @@ bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
5288 } 5228 }
5289} 5229}
5290 5230
5291/** 5231/*
5292 * Lport cleanup is in progress since vport is being deleted. Fabric is 5232 * Lport cleanup is in progress since vport is being deleted. Fabric is
5293 * offline, so no LOGO is needed to complete vport deletion. 5233 * offline, so no LOGO is needed to complete vport deletion.
5294 */ 5234 */
@@ -5313,7 +5253,7 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
5313 } 5253 }
5314} 5254}
5315 5255
5316/** 5256/*
5317 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup 5257 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
5318 * is done. 5258 * is done.
5319 */ 5259 */
@@ -5347,10 +5287,10 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
5347 5287
5348 5288
5349 5289
5350/** 5290/*
5351 * fcs_vport_private FCS virtual port private functions 5291 * fcs_vport_private FCS virtual port private functions
5352 */ 5292 */
5353/** 5293/*
5354 * This routine will be called to send a FDISC command. 5294 * This routine will be called to send a FDISC command.
5355 */ 5295 */
5356static void 5296static void
@@ -5397,7 +5337,7 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5397 } 5337 }
5398} 5338}
5399 5339
5400/** 5340/*
5401 * Called to send a logout to the fabric. Used when a V-Port is 5341 * Called to send a logout to the fabric. Used when a V-Port is
5402 * deleted/stopped. 5342 * deleted/stopped.
5403 */ 5343 */
@@ -5411,7 +5351,7 @@ bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
5411} 5351}
5412 5352
5413 5353
5414/** 5354/*
5415 * This routine will be called by bfa_timer on timer timeouts. 5355 * This routine will be called by bfa_timer on timer timeouts.
5416 * 5356 *
5417 * param[in] vport - pointer to bfa_fcs_vport_t. 5357 * param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5449,11 +5389,11 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
5449 5389
5450 5390
5451 5391
5452/** 5392/*
5453 * fcs_vport_public FCS virtual port public interfaces 5393 * fcs_vport_public FCS virtual port public interfaces
5454 */ 5394 */
5455 5395
5456/** 5396/*
5457 * Online notification from fabric SM. 5397 * Online notification from fabric SM.
5458 */ 5398 */
5459void 5399void
@@ -5463,7 +5403,7 @@ bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
5463 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); 5403 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
5464} 5404}
5465 5405
5466/** 5406/*
5467 * Offline notification from fabric SM. 5407 * Offline notification from fabric SM.
5468 */ 5408 */
5469void 5409void
@@ -5473,7 +5413,7 @@ bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
5473 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); 5413 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
5474} 5414}
5475 5415
5476/** 5416/*
5477 * Cleanup notification from fabric SM on link timer expiry. 5417 * Cleanup notification from fabric SM on link timer expiry.
5478 */ 5418 */
5479void 5419void
@@ -5481,7 +5421,7 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
5481{ 5421{
5482 vport->vport_stats.fab_cleanup++; 5422 vport->vport_stats.fab_cleanup++;
5483} 5423}
5484/** 5424/*
5485 * delete notification from fabric SM. To be invoked from within FCS. 5425 * delete notification from fabric SM. To be invoked from within FCS.
5486 */ 5426 */
5487void 5427void
@@ -5490,7 +5430,7 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
5490 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); 5430 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
5491} 5431}
5492 5432
5493/** 5433/*
5494 * Delete completion callback from associated lport 5434 * Delete completion callback from associated lport
5495 */ 5435 */
5496void 5436void
@@ -5501,11 +5441,11 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
5501 5441
5502 5442
5503 5443
5504/** 5444/*
5505 * fcs_vport_api Virtual port API 5445 * fcs_vport_api Virtual port API
5506 */ 5446 */
5507 5447
5508/** 5448/*
5509 * Use this function to instantiate a new FCS vport object. This 5449 * Use this function to instantiate a new FCS vport object. This
5510 * function will not trigger any HW initialization process (which will be 5450 * function will not trigger any HW initialization process (which will be
5511 * done in vport_start() call) 5451 * done in vport_start() call)
@@ -5555,7 +5495,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
5555 return BFA_STATUS_OK; 5495 return BFA_STATUS_OK;
5556} 5496}
5557 5497
5558/** 5498/*
5559 * Use this function to instantiate a new FCS PBC vport object. This 5499 * Use this function to instantiate a new FCS PBC vport object. This
5560 * function will not trigger any HW initialization process (which will be 5500 * function will not trigger any HW initialization process (which will be
5561 * done in vport_start() call) 5501 * done in vport_start() call)
@@ -5585,7 +5525,7 @@ bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
5585 return rc; 5525 return rc;
5586} 5526}
5587 5527
5588/** 5528/*
5589 * Use this function to find out if this is a pbc vport or not. 5529
5590 * 5530 *
5591 * @param[in] vport - pointer to bfa_fcs_vport_t. 5531 * @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5603,7 +5543,7 @@ bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
5603 5543
5604} 5544}
5605 5545
5606/** 5546/*
5607 * Use this function to initialize the vport. 5547
5608 * 5548 *
5609 * @param[in] vport - pointer to bfa_fcs_vport_t. 5549 * @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5618,7 +5558,7 @@ bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
5618 return BFA_STATUS_OK; 5558 return BFA_STATUS_OK;
5619} 5559}
5620 5560
5621/** 5561/*
5622 * Use this function to quiesce the vport object. This function will return 5562
5623 * immediately; when the vport is actually stopped, the 5563
5624 * bfa_drv_vport_stop_cb() will be called. 5564 * bfa_drv_vport_stop_cb() will be called.
@@ -5635,7 +5575,7 @@ bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
5635 return BFA_STATUS_OK; 5575 return BFA_STATUS_OK;
5636} 5576}
5637 5577
5638/** 5578/*
5639 * Use this function to delete a vport object. Fabric object should 5579 * Use this function to delete a vport object. Fabric object should
5640 * be stopped before this function call. 5580 * be stopped before this function call.
5641 * 5581 *
@@ -5657,7 +5597,7 @@ bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
5657 return BFA_STATUS_OK; 5597 return BFA_STATUS_OK;
5658} 5598}
5659 5599
5660/** 5600/*
5661 * Use this function to get vport's current status info. 5601 * Use this function to get vport's current status info.
5662 * 5602 *
5663 * param[in] vport pointer to bfa_fcs_vport_t. 5603 * param[in] vport pointer to bfa_fcs_vport_t.
@@ -5678,7 +5618,7 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
5678 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); 5618 attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
5679} 5619}
5680 5620
5681/** 5621/*
5682 * Use this function to get vport's statistics. 5622 * Use this function to get vport's statistics.
5683 * 5623 *
5684 * param[in] vport pointer to bfa_fcs_vport_t. 5624 * param[in] vport pointer to bfa_fcs_vport_t.
@@ -5693,7 +5633,7 @@ bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
5693 *stats = vport->vport_stats; 5633 *stats = vport->vport_stats;
5694} 5634}
5695 5635
5696/** 5636/*
5697 * Use this function to clear vport's statistics. 5637 * Use this function to clear vport's statistics.
5698 * 5638 *
5699 * param[in] vport pointer to bfa_fcs_vport_t. 5639 * param[in] vport pointer to bfa_fcs_vport_t.
@@ -5706,7 +5646,7 @@ bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
5706 memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); 5646 memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
5707} 5647}
5708 5648
5709/** 5649/*
5710 * Lookup a virtual port. Excludes base port from lookup. 5650 * Lookup a virtual port. Excludes base port from lookup.
5711 */ 5651 */
5712struct bfa_fcs_vport_s * 5652struct bfa_fcs_vport_s *
@@ -5728,7 +5668,7 @@ bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
5728 return vport; 5668 return vport;
5729} 5669}
5730 5670
5731/** 5671/*
5732 * FDISC Response 5672 * FDISC Response
5733 */ 5673 */
5734void 5674void
@@ -5784,7 +5724,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
5784 } 5724 }
5785} 5725}
5786 5726
5787/** 5727/*
5788 * LOGO response 5728 * LOGO response
5789 */ 5729 */
5790void 5730void
@@ -5794,7 +5734,7 @@ bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
5794 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); 5734 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
5795} 5735}
5796 5736
5797/** 5737/*
5798 * Received clear virtual link 5738 * Received clear virtual link
5799 */ 5739 */
5800void 5740void
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 12b50bda589d..47f35c0ef29a 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * rport.c Remote port implementation. 19 * rport.c Remote port implementation.
20 */ 20 */
21 21
@@ -75,7 +75,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
75static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 75static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
76 struct fchs_s *rx_fchs, u16 len); 76 struct fchs_s *rx_fchs, u16 len);
77static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); 77static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
78/** 78/*
79 * fcs_rport_sm FCS rport state machine events 79 * fcs_rport_sm FCS rport state machine events
80 */ 80 */
81 81
@@ -172,7 +172,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
172 {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, 172 {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
173}; 173};
174 174
175/** 175/*
176 * Beginning state. 176 * Beginning state.
177 */ 177 */
178static void 178static void
@@ -210,7 +210,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
210 } 210 }
211} 211}
212 212
213/** 213/*
214 * PLOGI is being sent. 214 * PLOGI is being sent.
215 */ 215 */
216static void 216static void
@@ -262,7 +262,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
262 } 262 }
263} 263}
264 264
265/** 265/*
266 * PLOGI is being sent. 266 * PLOGI is being sent.
267 */ 267 */
268static void 268static void
@@ -287,7 +287,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
287 287
288 case RPSM_EVENT_PLOGI_RCVD: 288 case RPSM_EVENT_PLOGI_RCVD:
289 case RPSM_EVENT_SCN: 289 case RPSM_EVENT_SCN:
290 /** 290 /*
291 * Ignore, SCN is possibly online notification. 291 * Ignore, SCN is possibly online notification.
292 */ 292 */
293 break; 293 break;
@@ -309,7 +309,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
309 break; 309 break;
310 310
311 case RPSM_EVENT_HCB_OFFLINE: 311 case RPSM_EVENT_HCB_OFFLINE:
312 /** 312 /*
313 * Ignore BFA callback, on a PLOGI receive we call bfa offline. 313 * Ignore BFA callback, on a PLOGI receive we call bfa offline.
314 */ 314 */
315 break; 315 break;
@@ -319,7 +319,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
319 } 319 }
320} 320}
321 321
322/** 322/*
323 * PLOGI is sent. 323 * PLOGI is sent.
324 */ 324 */
325static void 325static void
@@ -380,7 +380,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
380 } 380 }
381} 381}
382 382
383/** 383/*
384 * PLOGI is sent. 384 * PLOGI is sent.
385 */ 385 */
386static void 386static void
@@ -475,7 +475,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
475 } 475 }
476} 476}
477 477
478/** 478/*
479 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s 479 * PLOGI is complete. Awaiting BFA rport online callback. FC-4s
480 * are offline. 480 * are offline.
481 */ 481 */
@@ -519,7 +519,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
519 break; 519 break;
520 520
521 case RPSM_EVENT_SCN: 521 case RPSM_EVENT_SCN:
522 /** 522 /*
523 * @todo 523 * @todo
524 * Ignore SCN - PLOGI just completed, FC-4 login should detect 524 * Ignore SCN - PLOGI just completed, FC-4 login should detect
525 * device failures. 525 * device failures.
@@ -531,7 +531,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
531 } 531 }
532} 532}
533 533
534/** 534/*
535 * Rport is ONLINE. FC-4s active. 535 * Rport is ONLINE. FC-4s active.
536 */ 536 */
537static void 537static void
@@ -580,7 +580,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
580 } 580 }
581} 581}
582 582
583/** 583/*
584 * An SCN event is received in ONLINE state. NS query is being sent 584 * An SCN event is received in ONLINE state. NS query is being sent
585 * prior to ADISC authentication with rport. FC-4s are paused. 585 * prior to ADISC authentication with rport. FC-4s are paused.
586 */ 586 */
@@ -604,7 +604,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
604 break; 604 break;
605 605
606 case RPSM_EVENT_SCN: 606 case RPSM_EVENT_SCN:
607 /** 607 /*
608 * ignore SCN, wait for response to query itself 608 * ignore SCN, wait for response to query itself
609 */ 609 */
610 break; 610 break;
@@ -638,7 +638,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
638 } 638 }
639} 639}
640 640
641/** 641/*
642 * An SCN event is received in ONLINE state. NS query is sent to rport. 642 * An SCN event is received in ONLINE state. NS query is sent to rport.
643 * FC-4s are paused. 643 * FC-4s are paused.
644 */ 644 */
@@ -697,7 +697,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
697 } 697 }
698} 698}
699 699
700/** 700/*
701 * An SCN event is received in ONLINE state. ADISC is being sent to 701
702 * authenticate with the rport. FC-4s are paused. 702
703 */ 703 */
@@ -748,7 +748,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
748 } 748 }
749} 749}
750 750
751/** 751/*
752 * An SCN event is received in ONLINE state. ADISC is sent to rport. 752
753 * FC-4s are paused. 753 * FC-4s are paused.
754 */ 754 */
@@ -765,7 +765,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
765 break; 765 break;
766 766
767 case RPSM_EVENT_PLOGI_RCVD: 767 case RPSM_EVENT_PLOGI_RCVD:
768 /** 768 /*
769 * Too complex to cleanup FC-4 & rport and then acc to PLOGI. 769 * Too complex to cleanup FC-4 & rport and then acc to PLOGI.
770 * At least go offline when a PLOGI is received. 770 * At least go offline when a PLOGI is received.
771 */ 771 */
@@ -787,7 +787,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
787 break; 787 break;
788 788
789 case RPSM_EVENT_SCN: 789 case RPSM_EVENT_SCN:
790 /** 790 /*
791 * already processing RSCN 791 * already processing RSCN
792 */ 792 */
793 break; 793 break;
@@ -810,7 +810,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
810 } 810 }
811} 811}
812 812
813/** 813/*
814 * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 814 * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
815 */ 815 */
816static void 816static void
@@ -841,7 +841,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
841 } 841 }
842} 842}
843 843
844/** 844/*
845 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion 845 * LOGO needs to be sent to rport. Awaiting FC-4 offline completion
846 * callback. 846 * callback.
847 */ 847 */
@@ -864,7 +864,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
864 } 864 }
865} 865}
866 866
867/** 867/*
868 * Rport is going offline. Awaiting FC-4 offline completion callback. 868 * Rport is going offline. Awaiting FC-4 offline completion callback.
869 */ 869 */
870static void 870static void
@@ -886,7 +886,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
886 case RPSM_EVENT_LOGO_RCVD: 886 case RPSM_EVENT_LOGO_RCVD:
887 case RPSM_EVENT_PRLO_RCVD: 887 case RPSM_EVENT_PRLO_RCVD:
888 case RPSM_EVENT_ADDRESS_CHANGE: 888 case RPSM_EVENT_ADDRESS_CHANGE:
889 /** 889 /*
890 * rport is already going offline. 890 * rport is already going offline.
891 * SCN - ignore and wait till transitioning to offline state 891 * SCN - ignore and wait till transitioning to offline state
892 */ 892 */
@@ -901,7 +901,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
901 } 901 }
902} 902}
903 903
904/** 904/*
905 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 905 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
906 * callback. 906 * callback.
907 */ 907 */
@@ -945,7 +945,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
945 case RPSM_EVENT_SCN: 945 case RPSM_EVENT_SCN:
946 case RPSM_EVENT_LOGO_RCVD: 946 case RPSM_EVENT_LOGO_RCVD:
947 case RPSM_EVENT_PRLO_RCVD: 947 case RPSM_EVENT_PRLO_RCVD:
948 /** 948 /*
949 * Ignore, already offline. 949 * Ignore, already offline.
950 */ 950 */
951 break; 951 break;
@@ -955,7 +955,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
955 } 955 }
956} 956}
957 957
958/** 958/*
959 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline 959 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
960 * callback to send LOGO accept. 960 * callback to send LOGO accept.
961 */ 961 */
@@ -1009,7 +1009,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1009 1009
1010 case RPSM_EVENT_LOGO_RCVD: 1010 case RPSM_EVENT_LOGO_RCVD:
1011 case RPSM_EVENT_PRLO_RCVD: 1011 case RPSM_EVENT_PRLO_RCVD:
1012 /** 1012 /*
1013 * Ignore - already processing a LOGO. 1013 * Ignore - already processing a LOGO.
1014 */ 1014 */
1015 break; 1015 break;
@@ -1019,7 +1019,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1019 } 1019 }
1020} 1020}
1021 1021
1022/** 1022/*
1023 * Rport is being deleted. FC-4s are offline. 1023 * Rport is being deleted. FC-4s are offline.
1024 * Awaiting BFA rport offline 1024 * Awaiting BFA rport offline
1025 * callback to send LOGO. 1025 * callback to send LOGO.
@@ -1048,7 +1048,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1048 } 1048 }
1049} 1049}
1050 1050
1051/** 1051/*
1052 * Rport is being deleted. FC-4s are offline. LOGO is being sent. 1052 * Rport is being deleted. FC-4s are offline. LOGO is being sent.
1053 */ 1053 */
1054static void 1054static void
@@ -1082,7 +1082,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1082 } 1082 }
1083} 1083}
1084 1084
1085/** 1085/*
1086 * Rport is offline. FC-4s are offline. BFA rport is offline. 1086 * Rport is offline. FC-4s are offline. BFA rport is offline.
1087 * Timer active to delete stale rport. 1087 * Timer active to delete stale rport.
1088 */ 1088 */
@@ -1142,7 +1142,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1142 } 1142 }
1143} 1143}
1144 1144
1145/** 1145/*
1146 * Rport address has changed. Nameserver discovery request is being sent. 1146 * Rport address has changed. Nameserver discovery request is being sent.
1147 */ 1147 */
1148static void 1148static void
@@ -1199,7 +1199,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1199 } 1199 }
1200} 1200}
1201 1201
1202/** 1202/*
1203 * Nameserver discovery failed. Waiting for timeout to retry. 1203 * Nameserver discovery failed. Waiting for timeout to retry.
1204 */ 1204 */
1205static void 1205static void
@@ -1263,7 +1263,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1263 } 1263 }
1264} 1264}
1265 1265
1266/** 1266/*
1267 * Rport address has changed. Nameserver discovery request is sent. 1267 * Rport address has changed. Nameserver discovery request is sent.
1268 */ 1268 */
1269static void 1269static void
@@ -1329,13 +1329,13 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1329 bfa_fcs_rport_send_prlo_acc(rport); 1329 bfa_fcs_rport_send_prlo_acc(rport);
1330 break; 1330 break;
1331 case RPSM_EVENT_SCN: 1331 case RPSM_EVENT_SCN:
1332 /** 1332 /*
1333 * ignore, wait for NS query response 1333 * ignore, wait for NS query response
1334 */ 1334 */
1335 break; 1335 break;
1336 1336
1337 case RPSM_EVENT_LOGO_RCVD: 1337 case RPSM_EVENT_LOGO_RCVD:
1338 /** 1338 /*
1339 * Not logged-in yet. Accept LOGO. 1339 * Not logged-in yet. Accept LOGO.
1340 */ 1340 */
1341 bfa_fcs_rport_send_logo_acc(rport); 1341 bfa_fcs_rport_send_logo_acc(rport);
@@ -1354,7 +1354,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1354 1354
1355 1355
1356 1356
1357/** 1357/*
1358 * fcs_rport_private FCS RPORT private functions 1358
1359 */ 1359 */
1360 1360
@@ -1415,7 +1415,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1415 1415
1416 plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); 1416 plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
1417 1417
1418 /** 1418 /*
1419 * Check for failure first. 1419 * Check for failure first.
1420 */ 1420 */
1421 if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { 1421 if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
@@ -1436,7 +1436,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1436 return; 1436 return;
1437 } 1437 }
1438 1438
1439 /** 1439 /*
1440 * PLOGI is complete. Make sure this device is not one of the known 1440 * PLOGI is complete. Make sure this device is not one of the known
1441 * device with a new FC port address. 1441 * device with a new FC port address.
1442 */ 1442 */
@@ -1468,7 +1468,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1468 } 1468 }
1469 } 1469 }
1470 1470
1471 /** 1471 /*
1472 * Normal login path -- no evil twins. 1472 * Normal login path -- no evil twins.
1473 */ 1473 */
1474 rport->stats.plogi_accs++; 1474 rport->stats.plogi_accs++;
@@ -1722,7 +1722,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1722 } 1722 }
1723} 1723}
1724 1724
1725/** 1725/*
1726 * Called to send a logout to the rport. 1726 * Called to send a logout to the rport.
1727 */ 1727 */
1728static void 1728static void
@@ -1759,7 +1759,7 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1759 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); 1759 bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
1760} 1760}
1761 1761
1762/** 1762/*
1763 * Send ACC for a LOGO received. 1763 * Send ACC for a LOGO received.
1764 */ 1764 */
1765static void 1765static void
@@ -1788,7 +1788,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
1788 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1788 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
1789} 1789}
1790 1790
1791/** 1791/*
1792 * brief 1792 * brief
1793 * This routine will be called by bfa_timer on timer timeouts. 1793 * This routine will be called by bfa_timer on timer timeouts.
1794 * 1794 *
@@ -1961,7 +1961,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1961 struct bfa_fcs_rport_s *rport; 1961 struct bfa_fcs_rport_s *rport;
1962 struct bfad_rport_s *rport_drv; 1962 struct bfad_rport_s *rport_drv;
1963 1963
1964 /** 1964 /*
1965 * allocate rport 1965 * allocate rport
1966 */ 1966 */
1967 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) 1967 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
@@ -1979,7 +1979,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1979 rport->pid = rpid; 1979 rport->pid = rpid;
1980 rport->pwwn = pwwn; 1980 rport->pwwn = pwwn;
1981 1981
1982 /** 1982 /*
1983 * allocate BFA rport 1983 * allocate BFA rport
1984 */ 1984 */
1985 rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport); 1985 rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
@@ -1989,7 +1989,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1989 return NULL; 1989 return NULL;
1990 } 1990 }
1991 1991
1992 /** 1992 /*
1993 * allocate FC-4s 1993 * allocate FC-4s
1994 */ 1994 */
1995 bfa_assert(bfa_fcs_lport_is_initiator(port)); 1995 bfa_assert(bfa_fcs_lport_is_initiator(port));
@@ -2021,7 +2021,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
2021{ 2021{
2022 struct bfa_fcs_lport_s *port = rport->port; 2022 struct bfa_fcs_lport_s *port = rport->port;
2023 2023
2024 /** 2024 /*
2025 * - delete FC-4s 2025 * - delete FC-4s
2026 * - delete BFA rport 2026 * - delete BFA rport
2027 * - remove from queue of rports 2027 * - remove from queue of rports
@@ -2093,7 +2093,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2093 } 2093 }
2094} 2094}
2095 2095
2096/** 2096/*
2097 * Update rport parameters from PLOGI or PLOGI accept. 2097 * Update rport parameters from PLOGI or PLOGI accept.
2098 */ 2098 */
2099static void 2099static void
@@ -2101,14 +2101,14 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2101{ 2101{
2102 bfa_fcs_lport_t *port = rport->port; 2102 bfa_fcs_lport_t *port = rport->port;
2103 2103
2104 /** 2104 /*
2105 * - port name 2105 * - port name
2106 * - node name 2106 * - node name
2107 */ 2107 */
2108 rport->pwwn = plogi->port_name; 2108 rport->pwwn = plogi->port_name;
2109 rport->nwwn = plogi->node_name; 2109 rport->nwwn = plogi->node_name;
2110 2110
2111 /** 2111 /*
2112 * - class of service 2112 * - class of service
2113 */ 2113 */
2114 rport->fc_cos = 0; 2114 rport->fc_cos = 0;
@@ -2118,7 +2118,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2118 if (plogi->class2.class_valid) 2118 if (plogi->class2.class_valid)
2119 rport->fc_cos |= FC_CLASS_2; 2119 rport->fc_cos |= FC_CLASS_2;
2120 2120
2121 /** 2121 /*
2122 * - CISC 2122 * - CISC
2123 * - MAX receive frame size 2123 * - MAX receive frame size
2124 */ 2124 */
@@ -2127,7 +2127,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2127 2127
2128 bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); 2128 bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
2129 bfa_trc(port->fcs, port->fabric->bb_credit); 2129 bfa_trc(port->fcs, port->fabric->bb_credit);
2130 /** 2130 /*
2131 * Direct Attach P2P mode : 2131 * Direct Attach P2P mode :
2132 * This is to handle a bug (233476) in IBM targets in Direct Attach 2132 * This is to handle a bug (233476) in IBM targets in Direct Attach
2133 * Mode. Basically, in FLOGI Accept the target would have 2133 * Mode. Basically, in FLOGI Accept the target would have
@@ -2148,7 +2148,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2148 2148
2149} 2149}
2150 2150
2151/** 2151/*
2152 * Called to handle LOGO received from an existing remote port. 2152 * Called to handle LOGO received from an existing remote port.
2153 */ 2153 */
2154static void 2154static void
@@ -2164,11 +2164,11 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
2164 2164
2165 2165
2166 2166
2167/** 2167/*
2168 * fcs_rport_public FCS rport public interfaces 2168 * fcs_rport_public FCS rport public interfaces
2169 */ 2169 */
2170 2170
2171/** 2171/*
2172 * Called by bport/vport to create a remote port instance for a discovered 2172 * Called by bport/vport to create a remote port instance for a discovered
2173 * remote device. 2173 * remote device.
2174 * 2174 *
@@ -2191,7 +2191,7 @@ bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
2191 return rport; 2191 return rport;
2192} 2192}
2193 2193
2194/** 2194/*
2195 * Called to create a rport for which only the wwn is known. 2195 * Called to create a rport for which only the wwn is known.
2196 * 2196 *
2197 * @param[in] port - base port 2197 * @param[in] port - base port
@@ -2211,7 +2211,7 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
2211 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); 2211 bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
2212 return rport; 2212 return rport;
2213} 2213}
2214/** 2214/*
2215 * Called by bport in private loop topology to indicate that a 2215 * Called by bport in private loop topology to indicate that a
2216 * rport has been discovered and plogi has been completed. 2216 * rport has been discovered and plogi has been completed.
2217 * 2217 *
@@ -2233,7 +2233,7 @@ bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
2233 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); 2233 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
2234} 2234}
2235 2235
2236/** 2236/*
2237 * Called by bport/vport to handle PLOGI received from a new remote port. 2237 * Called by bport/vport to handle PLOGI received from a new remote port.
2238 * If an existing rport does a plogi, it will be handled separately. 2238 * If an existing rport does a plogi, it will be handled separately.
2239 */ 2239 */
@@ -2272,7 +2272,7 @@ wwn_compare(wwn_t wwn1, wwn_t wwn2)
2272 return 0; 2272 return 0;
2273} 2273}
2274 2274
2275/** 2275/*
2276 * Called by bport/vport to handle PLOGI received from an existing 2276 * Called by bport/vport to handle PLOGI received from an existing
2277 * remote port. 2277 * remote port.
2278 */ 2278 */
@@ -2280,7 +2280,7 @@ void
2280bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, 2280bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2281 struct fc_logi_s *plogi) 2281 struct fc_logi_s *plogi)
2282{ 2282{
2283 /** 2283 /*
2284 * @todo Handle P2P and initiator-initiator. 2284 * @todo Handle P2P and initiator-initiator.
2285 */ 2285 */
2286 2286
@@ -2289,7 +2289,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2289 rport->reply_oxid = rx_fchs->ox_id; 2289 rport->reply_oxid = rx_fchs->ox_id;
2290 bfa_trc(rport->fcs, rport->reply_oxid); 2290 bfa_trc(rport->fcs, rport->reply_oxid);
2291 2291
2292 /** 2292 /*
2293 * In Switched fabric topology, 2293 * In Switched fabric topology,
2294 * both ports may PLOGI to each other. If our pwwn is smaller, ignore it, 2294 * both ports may PLOGI to each other. If our pwwn is smaller, ignore it,
2295 * if it is not a well known address. 2295 * if it is not a well known address.
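The race called out in the comment above (both ports send PLOGI to each other, and the port with the smaller pwwn ignores the incoming one unless the peer is a well-known address) can be sketched in isolation. The types and names below are simplified stand-ins for illustration, not the driver's definitions:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t wwn_t;                 /* stand-in; the driver keeps WWNs in wire order */

/* memcmp-style result: negative, zero or positive */
static int wwn_compare(wwn_t a, wwn_t b)
{
        return (a < b) ? -1 : (a > b) ? 1 : 0;
}

/*
 * Ignore the peer's PLOGI when our pwwn is the smaller one (our own
 * outstanding PLOGI wins the race), unless the peer is a well-known
 * fabric address, which is always honored.
 */
static bool ignore_incoming_plogi(wwn_t my_pwwn, wwn_t peer_pwwn,
                                  bool peer_is_well_known)
{
        return !peer_is_well_known && wwn_compare(my_pwwn, peer_pwwn) < 0;
}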
@@ -2307,7 +2307,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2307 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); 2307 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
2308} 2308}
2309 2309
2310/** 2310/*
2311 * Called by bport/vport to delete a remote port instance. 2311 * Called by bport/vport to delete a remote port instance.
2312 * 2312 *
2313 * Rport delete is called under the following conditions: 2313 * Rport delete is called under the following conditions:
@@ -2321,7 +2321,7 @@ bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
2321 bfa_sm_send_event(rport, RPSM_EVENT_DELETE); 2321 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
2322} 2322}
2323 2323
2324/** 2324/*
2325 * Called by bport/vport when a target goes offline. 2325 * Called by bport/vport when a target goes offline.
2326 * 2326 *
2327 */ 2327 */
@@ -2331,7 +2331,7 @@ bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
2331 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2331 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
2332} 2332}
2333 2333
2334/** 2334/*
2335 * Called by bport in n2n when a target (attached port) becomes online. 2335 * Called by bport in n2n when a target (attached port) becomes online.
2336 * 2336 *
2337 */ 2337 */
@@ -2340,7 +2340,7 @@ bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
2340{ 2340{
2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); 2341 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
2342} 2342}
2343/** 2343/*
2344 * Called by bport/vport to notify SCN for the remote port 2344 * Called by bport/vport to notify SCN for the remote port
2345 */ 2345 */
2346void 2346void
@@ -2350,7 +2350,7 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2350 bfa_sm_send_event(rport, RPSM_EVENT_SCN);
2351} 2351}
2352 2352
2353/** 2353/*
2354 * Called by fcpim to notify that the ITN cleanup is done. 2354 * Called by fcpim to notify that the ITN cleanup is done.
2355 */ 2355 */
2356void 2356void
@@ -2359,7 +2359,7 @@ bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
2359 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2359 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
2360} 2360}
2361 2361
2362/** 2362/*
2363 * Called by fcptm to notify that the ITN cleanup is done. 2363 * Called by fcptm to notify that the ITN cleanup is done.
2364 */ 2364 */
2365void 2365void
@@ -2368,7 +2368,7 @@ bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
2368 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); 2368 bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
2369} 2369}
2370 2370
2371/** 2371/*
2372 * brief 2372 * brief
2373 * This routine is the BFA callback for the bfa_rport_online() call. 2373 * This routine is the BFA callback for the bfa_rport_online() call.
2374 * 2374 *
@@ -2391,7 +2391,7 @@ bfa_cb_rport_online(void *cbarg)
2391 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); 2391 bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
2392} 2392}
2393 2393
2394/** 2394/*
2395 * brief 2395 * brief
2396 * This routine is the BFA callback for the bfa_rport_offline() call. 2396 * This routine is the BFA callback for the bfa_rport_offline() call.
2397 * 2397 *
@@ -2413,7 +2413,7 @@ bfa_cb_rport_offline(void *cbarg)
2413 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); 2413 bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
2414} 2414}
2415 2415
2416/** 2416/*
2417 * brief 2417 * brief
2418 * This routine is a static BFA callback when there is a QoS flow_id 2418 * This routine is a static BFA callback when there is a QoS flow_id
2419 * change notification 2419 * change notification
@@ -2437,7 +2437,7 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
2437 bfa_trc(rport->fcs, rport->pwwn); 2437 bfa_trc(rport->fcs, rport->pwwn);
2438} 2438}
2439 2439
2440/** 2440/*
2441 * brief 2441 * brief
2442 * This routine is a static BFA callback when there is a QoS priority 2442 * This routine is a static BFA callback when there is a QoS priority
2443 * change notification 2443 * change notification
@@ -2461,7 +2461,7 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
2461 bfa_trc(rport->fcs, rport->pwwn); 2461 bfa_trc(rport->fcs, rport->pwwn);
2462} 2462}
2463 2463
2464/** 2464/*
2465 * Called to process any unsolicited frames from this remote port 2465 * Called to process any unsolicited frames from this remote port
2466 */ 2466 */
2467void 2467void
@@ -2470,7 +2470,7 @@ bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
2470 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); 2470 bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
2471} 2471}
2472 2472
2473/** 2473/*
2474 * Called to process any unsolicited frames from this remote port 2474 * Called to process any unsolicited frames from this remote port
2475 */ 2475 */
2476void 2476void
@@ -2577,7 +2577,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2577 FC_MAX_PDUSZ, 0); 2577 FC_MAX_PDUSZ, 0);
2578} 2578}
2579 2579
2580/** 2580/*
2581 * Return state of rport. 2581 * Return state of rport.
2582 */ 2582 */
2583int 2583int
@@ -2586,7 +2586,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
2586 return bfa_sm_to_state(rport_sm_table, rport->sm); 2586 return bfa_sm_to_state(rport_sm_table, rport->sm);
2587} 2587}
2588 2588
2589/** 2589/*
2590 * brief 2590 * brief
2591 * Called by the Driver to set rport delete/ageout timeout 2591 * Called by the Driver to set rport delete/ageout timeout
2592 * 2592 *
@@ -2613,15 +2613,15 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
2613 2613
2614 2614
2615 2615
2616/** 2616/*
2617 * Remote port implementation. 2617 * Remote port implementation.
2618 */ 2618 */
2619 2619
2620/** 2620/*
2621 * fcs_rport_api FCS rport API. 2621 * fcs_rport_api FCS rport API.
2622 */ 2622 */
2623 2623
2624/** 2624/*
2625 * Direct API to add a target by port wwn. This interface is used, for 2625 * Direct API to add a target by port wwn. This interface is used, for
2626 * example, by bios when target pwwn is known from boot lun configuration. 2626 * example, by bios when target pwwn is known from boot lun configuration.
2627 */ 2627 */
@@ -2634,7 +2634,7 @@ bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
2634 return BFA_STATUS_OK; 2634 return BFA_STATUS_OK;
2635} 2635}
2636 2636
2637/** 2637/*
2638 * Direct API to remove a target and its associated resources. This 2638 * Direct API to remove a target and its associated resources. This
2639 * interface is used, for example, by driver to remove target 2639 * interface is used, for example, by driver to remove target
2640 * ports from the target list for a VM. 2640 * ports from the target list for a VM.
@@ -2663,7 +2663,7 @@ bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
2663 2663
2664} 2664}
2665 2665
2666/** 2666/*
2667 * Remote device status for display/debug. 2667 * Remote device status for display/debug.
2668 */ 2668 */
2669void 2669void
@@ -2704,7 +2704,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2704 } 2704 }
2705} 2705}
2706 2706
2707/** 2707/*
2708 * Per remote device statistics. 2708 * Per remote device statistics.
2709 */ 2709 */
2710void 2710void
@@ -2767,7 +2767,7 @@ bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
2767 2767
2768 2768
2769 2769
2770/** 2770/*
2771 * Remote port features (RPF) implementation. 2771 * Remote port features (RPF) implementation.
2772 */ 2772 */
2773 2773
@@ -2786,7 +2786,7 @@ static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
2786 2786
2787static void bfa_fcs_rpf_timeout(void *arg); 2787static void bfa_fcs_rpf_timeout(void *arg);
2788 2788
2789/** 2789/*
2790 * fcs_rport_ftrs_sm FCS rport state machine events 2790 * fcs_rport_ftrs_sm FCS rport state machine events
2791 */ 2791 */
2792 2792
@@ -2981,7 +2981,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
2981 bfa_sm_fault(rport->fcs, event); 2981 bfa_sm_fault(rport->fcs, event);
2982 } 2982 }
2983} 2983}
2984/** 2984/*
2985 * Called when Rport is created. 2985 * Called when Rport is created.
2986 */ 2986 */
2987void 2987void
@@ -2995,7 +2995,7 @@ bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
2995 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); 2995 bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
2996} 2996}
2997 2997
2998/** 2998/*
2999 * Called when Rport becomes online 2999 * Called when Rport becomes online
3000 */ 3000 */
3001void 3001void
@@ -3010,7 +3010,7 @@ bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
3010 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); 3010 bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
3011} 3011}
3012 3012
3013/** 3013/*
3014 * Called when Rport becomes offline 3014 * Called when Rport becomes offline
3015 */ 3015 */
3016void 3016void
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index 422e44cfa074..d8464ae60070 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -102,7 +102,7 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
102 *num_vecs = __HFN_NUMINTS; 102 *num_vecs = __HFN_NUMINTS;
103} 103}
104 104
105/** 105/*
106 * No special setup required for crossbow -- vector assignments are implicit. 106 * No special setup required for crossbow -- vector assignments are implicit.
107 */ 107 */
108void 108void
@@ -129,7 +129,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
129 bfa->msix.handler[i] = bfa_msix_lpu_err; 129 bfa->msix.handler[i] = bfa_msix_lpu_err;
130} 130}
131 131
132/** 132/*
133 * Crossbow -- dummy, interrupts are masked 133 * Crossbow -- dummy, interrupts are masked
134 */ 134 */
135void 135void
@@ -142,7 +142,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
142{ 142{
143} 143}
144 144
145/** 145/*
146 * No special enable/disable -- vector assignments are implicit. 146 * No special enable/disable -- vector assignments are implicit.
147 */ 147 */
148void 148void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 81e670e9c6a6..b0efbc713ffe 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -39,7 +39,7 @@ bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
39 writel(0, kva + __ct_msix_err_vec_reg[fn]); 39 writel(0, kva + __ct_msix_err_vec_reg[fn]);
40} 40}
41 41
42/** 42/*
43 * Dummy interrupt handler for handling spurious interrupt during chip-reinit. 43 * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
44 */ 44 */
45static void 45static void
@@ -110,7 +110,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
110 *num_vecs = BFA_MSIX_CT_MAX; 110 *num_vecs = BFA_MSIX_CT_MAX;
111} 111}
112 112
113/** 113/*
114 * Setup MSI-X vector for catapult 114 * Setup MSI-X vector for catapult
115 */ 115 */
116void 116void
@@ -156,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
156 bfa->msix.handler[i] = bfa_hwct_msix_dummy; 156 bfa->msix.handler[i] = bfa_hwct_msix_dummy;
157} 157}
158 158
159/** 159/*
160 * Enable MSI-X vectors 160 * Enable MSI-X vectors
161 */ 161 */
162void 162void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index fddab8857e8c..54475b53a5ab 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -23,7 +23,7 @@
23 23
24BFA_TRC_FILE(CNA, IOC); 24BFA_TRC_FILE(CNA, IOC);
25 25
26/** 26/*
27 * IOC local definitions 27 * IOC local definitions
28 */ 28 */
29#define BFA_IOC_TOV 3000 /* msecs */ 29#define BFA_IOC_TOV 3000 /* msecs */
@@ -49,7 +49,7 @@ BFA_TRC_FILE(CNA, IOC);
49 BFA_TRC_MAX * sizeof(struct bfa_trc_s))) 49 BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
50#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 50#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
51 51
52/** 52/*
53 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 53 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
54 */ 54 */
55 55
@@ -101,11 +101,11 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
101static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc); 101static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
102static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); 102static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
103 103
104/** 104/*
105 * hal_ioc_sm 105 * hal_ioc_sm
106 */ 106 */
107 107
108/** 108/*
109 * IOC state machine definitions/declarations 109 * IOC state machine definitions/declarations
110 */ 110 */
111enum ioc_event { 111enum ioc_event {
@@ -144,7 +144,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
145}; 145};
146 146
147/** 147/*
148 * IOCPF state machine definitions/declarations 148 * IOCPF state machine definitions/declarations
149 */ 149 */
150 150
@@ -174,7 +174,7 @@ static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
174static void bfa_iocpf_timeout(void *ioc_arg); 174static void bfa_iocpf_timeout(void *ioc_arg);
175static void bfa_iocpf_sem_timeout(void *ioc_arg); 175static void bfa_iocpf_sem_timeout(void *ioc_arg);
176 176
177/** 177/*
178 * IOCPF state machine events 178 * IOCPF state machine events
179 */ 179 */
180enum iocpf_event { 180enum iocpf_event {
@@ -191,7 +191,7 @@ enum iocpf_event {
191 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ 191 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
192}; 192};
193 193
194/** 194/*
195 * IOCPF states 195 * IOCPF states
196 */ 196 */
197enum bfa_iocpf_state { 197enum bfa_iocpf_state {
@@ -232,11 +232,11 @@ static struct bfa_sm_table_s iocpf_sm_table[] = {
232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, 232 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
233}; 233};
234 234
235/** 235/*
236 * IOC State Machine 236 * IOC State Machine
237 */ 237 */
238 238
239/** 239/*
240 * Beginning state. IOC uninit state. 240 * Beginning state. IOC uninit state.
241 */ 241 */
242 242
@@ -245,7 +245,7 @@ bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
245{ 245{
246} 246}
247 247
248/** 248/*
249 * IOC is in uninit state. 249 * IOC is in uninit state.
250 */ 250 */
251static void 251static void
@@ -262,7 +262,7 @@ bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
262 bfa_sm_fault(ioc, event); 262 bfa_sm_fault(ioc, event);
263 } 263 }
264} 264}
265/** 265/*
266 * Reset entry actions -- initialize state machine 266 * Reset entry actions -- initialize state machine
267 */ 267 */
268static void 268static void
@@ -271,7 +271,7 @@ bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
271 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); 271 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
272} 272}
273 273
274/** 274/*
275 * IOC is in reset state. 275 * IOC is in reset state.
276 */ 276 */
277static void 277static void
@@ -304,7 +304,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
304 bfa_iocpf_enable(ioc); 304 bfa_iocpf_enable(ioc);
305} 305}
306 306
307/** 307/*
308 * Host IOC function is being enabled, awaiting response from firmware. 308 * Host IOC function is being enabled, awaiting response from firmware.
309 * Semaphore is acquired. 309 * Semaphore is acquired.
310 */ 310 */
@@ -352,7 +352,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
352 bfa_ioc_send_getattr(ioc); 352 bfa_ioc_send_getattr(ioc);
353} 353}
354 354
355/** 355/*
356 * IOC configuration in progress. Timer is active. 356 * IOC configuration in progress. Timer is active.
357 */ 357 */
358static void 358static void
@@ -447,7 +447,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
447 BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n"); 447 BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
448} 448}
449 449
450/** 450/*
451 * IOC is being disabled 451 * IOC is being disabled
452 */ 452 */
453static void 453static void
@@ -474,7 +474,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
474 } 474 }
475} 475}
476 476
477/** 477/*
478 * IOC disable completion entry. 478 * IOC disable completion entry.
479 */ 479 */
480static void 480static void
@@ -514,7 +514,7 @@ bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 514 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
515} 515}
516 516
517/** 517/*
518 * Hardware initialization failed. 518 * Hardware initialization failed.
519 */ 519 */
520static void 520static void
@@ -528,7 +528,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
528 break; 528 break;
529 529
530 case IOC_E_FAILED: 530 case IOC_E_FAILED:
531 /** 531 /*
532 * Initialization failure during iocpf init retry. 532 * Initialization failure during iocpf init retry.
533 */ 533 */
534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -556,7 +556,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
556 struct bfa_ioc_hbfail_notify_s *notify; 556 struct bfa_ioc_hbfail_notify_s *notify;
557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 557 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
558 558
559 /** 559 /*
560 * Notify driver and common modules registered for notification. 560 * Notify driver and common modules registered for notification.
561 */ 561 */
562 ioc->cbfn->hbfail_cbfn(ioc->bfa); 562 ioc->cbfn->hbfail_cbfn(ioc->bfa);
@@ -569,7 +569,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
569 "Heart Beat of IOC has failed\n"); 569 "Heart Beat of IOC has failed\n");
570} 570}
571 571
572/** 572/*
573 * IOC failure. 573 * IOC failure.
574 */ 574 */
575static void 575static void
@@ -580,7 +580,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
580 switch (event) { 580 switch (event) {
581 581
582 case IOC_E_FAILED: 582 case IOC_E_FAILED:
583 /** 583 /*
584 * Initialization failure during iocpf recovery. 584 * Initialization failure during iocpf recovery.
585 * !!! Fall through !!! 585 * !!! Fall through !!!
586 */ 586 */
@@ -608,12 +608,12 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
608 608
609 609
610 610
611/** 611/*
612 * IOCPF State Machine 612 * IOCPF State Machine
613 */ 613 */
614 614
615 615
616/** 616/*
617 * Reset entry actions -- initialize state machine 617 * Reset entry actions -- initialize state machine
618 */ 618 */
619static void 619static void
@@ -623,7 +623,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
623 iocpf->auto_recover = bfa_auto_recover; 623 iocpf->auto_recover = bfa_auto_recover;
624} 624}
625 625
626/** 626/*
627 * Beginning state. IOC is in reset state. 627 * Beginning state. IOC is in reset state.
628 */ 628 */
629static void 629static void
@@ -646,7 +646,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
646 } 646 }
647} 647}
648 648
649/** 649/*
650 * Semaphore should be acquired for version check. 650 * Semaphore should be acquired for version check.
651 */ 651 */
652static void 652static void
@@ -655,7 +655,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
655 bfa_ioc_hw_sem_get(iocpf->ioc); 655 bfa_ioc_hw_sem_get(iocpf->ioc);
656} 656}
657 657
658/** 658/*
659 * Awaiting h/w semaphore to continue with version check. 659 * Awaiting h/w semaphore to continue with version check.
660 */ 660 */
661static void 661static void
@@ -692,7 +692,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
692 } 692 }
693} 693}
694 694
695/** 695/*
696 * Notify enable completion callback. 696 * Notify enable completion callback.
697 */ 697 */
698static void 698static void
@@ -708,7 +708,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
708 bfa_iocpf_timer_start(iocpf->ioc); 708 bfa_iocpf_timer_start(iocpf->ioc);
709} 709}
710 710
711/** 711/*
712 * Awaiting firmware version match. 712 * Awaiting firmware version match.
713 */ 713 */
714static void 714static void
@@ -739,7 +739,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
739 } 739 }
740} 740}
741 741
742/** 742/*
743 * Request for semaphore. 743 * Request for semaphore.
744 */ 744 */
745static void 745static void
@@ -748,7 +748,7 @@ bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
748 bfa_ioc_hw_sem_get(iocpf->ioc); 748 bfa_ioc_hw_sem_get(iocpf->ioc);
749} 749}
750 750
751/** 751/*
752 * Awaiting semaphore for h/w initialization. 752 * Awaiting semaphore for h/w initialization.
753 */ 753 */
754static void 754static void
@@ -782,7 +782,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE); 782 bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
783} 783}
784 784
785/** 785/*
786 * Hardware is being initialized. Interrupts are enabled. 786 * Hardware is being initialized. Interrupts are enabled.
787 * Holding hardware semaphore lock. 787 * Holding hardware semaphore lock.
788 */ 788 */
@@ -839,7 +839,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
839 bfa_ioc_send_enable(iocpf->ioc); 839 bfa_ioc_send_enable(iocpf->ioc);
840} 840}
841 841
842/** 842/*
843 * Host IOC function is being enabled, awaiting response from firmware. 843 * Host IOC function is being enabled, awaiting response from firmware.
844 * Semaphore is acquired. 844 * Semaphore is acquired.
845 */ 845 */
@@ -943,7 +943,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
943 bfa_ioc_send_disable(iocpf->ioc); 943 bfa_ioc_send_disable(iocpf->ioc);
944} 944}
945 945
946/** 946/*
947 * IOC is being disabled 947 * IOC is being disabled
948 */ 948 */
949static void 949static void
@@ -979,7 +979,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
979 } 979 }
980} 980}
981 981
982/** 982/*
983 * IOC disable completion entry. 983 * IOC disable completion entry.
984 */ 984 */
985static void 985static void
@@ -1017,7 +1017,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1017 bfa_iocpf_timer_start(iocpf->ioc); 1017 bfa_iocpf_timer_start(iocpf->ioc);
1018} 1018}
1019 1019
1020/** 1020/*
1021 * Hardware initialization failed. 1021 * Hardware initialization failed.
1022 */ 1022 */
1023static void 1023static void
@@ -1052,18 +1052,18 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1052static void 1052static void
1053bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) 1053bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1054{ 1054{
1055 /** 1055 /*
1056 * Mark IOC as failed in hardware and stop firmware. 1056 * Mark IOC as failed in hardware and stop firmware.
1057 */ 1057 */
1058 bfa_ioc_lpu_stop(iocpf->ioc); 1058 bfa_ioc_lpu_stop(iocpf->ioc);
1059 writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate); 1059 writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
1060 1060
1061 /** 1061 /*
1062 * Notify other functions on HB failure. 1062 * Notify other functions on HB failure.
1063 */ 1063 */
1064 bfa_ioc_notify_hbfail(iocpf->ioc); 1064 bfa_ioc_notify_hbfail(iocpf->ioc);
1065 1065
1066 /** 1066 /*
1067 * Flush any queued up mailbox requests. 1067 * Flush any queued up mailbox requests.
1068 */ 1068 */
1069 bfa_ioc_mbox_hbfail(iocpf->ioc); 1069 bfa_ioc_mbox_hbfail(iocpf->ioc);
@@ -1072,7 +1072,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1072 bfa_iocpf_recovery_timer_start(iocpf->ioc); 1072 bfa_iocpf_recovery_timer_start(iocpf->ioc);
1073} 1073}
1074 1074
1075/** 1075/*
1076 * IOC is in failed state. 1076 * IOC is in failed state.
1077 */ 1077 */
1078static void 1078static void
@@ -1100,7 +1100,7 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1100 1100
1101 1101
1102 1102
1103/** 1103/*
1104 * hal_ioc_pvt BFA IOC private functions 1104 * hal_ioc_pvt BFA IOC private functions
1105 */ 1105 */
1106 1106
@@ -1112,7 +1112,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1112 1112
1113 ioc->cbfn->disable_cbfn(ioc->bfa); 1113 ioc->cbfn->disable_cbfn(ioc->bfa);
1114 1114
1115 /** 1115 /*
1116 * Notify common modules registered for notification. 1116 * Notify common modules registered for notification.
1117 */ 1117 */
1118 list_for_each(qe, &ioc->hb_notify_q) { 1118 list_for_each(qe, &ioc->hb_notify_q) {
@@ -1154,7 +1154,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1154{ 1154{
1155 u32 r32; 1155 u32 r32;
1156 1156
1157 /** 1157 /*
1158 * First read to the semaphore register will return 0, subsequent reads 1158 * First read to the semaphore register will return 0, subsequent reads
1159 * will return 1. Semaphore is released by writing 1 to the register 1159 * will return 1. Semaphore is released by writing 1 to the register
1160 */ 1160 */
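The register convention described in this hunk (the first read of a free semaphore returns 0 and grants ownership, later reads return 1, and a write of 1 releases it) can be modelled without the hardware. The struct below is a plain-memory stand-in for the memory-mapped register, not the driver's ioc_regs access:

#include <stdbool.h>
#include <stdint.h>

/* Plain-memory model of the hardware semaphore register. */
struct sem_reg {
        uint32_t held;          /* 0 = free, 1 = owned */
};

/* A read returns the previous value and marks the semaphore held, so the
 * first reader sees 0 (and now owns it) while later readers see 1. */
static uint32_t sem_read(struct sem_reg *r)
{
        uint32_t old = r->held;
        r->held = 1;
        return old;
}

static bool sem_try_acquire(struct sem_reg *r)
{
        return sem_read(r) == 0;
}

/* The hardware is released by writing 1 to the register; in this model
 * that simply marks the semaphore free again. */
static void sem_release(struct sem_reg *r)
{
        r->held = 0;
}

This also fits the retry visible further down in bfa_iocpf_sem_timeout(), which simply issues another bfa_ioc_hw_sem_get() when the semaphore was not obtained in time.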
@@ -1179,7 +1179,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
1179 bfa_sem_timer_stop(ioc); 1179 bfa_sem_timer_stop(ioc);
1180} 1180}
1181 1181
1182/** 1182/*
1183 * Initialize LPU local memory (aka secondary memory / SRAM) 1183 * Initialize LPU local memory (aka secondary memory / SRAM)
1184 */ 1184 */
1185static void 1185static void
@@ -1199,7 +1199,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1199 pss_ctl |= __PSS_I2C_CLK_DIV(3UL); 1199 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1200 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1200 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1201 1201
1202 /** 1202 /*
1203 * wait for memory initialization to be complete 1203 * wait for memory initialization to be complete
1204 */ 1204 */
1205 i = 0; 1205 i = 0;
@@ -1208,7 +1208,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1208 i++; 1208 i++;
1209 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); 1209 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1210 1210
1211 /** 1211 /*
1212 * If memory initialization is not successful, IOC timeout will catch 1212 * If memory initialization is not successful, IOC timeout will catch
1213 * such failures. 1213 * such failures.
1214 */ 1214 */
@@ -1224,7 +1224,7 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1224{ 1224{
1225 u32 pss_ctl; 1225 u32 pss_ctl;
1226 1226
1227 /** 1227 /*
1228 * Take processor out of reset. 1228 * Take processor out of reset.
1229 */ 1229 */
1230 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1230 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
@@ -1238,7 +1238,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1238{ 1238{
1239 u32 pss_ctl; 1239 u32 pss_ctl;
1240 1240
1241 /** 1241 /*
1242 * Put processors in reset. 1242 * Put processors in reset.
1243 */ 1243 */
1244 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); 1244 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
@@ -1247,7 +1247,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1247 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); 1247 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1248} 1248}
1249 1249
1250/** 1250/*
1251 * Get driver and firmware versions. 1251 * Get driver and firmware versions.
1252 */ 1252 */
1253void 1253void
@@ -1270,7 +1270,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1270 } 1270 }
1271} 1271}
1272 1272
1273/** 1273/*
1274 * Returns TRUE if same. 1274 * Returns TRUE if same.
1275 */ 1275 */
1276bfa_boolean_t 1276bfa_boolean_t
@@ -1295,7 +1295,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1295 return BFA_TRUE; 1295 return BFA_TRUE;
1296} 1296}
1297 1297
1298/** 1298/*
1299 * Return true if current running version is valid. Firmware signature and 1299 * Return true if current running version is valid. Firmware signature and
1300 * execution context (driver/bios) must match. 1300 * execution context (driver/bios) must match.
1301 */ 1301 */
@@ -1304,7 +1304,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1304{ 1304{
1305 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; 1305 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1306 1306
1307 /** 1307 /*
1308 * If bios/efi boot (flash based) -- return true 1308 * If bios/efi boot (flash based) -- return true
1309 */ 1309 */
1310 if (bfa_ioc_is_bios_optrom(ioc)) 1310 if (bfa_ioc_is_bios_optrom(ioc))
@@ -1329,7 +1329,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1329 return bfa_ioc_fwver_cmp(ioc, &fwhdr); 1329 return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1330} 1330}
1331 1331
1332/** 1332/*
1333 * Conditionally flush any pending message from firmware at start. 1333 * Conditionally flush any pending message from firmware at start.
1334 */ 1334 */
1335static void 1335static void
@@ -1361,7 +1361,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1361 boot_type = BFI_BOOT_TYPE_NORMAL; 1361 boot_type = BFI_BOOT_TYPE_NORMAL;
1362 boot_env = BFI_BOOT_LOADER_OS; 1362 boot_env = BFI_BOOT_LOADER_OS;
1363 1363
1364 /** 1364 /*
1365 * Flash based firmware boot BIOS env. 1365 * Flash based firmware boot BIOS env.
1366 */ 1366 */
1367 if (bfa_ioc_is_bios_optrom(ioc)) { 1367 if (bfa_ioc_is_bios_optrom(ioc)) {
@@ -1369,7 +1369,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1369 boot_env = BFI_BOOT_LOADER_BIOS; 1369 boot_env = BFI_BOOT_LOADER_BIOS;
1370 } 1370 }
1371 1371
1372 /** 1372 /*
1373 * Flash based firmware boot UEFI env. 1373 * Flash based firmware boot UEFI env.
1374 */ 1374 */
1375 if (bfa_ioc_is_uefi(ioc)) { 1375 if (bfa_ioc_is_uefi(ioc)) {
@@ -1377,7 +1377,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1377 boot_env = BFI_BOOT_LOADER_UEFI; 1377 boot_env = BFI_BOOT_LOADER_UEFI;
1378 } 1378 }
1379 1379
1380 /** 1380 /*
1381 * check if firmware is valid 1381 * check if firmware is valid
1382 */ 1382 */
1383 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 1383 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
@@ -1388,7 +1388,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1388 return; 1388 return;
1389 } 1389 }
1390 1390
1391 /** 1391 /*
1392 * If hardware initialization is in progress (initialized by other IOC), 1392 * If hardware initialization is in progress (initialized by other IOC),
1393 * just wait for an initialization completion interrupt. 1393 * just wait for an initialization completion interrupt.
1394 */ 1394 */
@@ -1397,7 +1397,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1397 return; 1397 return;
1398 } 1398 }
1399 1399
1400 /** 1400 /*
1401 * If IOC function is disabled and firmware version is same, 1401 * If IOC function is disabled and firmware version is same,
1402 * just re-enable IOC. 1402 * just re-enable IOC.
1403 * 1403 *
@@ -1408,7 +1408,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1408 if (ioc_fwstate == BFI_IOC_DISABLED || 1408 if (ioc_fwstate == BFI_IOC_DISABLED ||
1409 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { 1409 (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
1410 1410
1411 /** 1411 /*
1412 * When using MSI-X any pending firmware ready event should 1412 * When using MSI-X any pending firmware ready event should
1413 * be flushed. Otherwise MSI-X interrupts are not delivered. 1413 * be flushed. Otherwise MSI-X interrupts are not delivered.
1414 */ 1414 */
@@ -1418,7 +1418,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1418 return; 1418 return;
1419 } 1419 }
1420 1420
1421 /** 1421 /*
1422 * Initialize the h/w for any other states. 1422 * Initialize the h/w for any other states.
1423 */ 1423 */
1424 bfa_ioc_boot(ioc, boot_type, boot_env); 1424 bfa_ioc_boot(ioc, boot_type, boot_env);
@@ -1529,7 +1529,7 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1529} 1529}
1530 1530
1531 1531
1532/** 1532/*
1533 * Initiate a full firmware download. 1533 * Initiate a full firmware download.
1534 */ 1534 */
1535static void 1535static void
@@ -1542,7 +1542,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1542 u32 chunkno = 0; 1542 u32 chunkno = 0;
1543 u32 i; 1543 u32 i;
1544 1544
1545 /** 1545 /*
1546 * Initialize LMEM first before code download 1546 * Initialize LMEM first before code download
1547 */ 1547 */
1548 bfa_ioc_lmem_init(ioc); 1548 bfa_ioc_lmem_init(ioc);
@@ -1563,7 +1563,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1563 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1563 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1564 } 1564 }
1565 1565
1566 /** 1566 /*
1567 * write smem 1567 * write smem
1568 */ 1568 */
1569 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 1569 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
@@ -1571,7 +1571,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1571 1571
1572 loff += sizeof(u32); 1572 loff += sizeof(u32);
1573 1573
1574 /** 1574 /*
1575 * handle page offset wrap around 1575 * handle page offset wrap around
1576 */ 1576 */
1577 loff = PSS_SMEM_PGOFF(loff); 1577 loff = PSS_SMEM_PGOFF(loff);
@@ -1598,7 +1598,7 @@ bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1598 bfa_ioc_hwinit(ioc, force); 1598 bfa_ioc_hwinit(ioc, force);
1599} 1599}
1600 1600
1601/** 1601/*
1602 * Update BFA configuration from firmware configuration. 1602 * Update BFA configuration from firmware configuration.
1603 */ 1603 */
1604static void 1604static void
@@ -1613,7 +1613,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1613 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1613 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1614} 1614}
1615 1615
1616/** 1616/*
1617 * Attach time initialization of mbox logic. 1617 * Attach time initialization of mbox logic.
1618 */ 1618 */
1619static void 1619static void
@@ -1629,7 +1629,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1629 } 1629 }
1630} 1630}
1631 1631
1632/** 1632/*
1633 * Mbox poll timer -- restarts any pending mailbox requests. 1633 * Mbox poll timer -- restarts any pending mailbox requests.
1634 */ 1634 */
1635static void 1635static void
@@ -1639,27 +1639,27 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1639 struct bfa_mbox_cmd_s *cmd; 1639 struct bfa_mbox_cmd_s *cmd;
1640 u32 stat; 1640 u32 stat;
1641 1641
1642 /** 1642 /*
1643 * If no command pending, do nothing 1643 * If no command pending, do nothing
1644 */ 1644 */
1645 if (list_empty(&mod->cmd_q)) 1645 if (list_empty(&mod->cmd_q))
1646 return; 1646 return;
1647 1647
1648 /** 1648 /*
1649 * If previous command is not yet fetched by firmware, do nothing 1649 * If previous command is not yet fetched by firmware, do nothing
1650 */ 1650 */
1651 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 1651 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1652 if (stat) 1652 if (stat)
1653 return; 1653 return;
1654 1654
1655 /** 1655 /*
1656 * Enqueue command to firmware. 1656 * Enqueue command to firmware.
1657 */ 1657 */
1658 bfa_q_deq(&mod->cmd_q, &cmd); 1658 bfa_q_deq(&mod->cmd_q, &cmd);
1659 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 1659 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1660} 1660}
1661 1661
1662/** 1662/*
1663 * Cleanup any pending requests. 1663 * Cleanup any pending requests.
1664 */ 1664 */
1665static void 1665static void
@@ -1672,7 +1672,7 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1672 bfa_q_deq(&mod->cmd_q, &cmd); 1672 bfa_q_deq(&mod->cmd_q, &cmd);
1673} 1673}
1674 1674
1675/** 1675/*
1676 * Read data from SMEM to host through PCI memmap 1676 * Read data from SMEM to host through PCI memmap
1677 * 1677 *
1678 * @param[in] ioc memory for IOC 1678 * @param[in] ioc memory for IOC
@@ -1710,7 +1710,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1710 buf[i] = be32_to_cpu(r32); 1710 buf[i] = be32_to_cpu(r32);
1711 loff += sizeof(u32); 1711 loff += sizeof(u32);
1712 1712
1713 /** 1713 /*
1714 * handle page offset wrap around 1714 * handle page offset wrap around
1715 */ 1715 */
1716 loff = PSS_SMEM_PGOFF(loff); 1716 loff = PSS_SMEM_PGOFF(loff);
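The "handle page offset wrap around" step that recurs in this file (here and in the firmware download loop earlier) amounts to walking a flat offset through a fixed-size window and re-selecting the page whenever the in-page offset wraps. A standalone sketch with an illustrative page size, not the chip's real constant:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 0x8000u         /* illustrative window size only */

/* Walk nwords 32-bit words starting at a flat offset, advancing the page
 * whenever the in-page offset wraps back to zero; a real implementation
 * would access (page, loff) through the PCI memory map inside the loop. */
static void smem_walk(uint32_t start, uint32_t nwords)
{
        uint32_t page = start / PAGE_SZ;
        uint32_t loff = start % PAGE_SZ;
        uint32_t i;

        for (i = 0; i < nwords; i++) {
                loff += sizeof(uint32_t);
                if (loff == PAGE_SZ) {  /* page offset wrap around */
                        loff = 0;
                        page++;         /* re-program the page register here */
                }
        }
        printf("finished at page %u, offset 0x%x\n",
               (unsigned)page, (unsigned)loff);
}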
@@ -1729,7 +1729,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1729 return BFA_STATUS_OK; 1729 return BFA_STATUS_OK;
1730} 1730}
1731 1731
1732/** 1732/*
1733 * Clear SMEM data from host through PCI memmap 1733 * Clear SMEM data from host through PCI memmap
1734 * 1734 *
1735 * @param[in] ioc memory for IOC 1735 * @param[in] ioc memory for IOC
@@ -1764,7 +1764,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1764 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); 1764 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1765 loff += sizeof(u32); 1765 loff += sizeof(u32);
1766 1766
1767 /** 1767 /*
1768 * handle page offset wrap around 1768 * handle page offset wrap around
1769 */ 1769 */
1770 loff = PSS_SMEM_PGOFF(loff); 1770 loff = PSS_SMEM_PGOFF(loff);
@@ -1783,7 +1783,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1783 return BFA_STATUS_OK; 1783 return BFA_STATUS_OK;
1784} 1784}
1785 1785
1786/** 1786/*
1787 * hal iocpf to ioc interface 1787 * hal iocpf to ioc interface
1788 */ 1788 */
1789static void 1789static void
@@ -1808,7 +1808,7 @@ static void
1808bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) 1808bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1809{ 1809{
1810 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 1810 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1811 /** 1811 /*
1812 * Provide enable completion callback. 1812 * Provide enable completion callback.
1813 */ 1813 */
1814 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1814 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -1819,7 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1819 1819
1820 1820
1821 1821
1822/** 1822/*
1823 * hal_ioc_public 1823 * hal_ioc_public
1824 */ 1824 */
1825 1825
@@ -1843,7 +1843,7 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1843 return BFA_STATUS_OK; 1843 return BFA_STATUS_OK;
1844} 1844}
1845 1845
1846/** 1846/*
1847 * Interface used by diag module to do firmware boot with memory test 1847 * Interface used by diag module to do firmware boot with memory test
1848 * as the entry vector. 1848 * as the entry vector.
1849 */ 1849 */
@@ -1857,7 +1857,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1857 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) 1857 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1858 return; 1858 return;
1859 1859
1860 /** 1860 /*
1861 * Initialize IOC state of all functions on a chip reset. 1861 * Initialize IOC state of all functions on a chip reset.
1862 */ 1862 */
1863 rb = ioc->pcidev.pci_bar_kva; 1863 rb = ioc->pcidev.pci_bar_kva;
@@ -1872,14 +1872,14 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1872 bfa_ioc_msgflush(ioc); 1872 bfa_ioc_msgflush(ioc);
1873 bfa_ioc_download_fw(ioc, boot_type, boot_env); 1873 bfa_ioc_download_fw(ioc, boot_type, boot_env);
1874 1874
1875 /** 1875 /*
1876 * Enable interrupts just before starting LPU 1876 * Enable interrupts just before starting LPU
1877 */ 1877 */
1878 ioc->cbfn->reset_cbfn(ioc->bfa); 1878 ioc->cbfn->reset_cbfn(ioc->bfa);
1879 bfa_ioc_lpu_start(ioc); 1879 bfa_ioc_lpu_start(ioc);
1880} 1880}
1881 1881
1882/** 1882/*
1883 * Enable/disable IOC failure auto recovery. 1883 * Enable/disable IOC failure auto recovery.
1884 */ 1884 */
1885void 1885void
@@ -1913,7 +1913,7 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1913 u32 r32; 1913 u32 r32;
1914 int i; 1914 int i;
1915 1915
1916 /** 1916 /*
1917 * read the MBOX msg 1917 * read the MBOX msg
1918 */ 1918 */
1919 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); 1919 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
@@ -1923,7 +1923,7 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1923 msgp[i] = cpu_to_be32(r32); 1923 msgp[i] = cpu_to_be32(r32);
1924 } 1924 }
1925 1925
1926 /** 1926 /*
1927 * turn off mailbox interrupt by clearing mailbox status 1927 * turn off mailbox interrupt by clearing mailbox status
1928 */ 1928 */
1929 writel(1, ioc->ioc_regs.lpu_mbox_cmd); 1929 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
@@ -1966,7 +1966,7 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1966 } 1966 }
1967} 1967}
1968 1968
1969/** 1969/*
1970 * IOC attach time initialization and setup. 1970 * IOC attach time initialization and setup.
1971 * 1971 *
1972 * @param[in] ioc memory for IOC 1972 * @param[in] ioc memory for IOC
@@ -1991,7 +1991,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
1991 bfa_fsm_send_event(ioc, IOC_E_RESET); 1991 bfa_fsm_send_event(ioc, IOC_E_RESET);
1992} 1992}
1993 1993
1994/** 1994/*
1995 * Driver detach time IOC cleanup. 1995 * Driver detach time IOC cleanup.
1996 */ 1996 */
1997void 1997void
@@ -2000,7 +2000,7 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc)
2000 bfa_fsm_send_event(ioc, IOC_E_DETACH); 2000 bfa_fsm_send_event(ioc, IOC_E_DETACH);
2001} 2001}
2002 2002
2003/** 2003/*
2004 * Setup IOC PCI properties. 2004 * Setup IOC PCI properties.
2005 * 2005 *
2006 * @param[in] pcidev PCI device information for this IOC 2006 * @param[in] pcidev PCI device information for this IOC
@@ -2014,7 +2014,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2014 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); 2014 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
2015 ioc->cna = ioc->ctdev && !ioc->fcmode; 2015 ioc->cna = ioc->ctdev && !ioc->fcmode;
2016 2016
2017 /** 2017 /*
2018 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c 2018 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2019 */ 2019 */
2020 if (ioc->ctdev) 2020 if (ioc->ctdev)
@@ -2026,7 +2026,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2026 bfa_ioc_reg_init(ioc); 2026 bfa_ioc_reg_init(ioc);
2027} 2027}
2028 2028
2029/** 2029/*
2030 * Initialize IOC dma memory 2030 * Initialize IOC dma memory
2031 * 2031 *
2032 * @param[in] dm_kva kernel virtual address of IOC dma memory 2032 * @param[in] dm_kva kernel virtual address of IOC dma memory
@@ -2035,7 +2035,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2035void 2035void
2036bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) 2036bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2037{ 2037{
2038 /** 2038 /*
2039 * dma memory for firmware attribute 2039 * dma memory for firmware attribute
2040 */ 2040 */
2041 ioc->attr_dma.kva = dm_kva; 2041 ioc->attr_dma.kva = dm_kva;
@@ -2043,7 +2043,7 @@ bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2043 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; 2043 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2044} 2044}
2045 2045
2046/** 2046/*
2047 * Return size of dma memory required. 2047 * Return size of dma memory required.
2048 */ 2048 */
2049u32 2049u32
@@ -2068,7 +2068,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
2068 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 2068 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2069} 2069}
2070 2070
2071/** 2071/*
2072 * Returns memory required for saving firmware trace in case of crash. 2072 * Returns memory required for saving firmware trace in case of crash.
2073 * Driver must call this interface to allocate memory required for 2073 * Driver must call this interface to allocate memory required for
2074 * automatic saving of firmware trace. Driver should call 2074 * automatic saving of firmware trace. Driver should call
@@ -2081,7 +2081,7 @@ bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
2081 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 2081 return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2082} 2082}
2083 2083
2084/** 2084/*
2085 * Initialize memory for saving firmware trace. Driver must initialize 2085 * Initialize memory for saving firmware trace. Driver must initialize
2086 * trace memory before calling bfa_ioc_enable(). 2086 * trace memory before calling bfa_ioc_enable().
2087 */ 2087 */
@@ -2104,7 +2104,7 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
2104 return PSS_SMEM_PGOFF(fmaddr); 2104 return PSS_SMEM_PGOFF(fmaddr);
2105} 2105}
2106 2106
2107/** 2107/*
2108 * Register mailbox message handler functions 2108 * Register mailbox message handler functions
2109 * 2109 *
2110 * @param[in] ioc IOC instance 2110 * @param[in] ioc IOC instance
@@ -2120,7 +2120,7 @@ bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2120 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; 2120 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2121} 2121}
2122 2122
2123/** 2123/*
2124 * Register mailbox message handler function, to be called by common modules 2124 * Register mailbox message handler function, to be called by common modules
2125 */ 2125 */
2126void 2126void
@@ -2133,7 +2133,7 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2133 mod->mbhdlr[mc].cbarg = cbarg; 2133 mod->mbhdlr[mc].cbarg = cbarg;
2134} 2134}
2135 2135
2136/** 2136/*
2137 * Queue a mailbox command request to firmware. Waits if mailbox is busy. 2137 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2138 * Responsibility of caller to serialize 2138 * Responsibility of caller to serialize
2139 * 2139 *
@@ -2146,7 +2146,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2146 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 2146 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2147 u32 stat; 2147 u32 stat;
2148 2148
2149 /** 2149 /*
2150 * If a previous command is pending, queue new command 2150 * If a previous command is pending, queue new command
2151 */ 2151 */
2152 if (!list_empty(&mod->cmd_q)) { 2152 if (!list_empty(&mod->cmd_q)) {
@@ -2154,7 +2154,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2154 return; 2154 return;
2155 } 2155 }
2156 2156
2157 /** 2157 /*
2158 * If mailbox is busy, queue command for poll timer 2158 * If mailbox is busy, queue command for poll timer
2159 */ 2159 */
2160 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); 2160 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
@@ -2163,13 +2163,13 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2163 return; 2163 return;
2164 } 2164 }
2165 2165
2166 /** 2166 /*
2167 * mailbox is free -- queue command to firmware 2167 * mailbox is free -- queue command to firmware
2168 */ 2168 */
2169 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); 2169 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2170} 2170}
2171 2171
2172/** 2172/*
2173 * Handle mailbox interrupts 2173 * Handle mailbox interrupts
2174 */ 2174 */
2175void 2175void
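The three commented branches in the hunks above reduce to one decision: queue behind an earlier command, queue when the mailbox register reports busy, otherwise hand the command to firmware at once. A simplified standalone model follows; the list and the register are stand-ins for the driver's own types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct mbox_cmd {
        struct mbox_cmd *next;          /* payload omitted */
};

struct mbox {
        struct mbox_cmd *cmd_q;         /* pending commands, oldest first */
        uint32_t cmd_reg;               /* nonzero until firmware fetches */
};

static void mbox_enqueue(struct mbox *mb, struct mbox_cmd *cmd)
{
        /* append to the tail so ordering is preserved */
        struct mbox_cmd **pp = &mb->cmd_q;
        while (*pp)
                pp = &(*pp)->next;
        cmd->next = NULL;
        *pp = cmd;
}

static void mbox_send_now(struct mbox *mb, struct mbox_cmd *cmd)
{
        (void)cmd;                      /* copy into the mailbox registers here */
        mb->cmd_reg = 1;                /* firmware clears this when it fetches */
}

static void mbox_queue(struct mbox *mb, struct mbox_cmd *cmd)
{
        if (mb->cmd_q) {                /* a previous command is still pending */
                mbox_enqueue(mb, cmd);
                return;
        }
        if (mb->cmd_reg) {              /* mailbox busy -- leave for the poll timer */
                mbox_enqueue(mb, cmd);
                return;
        }
        mbox_send_now(mb, cmd);         /* mailbox free -- send to firmware */
}

The poll timer later drains the queue with the same busy check, as the bfa_ioc_mbox_poll() hunk earlier in this file shows.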
@@ -2181,7 +2181,7 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2181 2181
2182 bfa_ioc_msgget(ioc, &m); 2182 bfa_ioc_msgget(ioc, &m);
2183 2183
2184 /** 2184 /*
2185 * Treat IOC message class as special. 2185 * Treat IOC message class as special.
2186 */ 2186 */
2187 mc = m.mh.msg_class; 2187 mc = m.mh.msg_class;
@@ -2209,7 +2209,7 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
2209 ioc->port_id = bfa_ioc_pcifn(ioc); 2209 ioc->port_id = bfa_ioc_pcifn(ioc);
2210} 2210}
2211 2211
2212/** 2212/*
2213 * return true if IOC is disabled 2213 * return true if IOC is disabled
2214 */ 2214 */
2215bfa_boolean_t 2215bfa_boolean_t
@@ -2219,7 +2219,7 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2219 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 2219 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2220} 2220}
2221 2221
2222/** 2222/*
2223 * return true if IOC firmware is different. 2223 * return true if IOC firmware is different.
2224 */ 2224 */
2225bfa_boolean_t 2225bfa_boolean_t
@@ -2238,7 +2238,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2238 ((__sm) == BFI_IOC_FAIL) || \ 2238 ((__sm) == BFI_IOC_FAIL) || \
2239 ((__sm) == BFI_IOC_CFG_DISABLED)) 2239 ((__sm) == BFI_IOC_CFG_DISABLED))
2240 2240
2241/** 2241/*
2242 * Check if adapter is disabled -- both IOCs should be in a disabled 2242 * Check if adapter is disabled -- both IOCs should be in a disabled
2243 * state. 2243 * state.
2244 */ 2244 */
@@ -2264,7 +2264,7 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2264 return BFA_TRUE; 2264 return BFA_TRUE;
2265} 2265}
2266 2266
2267/** 2267/*
2268 * Add to IOC heartbeat failure notification queue. To be used by common 2268 * Add to IOC heartbeat failure notification queue. To be used by common
2269 * modules such as cee, port, diag. 2269 * modules such as cee, port, diag.
2270 */ 2270 */
@@ -2391,7 +2391,7 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2391 2391
2392 ioc_attr = ioc->attr; 2392 ioc_attr = ioc->attr;
2393 2393
2394 /** 2394 /*
2395 * model name 2395 * model name
2396 */ 2396 */
2397 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", 2397 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
@@ -2455,7 +2455,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2455 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); 2455 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2456} 2456}
2457 2457
2458/** 2458/*
2459 * hal_wwn_public 2459 * hal_wwn_public
2460 */ 2460 */
2461wwn_t 2461wwn_t
@@ -2521,7 +2521,7 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
2521 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id); 2521 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
2522} 2522}
2523 2523
2524/** 2524/*
2525 * Retrieve saved firmware trace from a prior IOC failure. 2525 * Retrieve saved firmware trace from a prior IOC failure.
2526 */ 2526 */
2527bfa_status_t 2527bfa_status_t
@@ -2541,7 +2541,7 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2541 return BFA_STATUS_OK; 2541 return BFA_STATUS_OK;
2542} 2542}
2543 2543
2544/** 2544/*
2545 * Clear saved firmware trace 2545 * Clear saved firmware trace
2546 */ 2546 */
2547void 2547void
@@ -2550,7 +2550,7 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
2550 ioc->dbg_fwsave_once = BFA_TRUE; 2550 ioc->dbg_fwsave_once = BFA_TRUE;
2551} 2551}
2552 2552
2553/** 2553/*
2554 * Retrieve saved firmware trace from a prior IOC failure. 2554 * Retrieve saved firmware trace from a prior IOC failure.
2555 */ 2555 */
2556bfa_status_t 2556bfa_status_t
@@ -2590,7 +2590,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2590 2590
2591 bfa_ioc_send_fwsync(ioc); 2591 bfa_ioc_send_fwsync(ioc);
2592 2592
2593 /** 2593 /*
2594 * After sending a fw sync mbox command wait for it to 2594 * After sending a fw sync mbox command wait for it to
2595 * take effect. We will not wait for a response because 2595 * take effect. We will not wait for a response because
2596 * 1. fw_sync mbox cmd doesn't have a response. 2596 * 1. fw_sync mbox cmd doesn't have a response.
@@ -2605,7 +2605,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2605 fwsync_iter--; 2605 fwsync_iter--;
2606} 2606}
2607 2607
2608/** 2608/*
2609 * Dump firmware smem 2609 * Dump firmware smem
2610 */ 2610 */
2611bfa_status_t 2611bfa_status_t
@@ -2625,7 +2625,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2625 loff = *offset; 2625 loff = *offset;
2626 dlen = *buflen; 2626 dlen = *buflen;
2627 2627
2628 /** 2628 /*
2629 * First smem read, sync smem before proceeding 2629 * First smem read, sync smem before proceeding
2630 * No need to sync before reading every chunk. 2630 * No need to sync before reading every chunk.
2631 */ 2631 */
@@ -2652,7 +2652,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2652 return status; 2652 return status;
2653} 2653}
2654 2654
2655/** 2655/*
2656 * Firmware statistics 2656 * Firmware statistics
2657 */ 2657 */
2658bfa_status_t 2658bfa_status_t
@@ -2697,7 +2697,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2697 return status; 2697 return status;
2698} 2698}
2699 2699
2700/** 2700/*
2701 * Save firmware trace if configured. 2701 * Save firmware trace if configured.
2702 */ 2702 */
2703static void 2703static void
@@ -2711,7 +2711,7 @@ bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
2711 } 2711 }
2712} 2712}
2713 2713
2714/** 2714/*
2715 * Firmware failure detected. Start recovery actions. 2715 * Firmware failure detected. Start recovery actions.
2716 */ 2716 */
2717static void 2717static void
@@ -2733,7 +2733,7 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2733 return; 2733 return;
2734} 2734}
2735 2735
2736/** 2736/*
2737 * hal_iocpf_pvt BFA IOC PF private functions 2737 * hal_iocpf_pvt BFA IOC PF private functions
2738 */ 2738 */
2739 2739
@@ -2790,7 +2790,7 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
2790 bfa_ioc_hw_sem_get(ioc); 2790 bfa_ioc_hw_sem_get(ioc);
2791} 2791}
2792 2792
2793/** 2793/*
2794 * bfa timer function 2794 * bfa timer function
2795 */ 2795 */
2796void 2796void
@@ -2835,7 +2835,7 @@ bfa_timer_beat(struct bfa_timer_mod_s *mod)
2835 } 2835 }
2836} 2836}
2837 2837
2838/** 2838/*
2839 * Should be called with lock protection 2839 * Should be called with lock protection
2840 */ 2840 */
2841void 2841void
@@ -2853,7 +2853,7 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2853 list_add_tail(&timer->qe, &mod->timer_q); 2853 list_add_tail(&timer->qe, &mod->timer_q);
2854} 2854}
2855 2855
2856/** 2856/*
2857 * Should be called with lock protection 2857 * Should be called with lock protection
2858 */ 2858 */
2859void 2859void
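
For reference on why the bfa_ioc.c hunks above change only the comment openers: a block that starts with a double-asterisk opener is treated as kernel-doc by scripts/kernel-doc and is expected to follow the kernel-doc layout, while ordinary banner comments open with a single asterisk. A minimal standalone sketch of the two styles follows; the functions are hypothetical and not part of the bfa driver.

#include <stdio.h>

/**
 * sketch_add() - add two integers
 * @a: first addend
 * @b: second addend
 *
 * Kernel-doc block: the double-asterisk opener marks it for scripts/kernel-doc.
 *
 * Return: the sum of @a and @b.
 */
static int sketch_add(int a, int b)
{
	return a + b;
}

/*
 * Ordinary banner comment: a single-asterisk opener, which is the form the
 * patch converts the non-kernel-doc bfa comment blocks to.
 */
static int sketch_sub(int a, int b)
{
	return a - b;
}

int main(void)
{
	printf("%d %d\n", sketch_add(2, 3), sketch_sub(5, 1));
	return 0;
}
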
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index 86210a5f7249..909945043850 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -34,7 +34,7 @@ static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
34 34
35struct bfa_ioc_hwif_s hwif_cb; 35struct bfa_ioc_hwif_s hwif_cb;
36 36
37/** 37/*
38 * Called from bfa_ioc_attach() to map asic specific calls. 38 * Called from bfa_ioc_attach() to map asic specific calls.
39 */ 39 */
40void 40void
@@ -52,7 +52,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
52 ioc->ioc_hwif = &hwif_cb; 52 ioc->ioc_hwif = &hwif_cb;
53} 53}
54 54
55/** 55/*
56 * Return true if firmware of current driver matches the running firmware. 56 * Return true if firmware of current driver matches the running firmware.
57 */ 57 */
58static bfa_boolean_t 58static bfa_boolean_t
@@ -66,7 +66,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
66{ 66{
67} 67}
68 68
69/** 69/*
70 * Notify other functions on HB failure. 70 * Notify other functions on HB failure.
71 */ 71 */
72static void 72static void
@@ -76,7 +76,7 @@ bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
76 readl(ioc->ioc_regs.err_set); 76 readl(ioc->ioc_regs.err_set);
77} 77}
78 78
79/** 79/*
80 * Host to LPU mailbox message addresses 80 * Host to LPU mailbox message addresses
81 */ 81 */
82static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { 82static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -84,7 +84,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
84 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } 84 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
85}; 85};
86 86
87/** 87/*
88 * Host <-> LPU mailbox command/status registers 88 * Host <-> LPU mailbox command/status registers
89 */ 89 */
90static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { 90static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
@@ -113,7 +113,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
113 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 113 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
114 } 114 }
115 115
116 /** 116 /*
117 * Host <-> LPU mailbox command/status registers 117 * Host <-> LPU mailbox command/status registers
118 */ 118 */
119 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; 119 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
@@ -133,7 +133,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
133 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); 133 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
134 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 134 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
135 135
136 /** 136 /*
137 * sram memory access 137 * sram memory access
138 */ 138 */
139 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); 139 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -145,14 +145,14 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
145 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 145 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
146} 146}
147 147
148/** 148/*
149 * Initialize IOC to port mapping. 149 * Initialize IOC to port mapping.
150 */ 150 */
151 151
152static void 152static void
153bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) 153bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
154{ 154{
155 /** 155 /*
156 * For crossbow, port id is same as pci function. 156 * For crossbow, port id is same as pci function.
157 */ 157 */
158 ioc->port_id = bfa_ioc_pcifn(ioc); 158 ioc->port_id = bfa_ioc_pcifn(ioc);
@@ -160,7 +160,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
160 bfa_trc(ioc, ioc->port_id); 160 bfa_trc(ioc, ioc->port_id);
161} 161}
162 162
163/** 163/*
164 * Set interrupt mode for a function: INTX or MSIX 164 * Set interrupt mode for a function: INTX or MSIX
165 */ 165 */
166static void 166static void
@@ -168,7 +168,7 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
168{ 168{
169} 169}
170 170
171/** 171/*
172 * Cleanup hw semaphore and usecnt registers 172 * Cleanup hw semaphore and usecnt registers
173 */ 173 */
174static void 174static void
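
For context on the register tables in the bfa_ioc_cb.c hunks above (iocreg_fnreg and iocreg_mbcmd): the pattern is to index a small static table by PCI function number to pick that function's mailbox register offsets. A simplified userspace sketch of the same idea; the offsets below are made up for illustration and are not the crossbow register map.

#include <stdio.h>

/* Hypothetical per-PCI-function register offsets (illustrative values only). */
static const struct {
	unsigned int hfn_mbox, lpu_mbox, hfn_pgn;
} fn_regs[] = {
	{ 0x100, 0x180, 0x200 },	/* PCI function 0 */
	{ 0x110, 0x190, 0x204 },	/* PCI function 1 */
};

int main(void)
{
	unsigned int pcifn = 1;		/* would come from the PCI core */

	printf("fn%u: hfn_mbox=0x%x lpu_mbox=0x%x hfn_pgn=0x%x\n", pcifn,
	       fn_regs[pcifn].hfn_mbox, fn_regs[pcifn].lpu_mbox,
	       fn_regs[pcifn].hfn_pgn);
	return 0;
}
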
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index c852d985e198..115730c0aa77 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -34,7 +34,7 @@ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
34 34
35struct bfa_ioc_hwif_s hwif_ct; 35struct bfa_ioc_hwif_s hwif_ct;
36 36
37/** 37/*
38 * Called from bfa_ioc_attach() to map asic specific calls. 38 * Called from bfa_ioc_attach() to map asic specific calls.
39 */ 39 */
40void 40void
@@ -52,7 +52,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
52 ioc->ioc_hwif = &hwif_ct; 52 ioc->ioc_hwif = &hwif_ct;
53} 53}
54 54
55/** 55/*
56 * Return true if firmware of current driver matches the running firmware. 56 * Return true if firmware of current driver matches the running firmware.
57 */ 57 */
58static bfa_boolean_t 58static bfa_boolean_t
@@ -62,13 +62,13 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
62 u32 usecnt; 62 u32 usecnt;
63 struct bfi_ioc_image_hdr_s fwhdr; 63 struct bfi_ioc_image_hdr_s fwhdr;
64 64
65 /** 65 /*
66 * Firmware match check is relevant only for CNA. 66 * Firmware match check is relevant only for CNA.
67 */ 67 */
68 if (!ioc->cna) 68 if (!ioc->cna)
69 return BFA_TRUE; 69 return BFA_TRUE;
70 70
71 /** 71 /*
72 * If bios boot (flash based) -- do not increment usage count 72 * If bios boot (flash based) -- do not increment usage count
73 */ 73 */
74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < 74 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
@@ -78,7 +78,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 78 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
79 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 79 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
80 80
81 /** 81 /*
82 * If usage count is 0, always return TRUE. 82 * If usage count is 0, always return TRUE.
83 */ 83 */
84 if (usecnt == 0) { 84 if (usecnt == 0) {
@@ -91,12 +91,12 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
91 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 91 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
92 bfa_trc(ioc, ioc_fwstate); 92 bfa_trc(ioc, ioc_fwstate);
93 93
94 /** 94 /*
95 * Use count cannot be non-zero and chip in uninitialized state. 95 * Use count cannot be non-zero and chip in uninitialized state.
96 */ 96 */
97 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); 97 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
98 98
99 /** 99 /*
100 * Check if another driver with a different firmware is active 100 * Check if another driver with a different firmware is active
101 */ 101 */
102 bfa_ioc_fwver_get(ioc, &fwhdr); 102 bfa_ioc_fwver_get(ioc, &fwhdr);
@@ -106,7 +106,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
106 return BFA_FALSE; 106 return BFA_FALSE;
107 } 107 }
108 108
109 /** 109 /*
110 * Same firmware version. Increment the reference count. 110 * Same firmware version. Increment the reference count.
111 */ 111 */
112 usecnt++; 112 usecnt++;
@@ -121,20 +121,20 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
121{ 121{
122 u32 usecnt; 122 u32 usecnt;
123 123
124 /** 124 /*
125 * Firmware lock is relevant only for CNA. 125 * Firmware lock is relevant only for CNA.
126 */ 126 */
127 if (!ioc->cna) 127 if (!ioc->cna)
128 return; 128 return;
129 129
130 /** 130 /*
131 * If bios boot (flash based) -- do not decrement usage count 131 * If bios boot (flash based) -- do not decrement usage count
132 */ 132 */
133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < 133 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
134 BFA_IOC_FWIMG_MINSZ) 134 BFA_IOC_FWIMG_MINSZ)
135 return; 135 return;
136 136
137 /** 137 /*
138 * decrement usage count 138 * decrement usage count
139 */ 139 */
140 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 140 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@@ -148,7 +148,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
148 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 148 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
149} 149}
150 150
151/** 151/*
152 * Notify other functions on HB failure. 152 * Notify other functions on HB failure.
153 */ 153 */
154static void 154static void
@@ -164,7 +164,7 @@ bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
164 } 164 }
165} 165}
166 166
167/** 167/*
168 * Host to LPU mailbox message addresses 168 * Host to LPU mailbox message addresses
169 */ 169 */
170static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { 170static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -174,7 +174,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
174 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } 174 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
175}; 175};
176 176
177/** 177/*
178 * Host <-> LPU mailbox command/status registers - port 0 178 * Host <-> LPU mailbox command/status registers - port 0
179 */ 179 */
180static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { 180static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
@@ -184,7 +184,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
184 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } 184 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
185}; 185};
186 186
187/** 187/*
188 * Host <-> LPU mailbox command/status registers - port 1 188 * Host <-> LPU mailbox command/status registers - port 1
189 */ 189 */
190static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { 190static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
@@ -236,7 +236,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
236 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 236 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
237 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 237 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
238 238
239 /** 239 /*
240 * sram memory access 240 * sram memory access
241 */ 241 */
242 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); 242 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -248,7 +248,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
248 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 248 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
249} 249}
250 250
251/** 251/*
252 * Initialize IOC to port mapping. 252 * Initialize IOC to port mapping.
253 */ 253 */
254 254
@@ -259,7 +259,7 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
259 void __iomem *rb = ioc->pcidev.pci_bar_kva; 259 void __iomem *rb = ioc->pcidev.pci_bar_kva;
260 u32 r32; 260 u32 r32;
261 261
262 /** 262 /*
263 * For catapult, base port id on personality register and IOC type 263 * For catapult, base port id on personality register and IOC type
264 */ 264 */
265 r32 = readl(rb + FNC_PERS_REG); 265 r32 = readl(rb + FNC_PERS_REG);
@@ -270,7 +270,7 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
270 bfa_trc(ioc, ioc->port_id); 270 bfa_trc(ioc, ioc->port_id);
271} 271}
272 272
273/** 273/*
274 * Set interrupt mode for a function: INTX or MSIX 274 * Set interrupt mode for a function: INTX or MSIX
275 */ 275 */
276static void 276static void
@@ -285,7 +285,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
285 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & 285 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
286 __F0_INTX_STATUS; 286 __F0_INTX_STATUS;
287 287
288 /** 288 /*
289 * If already in desired mode, do not change anything 289 * If already in desired mode, do not change anything
290 */ 290 */
291 if (!msix && mode) 291 if (!msix && mode)
@@ -303,7 +303,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
303 writel(r32, rb + FNC_PERS_REG); 303 writel(r32, rb + FNC_PERS_REG);
304} 304}
305 305
306/** 306/*
307 * Cleanup hw semaphore and usecnt registers 307 * Cleanup hw semaphore and usecnt registers
308 */ 308 */
309static void 309static void
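
The bfa_ioc_ct.c hunks above touch the firmware usage-count logic: the count is read under a hardware semaphore, the lock is granted when the count is zero or when the running firmware matches, and the count is dropped again on unlock. A simplified userspace sketch of that pattern, with a pthread mutex standing in for the hardware semaphore; illustrative only, not the driver's actual locking.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t usage_sem = PTHREAD_MUTEX_INITIALIZER;	/* stands in for ioc_usage_sem_reg */
static unsigned int usecnt;					/* stands in for the usage-count register */

/* Grant the lock if no one holds it yet, or if the caller runs the same
 * firmware as the current holder; otherwise refuse. */
static bool fw_lock(bool fw_matches)
{
	bool granted = false;

	pthread_mutex_lock(&usage_sem);
	if (usecnt == 0 || fw_matches) {
		usecnt++;
		granted = true;
	}
	pthread_mutex_unlock(&usage_sem);
	return granted;
}

static void fw_unlock(void)
{
	pthread_mutex_lock(&usage_sem);
	if (usecnt > 0)
		usecnt--;	/* drop this function's reference */
	pthread_mutex_unlock(&usage_sem);
}

int main(void)
{
	printf("first lock: %d\n", fw_lock(false));	/* granted, count 0 -> 1 */
	printf("mismatch:   %d\n", fw_lock(false));	/* refused, different firmware */
	printf("match:      %d\n", fw_lock(true));	/* granted, count 1 -> 2 */
	fw_unlock();
	fw_unlock();
	return 0;
}
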
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index 17834ff11736..fff96226a383 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -46,7 +46,7 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
46 } 46 }
47} 47}
48 48
49/** 49/*
50 * bfa_port_enable_isr() 50 * bfa_port_enable_isr()
51 * 51 *
52 * 52 *
@@ -63,7 +63,7 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
63 port->endis_cbfn(port->endis_cbarg, status); 63 port->endis_cbfn(port->endis_cbarg, status);
64} 64}
65 65
66/** 66/*
67 * bfa_port_disable_isr() 67 * bfa_port_disable_isr()
68 * 68 *
69 * 69 *
@@ -80,7 +80,7 @@ bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
80 port->endis_cbfn(port->endis_cbarg, status); 80 port->endis_cbfn(port->endis_cbarg, status);
81} 81}
82 82
83/** 83/*
84 * bfa_port_get_stats_isr() 84 * bfa_port_get_stats_isr()
85 * 85 *
86 * 86 *
@@ -112,7 +112,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
112 } 112 }
113} 113}
114 114
115/** 115/*
116 * bfa_port_clear_stats_isr() 116 * bfa_port_clear_stats_isr()
117 * 117 *
118 * 118 *
@@ -129,7 +129,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
129 port->stats_status = status; 129 port->stats_status = status;
130 port->stats_busy = BFA_FALSE; 130 port->stats_busy = BFA_FALSE;
131 131
132 /** 132 /*
133 * re-initialize time stamp for stats reset 133 * re-initialize time stamp for stats reset
134 */ 134 */
135 bfa_os_gettimeofday(&tv); 135 bfa_os_gettimeofday(&tv);
@@ -141,7 +141,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
141 } 141 }
142} 142}
143 143
144/** 144/*
145 * bfa_port_isr() 145 * bfa_port_isr()
146 * 146 *
147 * 147 *
@@ -189,7 +189,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
189 } 189 }
190} 190}
191 191
192/** 192/*
193 * bfa_port_meminfo() 193 * bfa_port_meminfo()
194 * 194 *
195 * 195 *
@@ -203,7 +203,7 @@ bfa_port_meminfo(void)
203 return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ); 203 return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
204} 204}
205 205
206/** 206/*
207 * bfa_port_mem_claim() 207 * bfa_port_mem_claim()
208 * 208 *
209 * 209 *
@@ -220,7 +220,7 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
220 port->stats_dma.pa = dma_pa; 220 port->stats_dma.pa = dma_pa;
221} 221}
222 222
223/** 223/*
224 * bfa_port_enable() 224 * bfa_port_enable()
225 * 225 *
226 * Send the Port enable request to the f/w 226 * Send the Port enable request to the f/w
@@ -264,7 +264,7 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
264 return BFA_STATUS_OK; 264 return BFA_STATUS_OK;
265} 265}
266 266
267/** 267/*
268 * bfa_port_disable() 268 * bfa_port_disable()
269 * 269 *
270 * Send the Port disable request to the f/w 270 * Send the Port disable request to the f/w
@@ -308,7 +308,7 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
308 return BFA_STATUS_OK; 308 return BFA_STATUS_OK;
309} 309}
310 310
311/** 311/*
312 * bfa_port_get_stats() 312 * bfa_port_get_stats()
313 * 313 *
314 * Send the request to the f/w to fetch Port statistics. 314 * Send the request to the f/w to fetch Port statistics.
@@ -348,7 +348,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
348 return BFA_STATUS_OK; 348 return BFA_STATUS_OK;
349} 349}
350 350
351/** 351/*
352 * bfa_port_clear_stats() 352 * bfa_port_clear_stats()
353 * 353 *
354 * 354 *
@@ -385,7 +385,7 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
385 return BFA_STATUS_OK; 385 return BFA_STATUS_OK;
386} 386}
387 387
388/** 388/*
389 * bfa_port_hbfail() 389 * bfa_port_hbfail()
390 * 390 *
391 * 391 *
@@ -415,7 +415,7 @@ bfa_port_hbfail(void *arg)
415 } 415 }
416} 416}
417 417
418/** 418/*
419 * bfa_port_attach() 419 * bfa_port_attach()
420 * 420 *
421 * 421 *
@@ -449,7 +449,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
449 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); 449 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
450 bfa_ioc_hbfail_register(port->ioc, &port->hbfail); 450 bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
451 451
452 /** 452 /*
453 * initialize time stamp for stats reset 453 * initialize time stamp for stats reset
454 */ 454 */
455 bfa_os_gettimeofday(&tv); 455 bfa_os_gettimeofday(&tv);
@@ -458,7 +458,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
458 bfa_trc(port, 0); 458 bfa_trc(port, 0);
459} 459}
460 460
461/** 461/*
462 * bfa_port_detach() 462 * bfa_port_detach()
463 * 463 *
464 * 464 *
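
The bfa_port.c hunks above all follow one request/completion shape: enable, disable and stats requests save the caller's callback, mark the port busy and post a mailbox request, and the matching *_isr() handler clears the busy flag and invokes the callback when the firmware responds. A stripped-down sketch of that shape; names are simplified and hypothetical, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

typedef void (*endis_cbfn_t)(void *cbarg, int status);

struct port_ctx {
	bool		busy;		/* a request is outstanding */
	endis_cbfn_t	endis_cbfn;	/* saved completion callback */
	void		*endis_cbarg;	/* caller's context for the callback */
};

/* Post an enable request: remember how to complete it, then (in the real
 * driver) queue a mailbox command to firmware. */
static int port_enable(struct port_ctx *port, endis_cbfn_t cbfn, void *cbarg)
{
	if (port->busy)
		return -1;	/* previous request still pending */

	port->endis_cbfn = cbfn;
	port->endis_cbarg = cbarg;
	port->busy = true;
	return 0;
}

/* Firmware response path: clear busy and complete back to the caller. */
static void port_enable_isr(struct port_ctx *port, int status)
{
	port->busy = false;
	if (port->endis_cbfn)
		port->endis_cbfn(port->endis_cbarg, status);
}

static void done(void *cbarg, int status)
{
	printf("%s completed, status %d\n", (char *)cbarg, status);
}

int main(void)
{
	struct port_ctx port = { 0 };
	char tag[] = "port enable";

	if (port_enable(&port, done, tag) == 0)
		port_enable_isr(&port, 0);	/* simulate the firmware response */
	return 0;
}
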
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 4d13688c76d6..c768143f4805 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -29,7 +29,7 @@ BFA_MODULE(fcport);
29BFA_MODULE(rport); 29BFA_MODULE(rport);
30BFA_MODULE(uf); 30BFA_MODULE(uf);
31 31
32/** 32/*
33 * LPS related definitions 33 * LPS related definitions
34 */ 34 */
35#define BFA_LPS_MIN_LPORTS (1) 35#define BFA_LPS_MIN_LPORTS (1)
@@ -41,7 +41,7 @@ BFA_MODULE(uf);
41#define BFA_LPS_MAX_VPORTS_SUPP_CB 255 41#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42#define BFA_LPS_MAX_VPORTS_SUPP_CT 190 42#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
43 43
44/** 44/*
45 * lps_pvt BFA LPS private functions 45 * lps_pvt BFA LPS private functions
46 */ 46 */
47 47
@@ -55,7 +55,7 @@ enum bfa_lps_event {
55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ 55 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
56}; 56};
57 57
58/** 58/*
59 * FC PORT related definitions 59 * FC PORT related definitions
60 */ 60 */
61/* 61/*
@@ -67,7 +67,7 @@ enum bfa_lps_event {
67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 67 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
68 68
69 69
70/** 70/*
71 * BFA port state machine events 71 * BFA port state machine events
72 */ 72 */
73enum bfa_fcport_sm_event { 73enum bfa_fcport_sm_event {
@@ -82,7 +82,7 @@ enum bfa_fcport_sm_event {
82 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 82 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
83}; 83};
84 84
85/** 85/*
86 * BFA port link notification state machine events 86 * BFA port link notification state machine events
87 */ 87 */
88 88
@@ -92,7 +92,7 @@ enum bfa_fcport_ln_sm_event {
92 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ 92 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
93}; 93};
94 94
95/** 95/*
96 * RPORT related definitions 96 * RPORT related definitions
97 */ 97 */
98#define bfa_rport_offline_cb(__rp) do { \ 98#define bfa_rport_offline_cb(__rp) do { \
@@ -126,7 +126,7 @@ enum bfa_rport_event {
126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ 126 BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
127}; 127};
128 128
129/** 129/*
130 * forward declarations FCXP related functions 130 * forward declarations FCXP related functions
131 */ 131 */
132static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); 132static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
@@ -138,7 +138,7 @@ static void bfa_fcxp_qresume(void *cbarg);
138static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, 138static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
139 struct bfi_fcxp_send_req_s *send_req); 139 struct bfi_fcxp_send_req_s *send_req);
140 140
141/** 141/*
142 * forward declarations for LPS functions 142 * forward declarations for LPS functions
143 */ 143 */
144static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 144static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
@@ -163,7 +163,7 @@ static void bfa_lps_login_comp(struct bfa_lps_s *lps);
163static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 163static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
164static void bfa_lps_cvl_event(struct bfa_lps_s *lps); 164static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
165 165
166/** 166/*
167 * forward declaration for LPS state machine 167 * forward declaration for LPS state machine
168 */ 168 */
169static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); 169static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
@@ -175,7 +175,7 @@ static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
175static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event 175static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
176 event); 176 event);
177 177
178/** 178/*
179 * forward declaration for FC Port functions 179 * forward declaration for FC Port functions
180 */ 180 */
181static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); 181static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
@@ -193,7 +193,7 @@ static void bfa_fcport_stats_get_timeout(void *cbarg);
193static void bfa_fcport_stats_clr_timeout(void *cbarg); 193static void bfa_fcport_stats_clr_timeout(void *cbarg);
194static void bfa_trunk_iocdisable(struct bfa_s *bfa); 194static void bfa_trunk_iocdisable(struct bfa_s *bfa);
195 195
196/** 196/*
197 * forward declaration for FC PORT state machine 197 * forward declaration for FC PORT state machine
198 */ 198 */
199static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, 199static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
@@ -252,7 +252,7 @@ static struct bfa_sm_table_s hal_port_sm_table[] = {
252}; 252};
253 253
254 254
255/** 255/*
256 * forward declaration for RPORT related functions 256 * forward declaration for RPORT related functions
257 */ 257 */
258static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); 258static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
@@ -265,7 +265,7 @@ static void __bfa_cb_rport_online(void *cbarg,
265static void __bfa_cb_rport_offline(void *cbarg, 265static void __bfa_cb_rport_offline(void *cbarg,
266 bfa_boolean_t complete); 266 bfa_boolean_t complete);
267 267
268/** 268/*
269 * forward declaration for RPORT state machine 269 * forward declaration for RPORT state machine
270 */ 270 */
271static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, 271static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
@@ -295,7 +295,7 @@ static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
295static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, 295static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
296 enum bfa_rport_event event); 296 enum bfa_rport_event event);
297 297
298/** 298/*
299 * PLOG related definitions 299 * PLOG related definitions
300 */ 300 */
301static int 301static int
@@ -461,7 +461,7 @@ bfa_plog_get_setting(struct bfa_plog_s *plog)
461 return (bfa_boolean_t)plog->plog_enabled; 461 return (bfa_boolean_t)plog->plog_enabled;
462} 462}
463 463
464/** 464/*
465 * fcxp_pvt BFA FCXP private functions 465 * fcxp_pvt BFA FCXP private functions
466 */ 466 */
467 467
@@ -562,7 +562,7 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
562 mod->bfa = bfa; 562 mod->bfa = bfa;
563 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; 563 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
564 564
565 /** 565 /*
566 * Initialize FCXP request and response payload sizes. 566 * Initialize FCXP request and response payload sizes.
567 */ 567 */
568 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; 568 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
@@ -746,7 +746,7 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
746 746
747 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len); 747 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
748 748
749 /** 749 /*
750 * @todo f/w should not set residue to non-0 when everything 750 * @todo f/w should not set residue to non-0 when everything
751 * is received. 751 * is received.
752 */ 752 */
@@ -855,7 +855,7 @@ hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
855 } 855 }
856} 856}
857 857
858/** 858/*
 859 * Handler to resume sending fcxp when space is available in cpe queue.	 859 * Handler to resume sending fcxp when space is available in cpe queue.
860 */ 860 */
861static void 861static void
@@ -870,7 +870,7 @@ bfa_fcxp_qresume(void *cbarg)
870 bfa_fcxp_queue(fcxp, send_req); 870 bfa_fcxp_queue(fcxp, send_req);
871} 871}
872 872
873/** 873/*
 874 * Queue fcxp send request to firmware.	 874 * Queue fcxp send request to firmware.
875 */ 875 */
876static void 876static void
@@ -954,11 +954,11 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
954 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); 954 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
955} 955}
956 956
957/** 957/*
958 * hal_fcxp_api BFA FCXP API 958 * hal_fcxp_api BFA FCXP API
959 */ 959 */
960 960
961/** 961/*
962 * Allocate an FCXP instance to send a response or to send a request 962 * Allocate an FCXP instance to send a response or to send a request
963 * that has a response. Request/response buffers are allocated by caller. 963 * that has a response. Request/response buffers are allocated by caller.
964 * 964 *
@@ -1004,7 +1004,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
1004 return fcxp; 1004 return fcxp;
1005} 1005}
1006 1006
1007/** 1007/*
1008 * Get the internal request buffer pointer 1008 * Get the internal request buffer pointer
1009 * 1009 *
1010 * @param[in] fcxp BFA fcxp pointer 1010 * @param[in] fcxp BFA fcxp pointer
@@ -1031,7 +1031,7 @@ bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
1031 return mod->req_pld_sz; 1031 return mod->req_pld_sz;
1032} 1032}
1033 1033
1034/** 1034/*
1035 * Get the internal response buffer pointer 1035 * Get the internal response buffer pointer
1036 * 1036 *
1037 * @param[in] fcxp BFA fcxp pointer 1037 * @param[in] fcxp BFA fcxp pointer
@@ -1051,7 +1051,7 @@ bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1051 return rspbuf; 1051 return rspbuf;
1052} 1052}
1053 1053
1054/** 1054/*
1055 * Free the BFA FCXP 1055 * Free the BFA FCXP
1056 * 1056 *
1057 * @param[in] fcxp BFA fcxp pointer 1057 * @param[in] fcxp BFA fcxp pointer
@@ -1068,7 +1068,7 @@ bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1068 bfa_fcxp_put(fcxp); 1068 bfa_fcxp_put(fcxp);
1069} 1069}
1070 1070
1071/** 1071/*
1072 * Send a FCXP request 1072 * Send a FCXP request
1073 * 1073 *
1074 * @param[in] fcxp BFA fcxp pointer 1074 * @param[in] fcxp BFA fcxp pointer
@@ -1102,7 +1102,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1102 1102
1103 bfa_trc(bfa, fcxp->fcxp_tag); 1103 bfa_trc(bfa, fcxp->fcxp_tag);
1104 1104
1105 /** 1105 /*
1106 * setup request/response info 1106 * setup request/response info
1107 */ 1107 */
1108 reqi->bfa_rport = rport; 1108 reqi->bfa_rport = rport;
@@ -1117,7 +1117,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1117 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp; 1117 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1118 fcxp->send_cbarg = cbarg; 1118 fcxp->send_cbarg = cbarg;
1119 1119
1120 /** 1120 /*
1121 * If no room in CPE queue, wait for space in request queue 1121 * If no room in CPE queue, wait for space in request queue
1122 */ 1122 */
1123 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); 1123 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
@@ -1131,7 +1131,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1131 bfa_fcxp_queue(fcxp, send_req); 1131 bfa_fcxp_queue(fcxp, send_req);
1132} 1132}
1133 1133
1134/** 1134/*
1135 * Abort a BFA FCXP 1135 * Abort a BFA FCXP
1136 * 1136 *
1137 * @param[in] fcxp BFA fcxp pointer 1137 * @param[in] fcxp BFA fcxp pointer
@@ -1185,7 +1185,7 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1185void 1185void
1186bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) 1186bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1187{ 1187{
1188 /** 1188 /*
1189 * If waiting for room in request queue, cancel reqq wait 1189 * If waiting for room in request queue, cancel reqq wait
1190 * and free fcxp. 1190 * and free fcxp.
1191 */ 1191 */
@@ -1201,7 +1201,7 @@ bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1201 1201
1202 1202
1203 1203
1204/** 1204/*
1205 * hal_fcxp_public BFA FCXP public functions 1205 * hal_fcxp_public BFA FCXP public functions
1206 */ 1206 */
1207 1207
@@ -1228,11 +1228,11 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1228} 1228}
1229 1229
1230 1230
1231/** 1231/*
1232 * BFA LPS state machine functions 1232 * BFA LPS state machine functions
1233 */ 1233 */
1234 1234
1235/** 1235/*
1236 * Init state -- no login 1236 * Init state -- no login
1237 */ 1237 */
1238static void 1238static void
@@ -1284,7 +1284,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1284 } 1284 }
1285} 1285}
1286 1286
1287/** 1287/*
1288 * login is in progress -- awaiting response from firmware 1288 * login is in progress -- awaiting response from firmware
1289 */ 1289 */
1290static void 1290static void
@@ -1326,7 +1326,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1326 } 1326 }
1327} 1327}
1328 1328
1329/** 1329/*
1330 * login pending - awaiting space in request queue 1330 * login pending - awaiting space in request queue
1331 */ 1331 */
1332static void 1332static void
@@ -1358,7 +1358,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1358 } 1358 }
1359} 1359}
1360 1360
1361/** 1361/*
1362 * login complete 1362 * login complete
1363 */ 1363 */
1364static void 1364static void
@@ -1399,7 +1399,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1399 } 1399 }
1400} 1400}
1401 1401
1402/** 1402/*
1403 * logout in progress - awaiting firmware response 1403 * logout in progress - awaiting firmware response
1404 */ 1404 */
1405static void 1405static void
@@ -1423,7 +1423,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1423 } 1423 }
1424} 1424}
1425 1425
1426/** 1426/*
1427 * logout pending -- awaiting space in request queue 1427 * logout pending -- awaiting space in request queue
1428 */ 1428 */
1429static void 1429static void
@@ -1450,11 +1450,11 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1450 1450
1451 1451
1452 1452
1453/** 1453/*
1454 * lps_pvt BFA LPS private functions 1454 * lps_pvt BFA LPS private functions
1455 */ 1455 */
1456 1456
1457/** 1457/*
1458 * return memory requirement 1458 * return memory requirement
1459 */ 1459 */
1460static void 1460static void
@@ -1467,7 +1467,7 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1467 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; 1467 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1468} 1468}
1469 1469
1470/** 1470/*
1471 * bfa module attach at initialization time 1471 * bfa module attach at initialization time
1472 */ 1472 */
1473static void 1473static void
@@ -1515,7 +1515,7 @@ bfa_lps_stop(struct bfa_s *bfa)
1515{ 1515{
1516} 1516}
1517 1517
1518/** 1518/*
1519 * IOC in disabled state -- consider all lps offline 1519 * IOC in disabled state -- consider all lps offline
1520 */ 1520 */
1521static void 1521static void
@@ -1531,7 +1531,7 @@ bfa_lps_iocdisable(struct bfa_s *bfa)
1531 } 1531 }
1532} 1532}
1533 1533
1534/** 1534/*
1535 * Firmware login response 1535 * Firmware login response
1536 */ 1536 */
1537static void 1537static void
@@ -1578,7 +1578,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1578 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1578 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1579} 1579}
1580 1580
1581/** 1581/*
1582 * Firmware logout response 1582 * Firmware logout response
1583 */ 1583 */
1584static void 1584static void
@@ -1593,7 +1593,7 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1593 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1593 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1594} 1594}
1595 1595
1596/** 1596/*
1597 * Firmware received a Clear virtual link request (for FCoE) 1597 * Firmware received a Clear virtual link request (for FCoE)
1598 */ 1598 */
1599static void 1599static void
@@ -1607,7 +1607,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1607 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); 1607 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1608} 1608}
1609 1609
1610/** 1610/*
1611 * Space is available in request queue, resume queueing request to firmware. 1611 * Space is available in request queue, resume queueing request to firmware.
1612 */ 1612 */
1613static void 1613static void
@@ -1618,7 +1618,7 @@ bfa_lps_reqq_resume(void *lps_arg)
1618 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); 1618 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1619} 1619}
1620 1620
1621/** 1621/*
1622 * lps is freed -- triggered by vport delete 1622 * lps is freed -- triggered by vport delete
1623 */ 1623 */
1624static void 1624static void
@@ -1631,7 +1631,7 @@ bfa_lps_free(struct bfa_lps_s *lps)
1631 list_add_tail(&lps->qe, &mod->lps_free_q); 1631 list_add_tail(&lps->qe, &mod->lps_free_q);
1632} 1632}
1633 1633
1634/** 1634/*
1635 * send login request to firmware 1635 * send login request to firmware
1636 */ 1636 */
1637static void 1637static void
@@ -1656,7 +1656,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps)
1656 bfa_reqq_produce(lps->bfa, lps->reqq); 1656 bfa_reqq_produce(lps->bfa, lps->reqq);
1657} 1657}
1658 1658
1659/** 1659/*
1660 * send logout request to firmware 1660 * send logout request to firmware
1661 */ 1661 */
1662static void 1662static void
@@ -1675,7 +1675,7 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
1675 bfa_reqq_produce(lps->bfa, lps->reqq); 1675 bfa_reqq_produce(lps->bfa, lps->reqq);
1676} 1676}
1677 1677
1678/** 1678/*
1679 * Indirect login completion handler for non-fcs 1679 * Indirect login completion handler for non-fcs
1680 */ 1680 */
1681static void 1681static void
@@ -1692,7 +1692,7 @@ bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1692 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1692 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1693} 1693}
1694 1694
1695/** 1695/*
1696 * Login completion handler -- direct call for fcs, queue for others 1696 * Login completion handler -- direct call for fcs, queue for others
1697 */ 1697 */
1698static void 1698static void
@@ -1710,7 +1710,7 @@ bfa_lps_login_comp(struct bfa_lps_s *lps)
1710 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); 1710 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1711} 1711}
1712 1712
1713/** 1713/*
1714 * Indirect logout completion handler for non-fcs 1714 * Indirect logout completion handler for non-fcs
1715 */ 1715 */
1716static void 1716static void
@@ -1725,7 +1725,7 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1725 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1725 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1726} 1726}
1727 1727
1728/** 1728/*
1729 * Logout completion handler -- direct call for fcs, queue for others 1729 * Logout completion handler -- direct call for fcs, queue for others
1730 */ 1730 */
1731static void 1731static void
@@ -1740,7 +1740,7 @@ bfa_lps_logout_comp(struct bfa_lps_s *lps)
1740 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); 1740 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1741} 1741}
1742 1742
1743/** 1743/*
1744 * Clear virtual link completion handler for non-fcs 1744 * Clear virtual link completion handler for non-fcs
1745 */ 1745 */
1746static void 1746static void
@@ -1756,7 +1756,7 @@ bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1756 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); 1756 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1757} 1757}
1758 1758
1759/** 1759/*
1760 * Received Clear virtual link event --direct call for fcs, 1760 * Received Clear virtual link event --direct call for fcs,
1761 * queue for others 1761 * queue for others
1762 */ 1762 */
@@ -1776,7 +1776,7 @@ bfa_lps_cvl_event(struct bfa_lps_s *lps)
1776 1776
1777 1777
1778 1778
1779/** 1779/*
1780 * lps_public BFA LPS public functions 1780 * lps_public BFA LPS public functions
1781 */ 1781 */
1782 1782
@@ -1789,7 +1789,7 @@ bfa_lps_get_max_vport(struct bfa_s *bfa)
1789 return BFA_LPS_MAX_VPORTS_SUPP_CB; 1789 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1790} 1790}
1791 1791
1792/** 1792/*
1793 * Allocate a lport service tag.	1793 * Allocate a lport service tag.
1794 */ 1794 */
1795struct bfa_lps_s * 1795struct bfa_lps_s *
@@ -1809,7 +1809,7 @@ bfa_lps_alloc(struct bfa_s *bfa)
1809 return lps; 1809 return lps;
1810} 1810}
1811 1811
1812/** 1812/*
1813 * Free lport service tag. This can be called anytime after an alloc. 1813 * Free lport service tag. This can be called anytime after an alloc.
1814 * No need to wait for any pending login/logout completions. 1814 * No need to wait for any pending login/logout completions.
1815 */ 1815 */
@@ -1819,7 +1819,7 @@ bfa_lps_delete(struct bfa_lps_s *lps)
1819 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); 1819 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1820} 1820}
1821 1821
1822/** 1822/*
1823 * Initiate a lport login. 1823 * Initiate a lport login.
1824 */ 1824 */
1825void 1825void
@@ -1836,7 +1836,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1836 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1836 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1837} 1837}
1838 1838
1839/** 1839/*
1840 * Initiate a lport fdisc login. 1840 * Initiate a lport fdisc login.
1841 */ 1841 */
1842void 1842void
@@ -1853,7 +1853,7 @@ bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1853 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1853 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1854} 1854}
1855 1855
1856/** 1856/*
1857 * Initiate a lport logout (flogi). 1857 * Initiate a lport logout (flogi).
1858 */ 1858 */
1859void 1859void
@@ -1862,7 +1862,7 @@ bfa_lps_flogo(struct bfa_lps_s *lps)
1862 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1862 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1863} 1863}
1864 1864
1865/** 1865/*
1866 * Initiate a lport FDSIC logout. 1866 * Initiate a lport FDSIC logout.
1867 */ 1867 */
1868void 1868void
@@ -1871,7 +1871,7 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1871 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1871 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1872} 1872}
1873 1873
1874/** 1874/*
1875 * Discard a pending login request -- should be called only for 1875 * Discard a pending login request -- should be called only for
1876 * link down handling. 1876 * link down handling.
1877 */ 1877 */
@@ -1881,7 +1881,7 @@ bfa_lps_discard(struct bfa_lps_s *lps)
1881 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); 1881 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1882} 1882}
1883 1883
1884/** 1884/*
1885 * Return lport services tag 1885 * Return lport services tag
1886 */ 1886 */
1887u8 1887u8
@@ -1890,7 +1890,7 @@ bfa_lps_get_tag(struct bfa_lps_s *lps)
1890 return lps->lp_tag; 1890 return lps->lp_tag;
1891} 1891}
1892 1892
1893/** 1893/*
1894 * Return lport services tag given the pid 1894 * Return lport services tag given the pid
1895 */ 1895 */
1896u8 1896u8
@@ -1909,7 +1909,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1909 return 0; 1909 return 0;
1910} 1910}
1911 1911
1912/** 1912/*
1913 * return if fabric login indicates support for NPIV 1913 * return if fabric login indicates support for NPIV
1914 */ 1914 */
1915bfa_boolean_t 1915bfa_boolean_t
@@ -1918,7 +1918,7 @@ bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
1918 return lps->npiv_en; 1918 return lps->npiv_en;
1919} 1919}
1920 1920
1921/** 1921/*
1922 * Return TRUE if attached to F-Port, else return FALSE 1922 * Return TRUE if attached to F-Port, else return FALSE
1923 */ 1923 */
1924bfa_boolean_t 1924bfa_boolean_t
@@ -1927,7 +1927,7 @@ bfa_lps_is_fport(struct bfa_lps_s *lps)
1927 return lps->fport; 1927 return lps->fport;
1928} 1928}
1929 1929
1930/** 1930/*
1931 * Return TRUE if attached to a Brocade Fabric 1931 * Return TRUE if attached to a Brocade Fabric
1932 */ 1932 */
1933bfa_boolean_t 1933bfa_boolean_t
@@ -1935,7 +1935,7 @@ bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
1935{ 1935{
1936 return lps->brcd_switch; 1936 return lps->brcd_switch;
1937} 1937}
1938/** 1938/*
1939 * return TRUE if authentication is required 1939 * return TRUE if authentication is required
1940 */ 1940 */
1941bfa_boolean_t 1941bfa_boolean_t
@@ -1950,7 +1950,7 @@ bfa_lps_get_extstatus(struct bfa_lps_s *lps)
1950 return lps->ext_status; 1950 return lps->ext_status;
1951} 1951}
1952 1952
1953/** 1953/*
1954 * return port id assigned to the lport 1954 * return port id assigned to the lport
1955 */ 1955 */
1956u32 1956u32
@@ -1959,7 +1959,7 @@ bfa_lps_get_pid(struct bfa_lps_s *lps)
1959 return lps->lp_pid; 1959 return lps->lp_pid;
1960} 1960}
1961 1961
1962/** 1962/*
1963 * return port id assigned to the base lport 1963 * return port id assigned to the base lport
1964 */ 1964 */
1965u32 1965u32
@@ -1970,7 +1970,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa)
1970 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; 1970 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1971} 1971}
1972 1972
1973/** 1973/*
1974 * Return bb_credit assigned in FLOGI response 1974 * Return bb_credit assigned in FLOGI response
1975 */ 1975 */
1976u16 1976u16
@@ -1979,7 +1979,7 @@ bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
1979 return lps->pr_bbcred; 1979 return lps->pr_bbcred;
1980} 1980}
1981 1981
1982/** 1982/*
1983 * Return peer port name 1983 * Return peer port name
1984 */ 1984 */
1985wwn_t 1985wwn_t
@@ -1988,7 +1988,7 @@ bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
1988 return lps->pr_pwwn; 1988 return lps->pr_pwwn;
1989} 1989}
1990 1990
1991/** 1991/*
1992 * Return peer node name 1992 * Return peer node name
1993 */ 1993 */
1994wwn_t 1994wwn_t
@@ -1997,7 +1997,7 @@ bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
1997 return lps->pr_nwwn; 1997 return lps->pr_nwwn;
1998} 1998}
1999 1999
2000/** 2000/*
2001 * return reason code if login request is rejected 2001 * return reason code if login request is rejected
2002 */ 2002 */
2003u8 2003u8
@@ -2006,7 +2006,7 @@ bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
2006 return lps->lsrjt_rsn; 2006 return lps->lsrjt_rsn;
2007} 2007}
2008 2008
2009/** 2009/*
2010 * return explanation code if login request is rejected 2010 * return explanation code if login request is rejected
2011 */ 2011 */
2012u8 2012u8
@@ -2015,7 +2015,7 @@ bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
2015 return lps->lsrjt_expl; 2015 return lps->lsrjt_expl;
2016} 2016}
2017 2017
2018/** 2018/*
2019 * Return fpma/spma MAC for lport 2019 * Return fpma/spma MAC for lport
2020 */ 2020 */
2021mac_t 2021mac_t
@@ -2024,7 +2024,7 @@ bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
2024 return lps->lp_mac; 2024 return lps->lp_mac;
2025} 2025}
2026 2026
2027/** 2027/*
2028 * LPS firmware message class handler. 2028 * LPS firmware message class handler.
2029 */ 2029 */
2030void 2030void
@@ -2054,7 +2054,7 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2054 } 2054 }
2055} 2055}
2056 2056
2057/** 2057/*
2058 * FC PORT state machine functions 2058 * FC PORT state machine functions
2059 */ 2059 */
2060static void 2060static void
@@ -2065,7 +2065,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2065 2065
2066 switch (event) { 2066 switch (event) {
2067 case BFA_FCPORT_SM_START: 2067 case BFA_FCPORT_SM_START:
2068 /** 2068 /*
2069 * Start event after IOC is configured and BFA is started. 2069 * Start event after IOC is configured and BFA is started.
2070 */ 2070 */
2071 if (bfa_fcport_send_enable(fcport)) { 2071 if (bfa_fcport_send_enable(fcport)) {
@@ -2079,7 +2079,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2079 break; 2079 break;
2080 2080
2081 case BFA_FCPORT_SM_ENABLE: 2081 case BFA_FCPORT_SM_ENABLE:
2082 /** 2082 /*
2083 * Port is persistently configured to be in enabled state. Do 2083 * Port is persistently configured to be in enabled state. Do
2084 * not change state. Port enabling is done when START event is 2084 * not change state. Port enabling is done when START event is
2085 * received. 2085 * received.
@@ -2087,7 +2087,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2087 break; 2087 break;
2088 2088
2089 case BFA_FCPORT_SM_DISABLE: 2089 case BFA_FCPORT_SM_DISABLE:
2090 /** 2090 /*
2091 * If a port is persistently configured to be disabled, the 2091 * If a port is persistently configured to be disabled, the
2092	 * first event will be a port disable request.	2092	 * first event will be a port disable request.
2093 */ 2093 */
@@ -2123,13 +2123,13 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2123 break; 2123 break;
2124 2124
2125 case BFA_FCPORT_SM_ENABLE: 2125 case BFA_FCPORT_SM_ENABLE:
2126 /** 2126 /*
2127 * Already enable is in progress. 2127 * Already enable is in progress.
2128 */ 2128 */
2129 break; 2129 break;
2130 2130
2131 case BFA_FCPORT_SM_DISABLE: 2131 case BFA_FCPORT_SM_DISABLE:
2132 /** 2132 /*
2133 * Just send disable request to firmware when room becomes 2133 * Just send disable request to firmware when room becomes
2134 * available in request queue. 2134 * available in request queue.
2135 */ 2135 */
@@ -2144,7 +2144,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2144 2144
2145 case BFA_FCPORT_SM_LINKUP: 2145 case BFA_FCPORT_SM_LINKUP:
2146 case BFA_FCPORT_SM_LINKDOWN: 2146 case BFA_FCPORT_SM_LINKDOWN:
2147 /** 2147 /*
2148 * Possible to get link events when doing back-to-back 2148 * Possible to get link events when doing back-to-back
2149 * enable/disables. 2149 * enable/disables.
2150 */ 2150 */
@@ -2183,7 +2183,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2183 break; 2183 break;
2184 2184
2185 case BFA_FCPORT_SM_ENABLE: 2185 case BFA_FCPORT_SM_ENABLE:
2186 /** 2186 /*
2187 * Already being enabled. 2187 * Already being enabled.
2188 */ 2188 */
2189 break; 2189 break;
@@ -2256,13 +2256,13 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2256 break; 2256 break;
2257 2257
2258 case BFA_FCPORT_SM_LINKDOWN: 2258 case BFA_FCPORT_SM_LINKDOWN:
2259 /** 2259 /*
2260 * Possible to get link down event. 2260 * Possible to get link down event.
2261 */ 2261 */
2262 break; 2262 break;
2263 2263
2264 case BFA_FCPORT_SM_ENABLE: 2264 case BFA_FCPORT_SM_ENABLE:
2265 /** 2265 /*
2266 * Already enabled. 2266 * Already enabled.
2267 */ 2267 */
2268 break; 2268 break;
@@ -2305,7 +2305,7 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2305 2305
2306 switch (event) { 2306 switch (event) {
2307 case BFA_FCPORT_SM_ENABLE: 2307 case BFA_FCPORT_SM_ENABLE:
2308 /** 2308 /*
2309 * Already enabled. 2309 * Already enabled.
2310 */ 2310 */
2311 break; 2311 break;
@@ -2398,14 +2398,14 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2398 break; 2398 break;
2399 2399
2400 case BFA_FCPORT_SM_DISABLE: 2400 case BFA_FCPORT_SM_DISABLE:
2401 /** 2401 /*
2402 * Already being disabled. 2402 * Already being disabled.
2403 */ 2403 */
2404 break; 2404 break;
2405 2405
2406 case BFA_FCPORT_SM_LINKUP: 2406 case BFA_FCPORT_SM_LINKUP:
2407 case BFA_FCPORT_SM_LINKDOWN: 2407 case BFA_FCPORT_SM_LINKDOWN:
2408 /** 2408 /*
2409 * Possible to get link events when doing back-to-back 2409 * Possible to get link events when doing back-to-back
2410 * enable/disables. 2410 * enable/disables.
2411 */ 2411 */
@@ -2452,7 +2452,7 @@ bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2452 2452
2453 case BFA_FCPORT_SM_LINKUP: 2453 case BFA_FCPORT_SM_LINKUP:
2454 case BFA_FCPORT_SM_LINKDOWN: 2454 case BFA_FCPORT_SM_LINKDOWN:
2455 /** 2455 /*
2456 * Possible to get link events when doing back-to-back 2456 * Possible to get link events when doing back-to-back
2457 * enable/disables. 2457 * enable/disables.
2458 */ 2458 */
@@ -2482,7 +2482,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2482 break; 2482 break;
2483 2483
2484 case BFA_FCPORT_SM_DISABLE: 2484 case BFA_FCPORT_SM_DISABLE:
2485 /** 2485 /*
2486 * Already being disabled. 2486 * Already being disabled.
2487 */ 2487 */
2488 break; 2488 break;
@@ -2507,7 +2507,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2507 2507
2508 case BFA_FCPORT_SM_LINKUP: 2508 case BFA_FCPORT_SM_LINKUP:
2509 case BFA_FCPORT_SM_LINKDOWN: 2509 case BFA_FCPORT_SM_LINKDOWN:
2510 /** 2510 /*
2511 * Possible to get link events when doing back-to-back 2511 * Possible to get link events when doing back-to-back
2512 * enable/disables. 2512 * enable/disables.
2513 */ 2513 */
@@ -2532,7 +2532,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2532 2532
2533 switch (event) { 2533 switch (event) {
2534 case BFA_FCPORT_SM_START: 2534 case BFA_FCPORT_SM_START:
2535 /** 2535 /*
2536 * Ignore start event for a port that is disabled. 2536 * Ignore start event for a port that is disabled.
2537 */ 2537 */
2538 break; 2538 break;
@@ -2556,7 +2556,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2556 break; 2556 break;
2557 2557
2558 case BFA_FCPORT_SM_DISABLE: 2558 case BFA_FCPORT_SM_DISABLE:
2559 /** 2559 /*
2560 * Already disabled. 2560 * Already disabled.
2561 */ 2561 */
2562 break; 2562 break;
@@ -2586,14 +2586,14 @@ bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2586 break; 2586 break;
2587 2587
2588 default: 2588 default:
2589 /** 2589 /*
2590 * Ignore all other events. 2590 * Ignore all other events.
2591 */ 2591 */
2592 ; 2592 ;
2593 } 2593 }
2594} 2594}
2595 2595
2596/** 2596/*
2597 * Port is enabled. IOC is down/failed. 2597 * Port is enabled. IOC is down/failed.
2598 */ 2598 */
2599static void 2599static void
@@ -2612,14 +2612,14 @@ bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2612 break; 2612 break;
2613 2613
2614 default: 2614 default:
2615 /** 2615 /*
2616 * Ignore all events. 2616 * Ignore all events.
2617 */ 2617 */
2618 ; 2618 ;
2619 } 2619 }
2620} 2620}
2621 2621
2622/** 2622/*
2623 * Port is disabled. IOC is down/failed. 2623 * Port is disabled. IOC is down/failed.
2624 */ 2624 */
2625static void 2625static void
@@ -2638,14 +2638,14 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2638 break; 2638 break;
2639 2639
2640 default: 2640 default:
2641 /** 2641 /*
2642 * Ignore all events. 2642 * Ignore all events.
2643 */ 2643 */
2644 ; 2644 ;
2645 } 2645 }
2646} 2646}
2647 2647
2648/** 2648/*
2649 * Link state is down 2649 * Link state is down
2650 */ 2650 */
2651static void 2651static void
@@ -2665,7 +2665,7 @@ bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2665 } 2665 }
2666} 2666}
2667 2667
2668/** 2668/*
2669 * Link state is waiting for down notification 2669 * Link state is waiting for down notification
2670 */ 2670 */
2671static void 2671static void
@@ -2688,7 +2688,7 @@ bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2688 } 2688 }
2689} 2689}
2690 2690
2691/** 2691/*
2692 * Link state is waiting for down notification and there is a pending up 2692 * Link state is waiting for down notification and there is a pending up
2693 */ 2693 */
2694static void 2694static void
@@ -2712,7 +2712,7 @@ bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2712 } 2712 }
2713} 2713}
2714 2714
2715/** 2715/*
2716 * Link state is up 2716 * Link state is up
2717 */ 2717 */
2718static void 2718static void
@@ -2732,7 +2732,7 @@ bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2732 } 2732 }
2733} 2733}
2734 2734
2735/** 2735/*
2736 * Link state is waiting for up notification 2736 * Link state is waiting for up notification
2737 */ 2737 */
2738static void 2738static void
@@ -2755,7 +2755,7 @@ bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2755 } 2755 }
2756} 2756}
2757 2757
2758/** 2758/*
2759 * Link state is waiting for up notification and there is a pending down 2759 * Link state is waiting for up notification and there is a pending down
2760 */ 2760 */
2761static void 2761static void
@@ -2779,7 +2779,7 @@ bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2779 } 2779 }
2780} 2780}
2781 2781
2782/** 2782/*
2783 * Link state is waiting for up notification and there are pending down and up 2783 * Link state is waiting for up notification and there are pending down and up
2784 */ 2784 */
2785static void 2785static void
@@ -2805,7 +2805,7 @@ bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2805 2805
2806 2806
2807 2807
2808/** 2808/*
2809 * hal_port_private 2809 * hal_port_private
2810 */ 2810 */
2811 2811
@@ -2820,7 +2820,7 @@ __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2820 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); 2820 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2821} 2821}
2822 2822
2823/** 2823/*
2824 * Send SCN notification to upper layers. 2824 * Send SCN notification to upper layers.
2825 * trunk - false if caller is fcport to ignore fcport event in trunked mode 2825 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2826 */ 2826 */
@@ -2896,7 +2896,7 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2896 bfa_meminfo_dma_phys(meminfo) = dm_pa; 2896 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2897} 2897}
2898 2898
2899/** 2899/*
2900 * Memory initialization. 2900 * Memory initialization.
2901 */ 2901 */
2902static void 2902static void
@@ -2917,13 +2917,13 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2917 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); 2917 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2918 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 2918 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2919 2919
2920 /** 2920 /*
2921 * initialize time stamp for stats reset 2921 * initialize time stamp for stats reset
2922 */ 2922 */
2923 bfa_os_gettimeofday(&tv); 2923 bfa_os_gettimeofday(&tv);
2924 fcport->stats_reset_time = tv.tv_sec; 2924 fcport->stats_reset_time = tv.tv_sec;
2925 2925
2926 /** 2926 /*
2927 * initialize and set default configuration 2927 * initialize and set default configuration
2928 */ 2928 */
2929 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; 2929 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
@@ -2941,7 +2941,7 @@ bfa_fcport_detach(struct bfa_s *bfa)
2941{ 2941{
2942} 2942}
2943 2943
2944/** 2944/*
2945 * Called when IOC is ready. 2945 * Called when IOC is ready.
2946 */ 2946 */
2947static void 2947static void
@@ -2950,7 +2950,7 @@ bfa_fcport_start(struct bfa_s *bfa)
2950 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); 2950 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2951} 2951}
2952 2952
2953/** 2953/*
2954 * Called before IOC is stopped. 2954 * Called before IOC is stopped.
2955 */ 2955 */
2956static void 2956static void
@@ -2960,7 +2960,7 @@ bfa_fcport_stop(struct bfa_s *bfa)
2960 bfa_trunk_iocdisable(bfa); 2960 bfa_trunk_iocdisable(bfa);
2961} 2961}
2962 2962
2963/** 2963/*
2964 * Called when IOC failure is detected. 2964 * Called when IOC failure is detected.
2965 */ 2965 */
2966static void 2966static void
@@ -2988,7 +2988,7 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2988 fcport->qos_attr = pevent->link_state.qos_attr; 2988 fcport->qos_attr = pevent->link_state.qos_attr;
2989 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; 2989 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
2990 2990
2991 /** 2991 /*
2992 * update trunk state if applicable 2992 * update trunk state if applicable
2993 */ 2993 */
2994 if (!fcport->cfg.trunked) 2994 if (!fcport->cfg.trunked)
@@ -3008,7 +3008,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3008 fcport->topology = BFA_PORT_TOPOLOGY_NONE; 3008 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3009} 3009}
3010 3010
3011/** 3011/*
3012 * Send port enable message to firmware. 3012 * Send port enable message to firmware.
3013 */ 3013 */
3014static bfa_boolean_t 3014static bfa_boolean_t
@@ -3016,13 +3016,13 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3016{ 3016{
3017 struct bfi_fcport_enable_req_s *m; 3017 struct bfi_fcport_enable_req_s *m;
3018 3018
3019 /** 3019 /*
3020 * Increment message tag before queue check, so that responses to old 3020 * Increment message tag before queue check, so that responses to old
3021 * requests are discarded. 3021 * requests are discarded.
3022 */ 3022 */
3023 fcport->msgtag++; 3023 fcport->msgtag++;
3024 3024
3025 /** 3025 /*
3026 * check for room in queue to send request now 3026 * check for room in queue to send request now
3027 */ 3027 */
3028 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 3028 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3043,14 +3043,14 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3043 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); 3043 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3044 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); 3044 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3045 3045
3046 /** 3046 /*
3047 * queue I/O message to firmware 3047 * queue I/O message to firmware
3048 */ 3048 */
3049 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3049 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3050 return BFA_TRUE; 3050 return BFA_TRUE;
3051} 3051}
3052 3052
3053/** 3053/*
3054 * Send port disable message to firmware. 3054 * Send port disable message to firmware.
3055 */ 3055 */
3056static bfa_boolean_t 3056static bfa_boolean_t
@@ -3058,13 +3058,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3058{ 3058{
3059 struct bfi_fcport_req_s *m; 3059 struct bfi_fcport_req_s *m;
3060 3060
3061 /** 3061 /*
3062 * Increment message tag before queue check, so that responses to old 3062 * Increment message tag before queue check, so that responses to old
3063 * requests are discarded. 3063 * requests are discarded.
3064 */ 3064 */
3065 fcport->msgtag++; 3065 fcport->msgtag++;
3066 3066
3067 /** 3067 /*
3068 * check for room in queue to send request now 3068 * check for room in queue to send request now
3069 */ 3069 */
3070 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 3070 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3078,7 +3078,7 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3078 bfa_lpuid(fcport->bfa)); 3078 bfa_lpuid(fcport->bfa));
3079 m->msgtag = fcport->msgtag; 3079 m->msgtag = fcport->msgtag;
3080 3080
3081 /** 3081 /*
3082 * queue I/O message to firmware 3082 * queue I/O message to firmware
3083 */ 3083 */
3084 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3084 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3103,7 +3103,7 @@ bfa_fcport_send_txcredit(void *port_cbarg)
3103 struct bfa_fcport_s *fcport = port_cbarg; 3103 struct bfa_fcport_s *fcport = port_cbarg;
3104 struct bfi_fcport_set_svc_params_req_s *m; 3104 struct bfi_fcport_set_svc_params_req_s *m;
3105 3105
3106 /** 3106 /*
3107 * check for room in queue to send request now 3107 * check for room in queue to send request now
3108 */ 3108 */
3109 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); 3109 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3116,7 +3116,7 @@ bfa_fcport_send_txcredit(void *port_cbarg)
3116 bfa_lpuid(fcport->bfa)); 3116 bfa_lpuid(fcport->bfa));
3117 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit); 3117 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
3118 3118
3119 /** 3119 /*
3120 * queue I/O message to firmware 3120 * queue I/O message to firmware
3121 */ 3121 */
3122 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3122 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3235,7 +3235,7 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3235 if (complete) { 3235 if (complete) {
3236 struct bfa_timeval_s tv; 3236 struct bfa_timeval_s tv;
3237 3237
3238 /** 3238 /*
3239 * re-initialize time stamp for stats reset 3239 * re-initialize time stamp for stats reset
3240 */ 3240 */
3241 bfa_os_gettimeofday(&tv); 3241 bfa_os_gettimeofday(&tv);
@@ -3289,7 +3289,7 @@ bfa_fcport_send_stats_clear(void *cbarg)
3289 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3289 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3290} 3290}
3291 3291
3292/** 3292/*
3293 * Handle trunk SCN event from firmware. 3293 * Handle trunk SCN event from firmware.
3294 */ 3294 */
3295static void 3295static void
@@ -3310,7 +3310,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3310 bfa_trc(fcport->bfa, scn->trunk_state); 3310 bfa_trc(fcport->bfa, scn->trunk_state);
3311 bfa_trc(fcport->bfa, scn->trunk_speed); 3311 bfa_trc(fcport->bfa, scn->trunk_speed);
3312 3312
3313 /** 3313 /*
3314 * Save off new state for trunk attribute query 3314 * Save off new state for trunk attribute query
3315 */ 3315 */
3316 state_prev = trunk->attr.state; 3316 state_prev = trunk->attr.state;
@@ -3358,7 +3358,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3358 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); 3358 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3359 } 3359 }
3360 3360
3361 /** 3361 /*
3362 * Notify upper layers if trunk state changed. 3362 * Notify upper layers if trunk state changed.
3363 */ 3363 */
3364 if ((state_prev != trunk->attr.state) || 3364 if ((state_prev != trunk->attr.state) ||
@@ -3374,7 +3374,7 @@ bfa_trunk_iocdisable(struct bfa_s *bfa)
3374 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3374 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3375 int i = 0; 3375 int i = 0;
3376 3376
3377 /** 3377 /*
3378 * In trunked mode, notify upper layers that link is down 3378 * In trunked mode, notify upper layers that link is down
3379 */ 3379 */
3380 if (fcport->cfg.trunked) { 3380 if (fcport->cfg.trunked) {
@@ -3398,11 +3398,11 @@ bfa_trunk_iocdisable(struct bfa_s *bfa)
3398 3398
3399 3399
3400 3400
3401/** 3401/*
3402 * hal_port_public 3402 * hal_port_public
3403 */ 3403 */
3404 3404
3405/** 3405/*
3406 * Called to initialize port attributes 3406 * Called to initialize port attributes
3407 */ 3407 */
3408void 3408void
@@ -3410,7 +3410,7 @@ bfa_fcport_init(struct bfa_s *bfa)
3410{ 3410{
3411 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3411 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3412 3412
3413 /** 3413 /*
3414 * Initialize port attributes from IOC hardware data. 3414 * Initialize port attributes from IOC hardware data.
3415 */ 3415 */
3416 bfa_fcport_set_wwns(fcport); 3416 bfa_fcport_set_wwns(fcport);
@@ -3424,7 +3424,7 @@ bfa_fcport_init(struct bfa_s *bfa)
3424 bfa_assert(fcport->speed_sup); 3424 bfa_assert(fcport->speed_sup);
3425} 3425}
3426 3426
3427/** 3427/*
3428 * Firmware message handler. 3428 * Firmware message handler.
3429 */ 3429 */
3430void 3430void
@@ -3505,11 +3505,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3505 3505
3506 3506
3507 3507
3508/** 3508/*
3509 * hal_port_api 3509 * hal_port_api
3510 */ 3510 */
3511 3511
3512/** 3512/*
3513 * Registered callback for port events. 3513 * Registered callback for port events.
3514 */ 3514 */
3515void 3515void
@@ -3550,7 +3550,7 @@ bfa_fcport_disable(struct bfa_s *bfa)
3550 return BFA_STATUS_OK; 3550 return BFA_STATUS_OK;
3551} 3551}
3552 3552
3553/** 3553/*
3554 * Configure port speed. 3554 * Configure port speed.
3555 */ 3555 */
3556bfa_status_t 3556bfa_status_t
@@ -3572,7 +3572,7 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3572 return BFA_STATUS_OK; 3572 return BFA_STATUS_OK;
3573} 3573}
3574 3574
3575/** 3575/*
3576 * Get current speed. 3576 * Get current speed.
3577 */ 3577 */
3578enum bfa_port_speed 3578enum bfa_port_speed
@@ -3583,7 +3583,7 @@ bfa_fcport_get_speed(struct bfa_s *bfa)
3583 return fcport->speed; 3583 return fcport->speed;
3584} 3584}
3585 3585
3586/** 3586/*
3587 * Configure port topology. 3587 * Configure port topology.
3588 */ 3588 */
3589bfa_status_t 3589bfa_status_t
@@ -3608,7 +3608,7 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3608 return BFA_STATUS_OK; 3608 return BFA_STATUS_OK;
3609} 3609}
3610 3610
3611/** 3611/*
3612 * Get current topology. 3612 * Get current topology.
3613 */ 3613 */
3614enum bfa_port_topology 3614enum bfa_port_topology
@@ -3708,7 +3708,7 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3708 bfa_fcport_send_txcredit(fcport); 3708 bfa_fcport_send_txcredit(fcport);
3709} 3709}
3710 3710
3711/** 3711/*
3712 * Get port attributes. 3712 * Get port attributes.
3713 */ 3713 */
3714 3714
@@ -3768,7 +3768,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3768 3768
3769#define BFA_FCPORT_STATS_TOV 1000 3769#define BFA_FCPORT_STATS_TOV 1000
3770 3770
3771/** 3771/*
3772 * Fetch port statistics (FCQoS or FCoE). 3772 * Fetch port statistics (FCQoS or FCoE).
3773 */ 3773 */
3774bfa_status_t 3774bfa_status_t
@@ -3794,7 +3794,7 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3794 return BFA_STATUS_OK; 3794 return BFA_STATUS_OK;
3795} 3795}
3796 3796
3797/** 3797/*
3798 * Reset port statistics (FCQoS or FCoE). 3798 * Reset port statistics (FCQoS or FCoE).
3799 */ 3799 */
3800bfa_status_t 3800bfa_status_t
@@ -3818,7 +3818,7 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3818 return BFA_STATUS_OK; 3818 return BFA_STATUS_OK;
3819} 3819}
3820 3820
3821/** 3821/*
3822 * Fetch FCQoS port statistics 3822 * Fetch FCQoS port statistics
3823 */ 3823 */
3824bfa_status_t 3824bfa_status_t
@@ -3831,7 +3831,7 @@ bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3831 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3831 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3832} 3832}
3833 3833
3834/** 3834/*
3835 * Reset FCQoS port statistics 3835 * Reset FCQoS port statistics
3836 */ 3836 */
3837bfa_status_t 3837bfa_status_t
@@ -3843,7 +3843,7 @@ bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3843 return bfa_fcport_clear_stats(bfa, cbfn, cbarg); 3843 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3844} 3844}
3845 3845
3846/** 3846/*
3847 * Fetch FCoE port statistics 3847 * Fetch FCoE port statistics
3848 */ 3848 */
3849bfa_status_t 3849bfa_status_t
@@ -3856,7 +3856,7 @@ bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3856 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); 3856 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3857} 3857}
3858 3858
3859/** 3859/*
3860 * Reset FCoE port statistics 3860 * Reset FCoE port statistics
3861 */ 3861 */
3862bfa_status_t 3862bfa_status_t
@@ -3902,7 +3902,7 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
3902 } 3902 }
3903} 3903}
3904 3904
3905/** 3905/*
3906 * Fetch port attributes. 3906 * Fetch port attributes.
3907 */ 3907 */
3908bfa_boolean_t 3908bfa_boolean_t
@@ -3937,7 +3937,7 @@ bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
3937 3937
3938 if (ioc_type == BFA_IOC_TYPE_FC) { 3938 if (ioc_type == BFA_IOC_TYPE_FC) {
3939 fcport->cfg.qos_enabled = on_off; 3939 fcport->cfg.qos_enabled = on_off;
3940 /** 3940 /*
3941 * Notify fcpim of the change in QoS state 3941 * Notify fcpim of the change in QoS state
3942 */ 3942 */
3943 bfa_fcpim_update_ioredirect(bfa); 3943 bfa_fcpim_update_ioredirect(bfa);
@@ -3957,7 +3957,7 @@ bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
3957 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; 3957 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
3958} 3958}
3959 3959
3960/** 3960/*
3961 * Configure default minimum ratelim speed 3961 * Configure default minimum ratelim speed
3962 */ 3962 */
3963bfa_status_t 3963bfa_status_t
@@ -3978,7 +3978,7 @@ bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3978 return BFA_STATUS_OK; 3978 return BFA_STATUS_OK;
3979} 3979}
3980 3980
3981/** 3981/*
3982 * Get default minimum ratelim speed 3982 * Get default minimum ratelim speed
3983 */ 3983 */
3984enum bfa_port_speed 3984enum bfa_port_speed
@@ -4093,10 +4093,10 @@ bfa_trunk_disable(struct bfa_s *bfa)
4093} 4093}
4094 4094
4095 4095
4096/** 4096/*
4097 * Rport State machine functions 4097 * Rport State machine functions
4098 */ 4098 */
4099/** 4099/*
4100 * Beginning state, only online event expected. 4100 * Beginning state, only online event expected.
4101 */ 4101 */
4102static void 4102static void
@@ -4149,7 +4149,7 @@ bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4149 } 4149 }
4150} 4150}
4151 4151
4152/** 4152/*
4153 * Waiting for rport create response from firmware. 4153 * Waiting for rport create response from firmware.
4154 */ 4154 */
4155static void 4155static void
@@ -4186,7 +4186,7 @@ bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4186 } 4186 }
4187} 4187}
4188 4188
4189/** 4189/*
4190 * Request queue is full, awaiting queue resume to send create request. 4190 * Request queue is full, awaiting queue resume to send create request.
4191 */ 4191 */
4192static void 4192static void
@@ -4227,7 +4227,7 @@ bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4227 } 4227 }
4228} 4228}
4229 4229
4230/** 4230/*
4231 * Online state - normal parking state. 4231 * Online state - normal parking state.
4232 */ 4232 */
4233static void 4233static void
@@ -4295,7 +4295,7 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4295 } 4295 }
4296} 4296}
4297 4297
4298/** 4298/*
4299 * Firmware rport is being deleted - awaiting f/w response. 4299 * Firmware rport is being deleted - awaiting f/w response.
4300 */ 4300 */
4301static void 4301static void
@@ -4358,7 +4358,7 @@ bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4358 } 4358 }
4359} 4359}
4360 4360
4361/** 4361/*
4362 * Offline state. 4362 * Offline state.
4363 */ 4363 */
4364static void 4364static void
@@ -4393,7 +4393,7 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4393 } 4393 }
4394} 4394}
4395 4395
4396/** 4396/*
4397 * Rport is deleted, waiting for firmware response to delete. 4397 * Rport is deleted, waiting for firmware response to delete.
4398 */ 4398 */
4399static void 4399static void
@@ -4445,7 +4445,7 @@ bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4445 } 4445 }
4446} 4446}
4447 4447
4448/** 4448/*
4449 * Waiting for rport create response from firmware. A delete is pending. 4449 * Waiting for rport create response from firmware. A delete is pending.
4450 */ 4450 */
4451static void 4451static void
@@ -4476,7 +4476,7 @@ bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4476 } 4476 }
4477} 4477}
4478 4478
4479/** 4479/*
4480 * Waiting for rport create response from firmware. Rport offline is pending. 4480 * Waiting for rport create response from firmware. Rport offline is pending.
4481 */ 4481 */
4482static void 4482static void
@@ -4511,7 +4511,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4511 } 4511 }
4512} 4512}
4513 4513
4514/** 4514/*
4515 * IOC h/w failed. 4515 * IOC h/w failed.
4516 */ 4516 */
4517static void 4517static void
@@ -4551,7 +4551,7 @@ bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4551 4551
4552 4552
4553 4553
4554/** 4554/*
4555 * bfa_rport_private BFA rport private functions 4555 * bfa_rport_private BFA rport private functions
4556 */ 4556 */
4557 4557
@@ -4615,7 +4615,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4615 rp->rport_tag = i; 4615 rp->rport_tag = i;
4616 bfa_sm_set_state(rp, bfa_rport_sm_uninit); 4616 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4617 4617
4618 /** 4618 /*
4619 * - is unused 4619 * - is unused
4620 */ 4620 */
4621 if (i) 4621 if (i)
@@ -4624,7 +4624,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4624 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); 4624 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
4625 } 4625 }
4626 4626
4627 /** 4627 /*
4628 * consume memory 4628 * consume memory
4629 */ 4629 */
4630 bfa_meminfo_kva(meminfo) = (u8 *) rp; 4630 bfa_meminfo_kva(meminfo) = (u8 *) rp;
@@ -4685,7 +4685,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4685{ 4685{
4686 struct bfi_rport_create_req_s *m; 4686 struct bfi_rport_create_req_s *m;
4687 4687
4688 /** 4688 /*
4689 * check for room in queue to send request now 4689 * check for room in queue to send request now
4690 */ 4690 */
4691 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); 4691 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4706,7 +4706,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4706 m->vf_id = rp->rport_info.vf_id; 4706 m->vf_id = rp->rport_info.vf_id;
4707 m->cisc = rp->rport_info.cisc; 4707 m->cisc = rp->rport_info.cisc;
4708 4708
4709 /** 4709 /*
4710 * queue I/O message to firmware 4710 * queue I/O message to firmware
4711 */ 4711 */
4712 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); 4712 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4718,7 +4718,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4718{ 4718{
4719 struct bfi_rport_delete_req_s *m; 4719 struct bfi_rport_delete_req_s *m;
4720 4720
4721 /** 4721 /*
4722 * check for room in queue to send request now 4722 * check for room in queue to send request now
4723 */ 4723 */
4724 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); 4724 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4731,7 +4731,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4731 bfa_lpuid(rp->bfa)); 4731 bfa_lpuid(rp->bfa));
4732 m->fw_handle = rp->fw_handle; 4732 m->fw_handle = rp->fw_handle;
4733 4733
4734 /** 4734 /*
4735 * queue I/O message to firmware 4735 * queue I/O message to firmware
4736 */ 4736 */
4737 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); 4737 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4743,7 +4743,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4743{ 4743{
4744 struct bfa_rport_speed_req_s *m; 4744 struct bfa_rport_speed_req_s *m;
4745 4745
4746 /** 4746 /*
4747 * check for room in queue to send request now 4747 * check for room in queue to send request now
4748 */ 4748 */
4749 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); 4749 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4757,7 +4757,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4757 m->fw_handle = rp->fw_handle; 4757 m->fw_handle = rp->fw_handle;
4758 m->speed = (u8)rp->rport_info.speed; 4758 m->speed = (u8)rp->rport_info.speed;
4759 4759
4760 /** 4760 /*
4761 * queue I/O message to firmware 4761 * queue I/O message to firmware
4762 */ 4762 */
4763 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); 4763 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4766,11 +4766,11 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4766 4766
4767 4767
4768 4768
4769/** 4769/*
4770 * bfa_rport_public 4770 * bfa_rport_public
4771 */ 4771 */
4772 4772
4773/** 4773/*
4774 * Rport interrupt processing. 4774 * Rport interrupt processing.
4775 */ 4775 */
4776void 4776void
@@ -4812,7 +4812,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4812 4812
4813 4813
4814 4814
4815/** 4815/*
4816 * bfa_rport_api 4816 * bfa_rport_api
4817 */ 4817 */
4818 4818
@@ -4847,7 +4847,7 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4847{ 4847{
4848 bfa_assert(rport_info->max_frmsz != 0); 4848 bfa_assert(rport_info->max_frmsz != 0);
4849 4849
4850 /** 4850 /*
4851 * Some JBODs are seen to be not setting PDU size correctly in PLOGI 4851 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4852 * responses. Default to minimum size. 4852 * responses. Default to minimum size.
4853 */ 4853 */
@@ -4899,11 +4899,11 @@ bfa_rport_clear_stats(struct bfa_rport_s *rport)
4899} 4899}
4900 4900
4901 4901
4902/** 4902/*
4903 * SGPG related functions 4903 * SGPG related functions
4904 */ 4904 */
4905 4905
4906/** 4906/*
4907 * Compute and return memory needed by FCP(im) module. 4907 * Compute and return memory needed by FCP(im) module.
4908 */ 4908 */
4909static void 4909static void
@@ -4995,7 +4995,7 @@ bfa_sgpg_iocdisable(struct bfa_s *bfa)
4995 4995
4996 4996
4997 4997
4998/** 4998/*
4999 * hal_sgpg_public BFA SGPG public functions 4999 * hal_sgpg_public BFA SGPG public functions
5000 */ 5000 */
5001 5001
@@ -5037,7 +5037,7 @@ bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
5037 if (list_empty(&mod->sgpg_wait_q)) 5037 if (list_empty(&mod->sgpg_wait_q))
5038 return; 5038 return;
5039 5039
5040 /** 5040 /*
5041 * satisfy as many waiting requests as possible 5041 * satisfy as many waiting requests as possible
5042 */ 5042 */
5043 do { 5043 do {
@@ -5065,11 +5065,11 @@ bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
5065 5065
5066 wqe->nsgpg_total = wqe->nsgpg = nsgpg; 5066 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
5067 5067
5068 /** 5068 /*
5069 * allocate any left to this one first 5069 * allocate any left to this one first
5070 */ 5070 */
5071 if (mod->free_sgpgs) { 5071 if (mod->free_sgpgs) {
5072 /** 5072 /*
5073 * no one else is waiting for SGPG 5073 * no one else is waiting for SGPG
5074 */ 5074 */
5075 bfa_assert(list_empty(&mod->sgpg_wait_q)); 5075 bfa_assert(list_empty(&mod->sgpg_wait_q));
@@ -5103,7 +5103,7 @@ bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5103 wqe->cbarg = cbarg; 5103 wqe->cbarg = cbarg;
5104} 5104}
5105 5105
5106/** 5106/*
5107 * UF related functions 5107 * UF related functions
5108 */ 5108 */
5109/* 5109/*
@@ -5171,7 +5171,7 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5171 bfa_sge_to_be(&sge[1]); 5171 bfa_sge_to_be(&sge[1]);
5172 } 5172 }
5173 5173
5174 /** 5174 /*
5175 * advance pointer beyond consumed memory 5175 * advance pointer beyond consumed memory
5176 */ 5176 */
5177 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; 5177 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
@@ -5201,7 +5201,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5201 list_add_tail(&uf->qe, &ufm->uf_free_q); 5201 list_add_tail(&uf->qe, &ufm->uf_free_q);
5202 } 5202 }
5203 5203
5204 /** 5204 /*
5205 * advance memory pointer 5205 * advance memory pointer
5206 */ 5206 */
5207 bfa_meminfo_kva(mi) = (u8 *) uf; 5207 bfa_meminfo_kva(mi) = (u8 *) uf;
@@ -5363,11 +5363,11 @@ bfa_uf_start(struct bfa_s *bfa)
5363 5363
5364 5364
5365 5365
5366/** 5366/*
5367 * hal_uf_api 5367 * hal_uf_api
5368 */ 5368 */
5369 5369
5370/** 5370/*
5371 * Register handler for all unsolicited receive frames. 5371 * Register handler for all unsolicited receive frames.
5372 * 5372 *
5373 * @param[in] bfa BFA instance 5373 * @param[in] bfa BFA instance
@@ -5383,7 +5383,7 @@ bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5383 ufm->cbarg = cbarg; 5383 ufm->cbarg = cbarg;
5384} 5384}
5385 5385
5386/** 5386/*
5387 * Free an unsolicited frame back to BFA. 5387 * Free an unsolicited frame back to BFA.
5388 * 5388 *
5389 * @param[in] uf unsolicited frame to be freed 5389 * @param[in] uf unsolicited frame to be freed
@@ -5399,7 +5399,7 @@ bfa_uf_free(struct bfa_uf_s *uf)
5399 5399
5400 5400
5401 5401
5402/** 5402/*
5403 * uf_pub BFA uf module public functions 5403 * uf_pub BFA uf module public functions
5404 */ 5404 */
5405void 5405void
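Every hunk above drops the second asterisk from a block comment opener, turning "/**" into "/*". As an illustration of why that opener matters, the short C sketch below (illustrative only, not part of this patch) contrasts the two styles on two made-up functions; example_sum and example_double are hypothetical names used only here, not code from the driver:

/**
 * example_sum() - add two integers
 * @a: first addend
 * @b: second addend
 *
 * The double-asterisk opener marks this block for the kernel-doc tool,
 * which expects the "name() - summary" line and the @parameter
 * descriptions shown here.
 *
 * Return: sum of @a and @b
 */
int example_sum(int a, int b)
{
	return a + b;
}

/*
 * A single-asterisk opener is an ordinary block comment that kernel-doc
 * skips; this is the form the hunks above convert the bfa comments to.
 */
int example_double(int x)
{
	return example_sum(x, x);
}

The kernel's scripts/kernel-doc parses only blocks that open with the double asterisk, so comments written in the second style are simply ignored by the documentation tooling.
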
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 79f8b86e30e3..1f938974b848 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfad.c Linux driver PCI interface module. 19 * bfad.c Linux driver PCI interface module.
20 */ 20 */
21#include <linux/module.h> 21#include <linux/module.h>
@@ -151,7 +151,7 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
151static void 151static void
152bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); 152bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
153 153
154/** 154/*
155 * Beginning state for the driver instance, awaiting the pci_probe event 155 * Beginning state for the driver instance, awaiting the pci_probe event
156 */ 156 */
157static void 157static void
@@ -181,7 +181,7 @@ bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
181 } 181 }
182} 182}
183 183
184/** 184/*
185 * Driver Instance is created, awaiting event INIT to initialize the bfad 185 * Driver Instance is created, awaiting event INIT to initialize the bfad
186 */ 186 */
187static void 187static void
@@ -364,7 +364,7 @@ bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
364 } 364 }
365} 365}
366 366
367/** 367/*
368 * BFA callbacks 368 * BFA callbacks
369 */ 369 */
370void 370void
@@ -376,7 +376,7 @@ bfad_hcb_comp(void *arg, bfa_status_t status)
376 complete(&fcomp->comp); 376 complete(&fcomp->comp);
377} 377}
378 378
379/** 379/*
380 * bfa_init callback 380 * bfa_init callback
381 */ 381 */
382void 382void
@@ -401,7 +401,7 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
401 complete(&bfad->comp); 401 complete(&bfad->comp);
402} 402}
403 403
404/** 404/*
405 * BFA_FCS callbacks 405 * BFA_FCS callbacks
406 */ 406 */
407struct bfad_port_s * 407struct bfad_port_s *
@@ -457,7 +457,7 @@ bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
457 } 457 }
458} 458}
459 459
460/** 460/*
461 * FCS RPORT alloc callback, after successful PLOGI by FCS 461 * FCS RPORT alloc callback, after successful PLOGI by FCS
462 */ 462 */
463bfa_status_t 463bfa_status_t
@@ -478,7 +478,7 @@ ext:
478 return rc; 478 return rc;
479} 479}
480 480
481/** 481/*
482 * FCS PBC VPORT Create 482 * FCS PBC VPORT Create
483 */ 483 */
484void 484void
@@ -663,7 +663,7 @@ ext:
663 return rc; 663 return rc;
664} 664}
665 665
666/** 666/*
667 * Create a vport under a vf. 667 * Create a vport under a vf.
668 */ 668 */
669bfa_status_t 669bfa_status_t
@@ -1140,7 +1140,7 @@ bfad_worker(void *ptr)
1140 return 0; 1140 return 0;
1141} 1141}
1142 1142
1143/** 1143/*
1144 * BFA driver interrupt functions 1144 * BFA driver interrupt functions
1145 */ 1145 */
1146irqreturn_t 1146irqreturn_t
@@ -1199,7 +1199,7 @@ bfad_msix(int irq, void *dev_id)
1199 return IRQ_HANDLED; 1199 return IRQ_HANDLED;
1200} 1200}
1201 1201
1202/** 1202/*
1203 * Initialize the MSIX entry table. 1203 * Initialize the MSIX entry table.
1204 */ 1204 */
1205static void 1205static void
@@ -1252,7 +1252,7 @@ bfad_install_msix_handler(struct bfad_s *bfad)
1252 return 0; 1252 return 0;
1253} 1253}
1254 1254
1255/** 1255/*
1256 * Setup MSIX based interrupt. 1256 * Setup MSIX based interrupt.
1257 */ 1257 */
1258int 1258int
@@ -1333,7 +1333,7 @@ bfad_remove_intr(struct bfad_s *bfad)
1333 } 1333 }
1334} 1334}
1335 1335
1336/** 1336/*
1337 * PCI probe entry. 1337 * PCI probe entry.
1338 */ 1338 */
1339int 1339int
@@ -1419,7 +1419,7 @@ out:
1419 return error; 1419 return error;
1420} 1420}
1421 1421
1422/** 1422/*
1423 * PCI remove entry. 1423 * PCI remove entry.
1424 */ 1424 */
1425void 1425void
@@ -1500,7 +1500,7 @@ static struct pci_driver bfad_pci_driver = {
1500 .remove = __devexit_p(bfad_pci_remove), 1500 .remove = __devexit_p(bfad_pci_remove),
1501}; 1501};
1502 1502
1503/** 1503/*
1504 * Driver module init. 1504 * Driver module init.
1505 */ 1505 */
1506static int __init 1506static int __init
@@ -1540,7 +1540,7 @@ ext:
1540 return error; 1540 return error;
1541} 1541}
1542 1542
1543/** 1543/*
1544 * Driver module exit. 1544 * Driver module exit.
1545 */ 1545 */
1546static void __exit 1546static void __exit
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 27dd06ff6a3d..ed9fff440b5c 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -15,14 +15,14 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfad_attr.c Linux driver configuration interface module. 19 * bfad_attr.c Linux driver configuration interface module.
20 */ 20 */
21 21
22#include "bfad_drv.h" 22#include "bfad_drv.h"
23#include "bfad_im.h" 23#include "bfad_im.h"
24 24
25/** 25/*
26 * FC transport template entry, get SCSI target port ID. 26 * FC transport template entry, get SCSI target port ID.
27 */ 27 */
28void 28void
@@ -48,7 +48,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
48 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 48 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
49} 49}
50 50
51/** 51/*
52 * FC transport template entry, get SCSI target nwwn. 52 * FC transport template entry, get SCSI target nwwn.
53 */ 53 */
54void 54void
@@ -74,7 +74,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
74 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 74 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
75} 75}
76 76
77/** 77/*
78 * FC transport template entry, get SCSI target pwwn. 78 * FC transport template entry, get SCSI target pwwn.
79 */ 79 */
80void 80void
@@ -100,7 +100,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
100 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 100 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
101} 101}
102 102
103/** 103/*
104 * FC transport template entry, get SCSI host port ID. 104 * FC transport template entry, get SCSI host port ID.
105 */ 105 */
106void 106void
@@ -114,7 +114,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); 114 bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
115} 115}
116 116
117/** 117/*
118 * FC transport template entry, get SCSI host port type. 118 * FC transport template entry, get SCSI host port type.
119 */ 119 */
120static void 120static void
@@ -146,7 +146,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
146 } 146 }
147} 147}
148 148
149/** 149/*
150 * FC transport template entry, get SCSI host port state. 150 * FC transport template entry, get SCSI host port state.
151 */ 151 */
152static void 152static void
@@ -183,7 +183,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
183 } 183 }
184} 184}
185 185
186/** 186/*
187 * FC transport template entry, get SCSI host active fc4s. 187 * FC transport template entry, get SCSI host active fc4s.
188 */ 188 */
189static void 189static void
@@ -202,7 +202,7 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
202 fc_host_active_fc4s(shost)[7] = 1; 202 fc_host_active_fc4s(shost)[7] = 1;
203} 203}
204 204
205/** 205/*
206 * FC transport template entry, get SCSI host link speed. 206 * FC transport template entry, get SCSI host link speed.
207 */ 207 */
208static void 208static void
@@ -236,7 +236,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
236 } 236 }
237} 237}
238 238
239/** 239/*
240 * FC transport template entry, get SCSI host port type. 240 * FC transport template entry, get SCSI host port type.
241 */ 241 */
242static void 242static void
@@ -253,7 +253,7 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
253 253
254} 254}
255 255
256/** 256/*
257 * FC transport template entry, get BFAD statistics. 257 * FC transport template entry, get BFAD statistics.
258 */ 258 */
259static struct fc_host_statistics * 259static struct fc_host_statistics *
@@ -304,7 +304,7 @@ bfad_im_get_stats(struct Scsi_Host *shost)
304 return hstats; 304 return hstats;
305} 305}
306 306
307/** 307/*
308 * FC transport template entry, reset BFAD statistics. 308 * FC transport template entry, reset BFAD statistics.
309 */ 309 */
310static void 310static void
@@ -331,7 +331,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
331 return; 331 return;
332} 332}
333 333
334/** 334/*
335 * FC transport template entry, get rport loss timeout. 335 * FC transport template entry, get rport loss timeout.
336 */ 336 */
337static void 337static void
@@ -347,7 +347,7 @@ bfad_im_get_rport_loss_tmo(struct fc_rport *rport)
347 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 347 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
348} 348}
349 349
350/** 350/*
351 * FC transport template entry, set rport loss timeout. 351 * FC transport template entry, set rport loss timeout.
352 */ 352 */
353static void 353static void
@@ -633,7 +633,7 @@ struct fc_function_template bfad_im_vport_fc_function_template = {
633 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, 633 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
634}; 634};
635 635
636/** 636/*
637 * Scsi_Host_attrs SCSI host attributes 637 * Scsi_Host_attrs SCSI host attributes
638 */ 638 */
639static ssize_t 639static ssize_t
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 7a6f48b277c2..8daa716739d1 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17 17
18/** 18/*
19 * bfad_im.c Linux driver IM module. 19 * bfad_im.c Linux driver IM module.
20 */ 20 */
21 21
@@ -164,10 +164,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
164 wake_up(wq); 164 wake_up(wq);
165} 165}
166 166
167/** 167/*
168 * Scsi_Host_template SCSI host template 168 * Scsi_Host_template SCSI host template
169 */ 169 */
170/** 170/*
171 * Scsi_Host template entry, returns BFAD PCI info. 171 * Scsi_Host template entry, returns BFAD PCI info.
172 */ 172 */
173static const char * 173static const char *
@@ -196,7 +196,7 @@ bfad_im_info(struct Scsi_Host *shost)
196 return bfa_buf; 196 return bfa_buf;
197} 197}
198 198
199/** 199/*
200 * Scsi_Host template entry, aborts the specified SCSI command. 200 * Scsi_Host template entry, aborts the specified SCSI command.
201 * 201 *
202 * Returns: SUCCESS or FAILED. 202 * Returns: SUCCESS or FAILED.
@@ -280,7 +280,7 @@ out:
280 return rc; 280 return rc;
281} 281}
282 282
283/** 283/*
284 * Scsi_Host template entry, resets a LUN and aborts all its commands. 284 * Scsi_Host template entry, resets a LUN and aborts all its commands.
285 * 285 *
286 * Returns: SUCCESS or FAILED. 286 * Returns: SUCCESS or FAILED.
@@ -319,7 +319,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
319 goto out; 319 goto out;
320 } 320 }
321 321
322 /** 322 /*
323 * Set host_scribble to NULL to avoid aborting a task command 323 * Set host_scribble to NULL to avoid aborting a task command
324 * if it happens. 324 * if it happens.
325 */ 325 */
@@ -346,7 +346,7 @@ out:
346 return rc; 346 return rc;
347} 347}
348 348
349/** 349/*
350 * Scsi_Host template entry, resets the bus and aborts all commands. 350 * Scsi_Host template entry, resets the bus and aborts all commands.
351 */ 351 */
352static int 352static int
@@ -396,7 +396,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
396 return SUCCESS; 396 return SUCCESS;
397} 397}
398 398
399/** 399/*
400 * Scsi_Host template entry slave_destroy. 400 * Scsi_Host template entry slave_destroy.
401 */ 401 */
402static void 402static void
@@ -406,11 +406,11 @@ bfad_im_slave_destroy(struct scsi_device *sdev)
406 return; 406 return;
407} 407}
408 408
409/** 409/*
410 * BFA FCS itnim callbacks 410 * BFA FCS itnim callbacks
411 */ 411 */
412 412
413/** 413/*
414 * BFA FCS itnim alloc callback, after successful PRLI 414 * BFA FCS itnim alloc callback, after successful PRLI
415 * Context: Interrupt 415 * Context: Interrupt
416 */ 416 */
@@ -433,7 +433,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
433 bfad->bfad_flags |= BFAD_RPORT_ONLINE; 433 bfad->bfad_flags |= BFAD_RPORT_ONLINE;
434} 434}
435 435
436/** 436/*
437 * BFA FCS itnim free callback. 437 * BFA FCS itnim free callback.
438 * Context: Interrupt. bfad_lock is held 438 * Context: Interrupt. bfad_lock is held
439 */ 439 */
@@ -471,7 +471,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
471 queue_work(im->drv_workq, &itnim_drv->itnim_work); 471 queue_work(im->drv_workq, &itnim_drv->itnim_work);
472} 472}
473 473
474/** 474/*
475 * BFA FCS itnim online callback. 475 * BFA FCS itnim online callback.
476 * Context: Interrupt. bfad_lock is held 476 * Context: Interrupt. bfad_lock is held
477 */ 477 */
@@ -492,7 +492,7 @@ bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
492 queue_work(im->drv_workq, &itnim_drv->itnim_work); 492 queue_work(im->drv_workq, &itnim_drv->itnim_work);
493} 493}
494 494
495/** 495/*
496 * BFA FCS itnim offline callback. 496 * BFA FCS itnim offline callback.
497 * Context: Interrupt. bfad_lock is held 497 * Context: Interrupt. bfad_lock is held
498 */ 498 */
@@ -519,7 +519,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
519 queue_work(im->drv_workq, &itnim_drv->itnim_work); 519 queue_work(im->drv_workq, &itnim_drv->itnim_work);
520} 520}
521 521
522/** 522/*
523 * Allocate a Scsi_Host for a port. 523 * Allocate a Scsi_Host for a port.
524 */ 524 */
525int 525int
@@ -751,7 +751,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
751 return BFA_STATUS_OK; 751 return BFA_STATUS_OK;
752} 752}
753 753
754/** 754/*
755 * Scsi_Host template entry. 755 * Scsi_Host template entry.
756 * 756 *
757 * Description: 757 * Description:
@@ -896,7 +896,7 @@ bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
896 return NULL; 896 return NULL;
897} 897}
898 898
899/** 899/*
900 * Scsi_Host template entry slave_alloc 900 * Scsi_Host template entry slave_alloc
901 */ 901 */
902static int 902static int
@@ -973,7 +973,7 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
973 sprintf(fc_host_symbolic_name(host), "%s", symname); 973 sprintf(fc_host_symbolic_name(host), "%s", symname);
974 974
975 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); 975 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
976 fc_host_maxframe_size(host) = fcport->cfg.maxfrsize; 976 fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
977} 977}
978 978
979static void 979static void
@@ -1016,7 +1016,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
1016 return; 1016 return;
1017} 1017}
1018 1018
1019/** 1019/*
1020 * Work queue handler using FC transport service 1020 * Work queue handler using FC transport service
1021 * Context: kernel 1021 * Context: kernel
1022 */ 1022 */
@@ -1116,7 +1116,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
1116 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1116 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1117} 1117}
1118 1118
1119/** 1119/*
1120 * Scsi_Host template entry, queue a SCSI command to the BFAD. 1120 * Scsi_Host template entry, queue a SCSI command to the BFAD.
1121 */ 1121 */
1122static int 1122static int