aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-10-12 21:57:01 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-12 21:57:01 -0400
commit6a5a3d6a4adde0c66f3be29bbd7c0d6ffb7e1a40 (patch)
treeae416ffa4458df755f984a05d65ee1c3e220c40b
parent8bbbfa70549bd84f29ff331d0ac051897ccbbd72 (diff)
parent5c1b10ab7f93d24f29b5630286e323d1c5802d5c (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull misc SCSI updates from James Bottomley: "This is an assorted set of stragglers into the merge window with driver updates for megaraid_sas, lpfc, bfi and mvumi. It also includes some fairly major fixes for virtio-scsi (scatterlist init), scsi_debug (off by one error), storvsc (use after free) and qla2xxx (potential deadlock). Signed-off-by: James Bottomley <JBottomley@Parallels.com>" * tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (49 commits) [SCSI] storvsc: Account for in-transit packets in the RESET path [SCSI] qla2xxx: fix potential deadlock on ha->hardware_lock [SCSI] scsi_debug: Fix off-by-one bug when unmapping region [SCSI] Shorten the path length of scsi_cmd_to_driver() [SCSI] virtio-scsi: support online resizing of disks [SCSI] virtio-scsi: fix LUNs greater than 255 [SCSI] virtio-scsi: initialize scatterlist structure [SCSI] megaraid_sas: Version, Changelog, Copyright update [SCSI] megaraid_sas: Remove duplicate code [SCSI] megaraid_sas: Add SystemPD FastPath support [SCSI] megaraid_sas: Add array boundary check for SystemPD [SCSI] megaraid_sas: Load io_request DataLength in bytes [SCSI] megaraid_sas: Add module param for configurable MSI-X vector count [SCSI] megaraid_sas: Remove un-needed completion_lock spinlock calls [SCSI] lpfc 8.3.35: Update lpfc version for 8.3.35 driver release [SCSI] lpfc 8.3.35: Fixed not reporting logical link speed to SCSI midlayer when QoS not on [SCSI] lpfc 8.3.35: Fix error with fabric service parameters causing performance issues [SCSI] lpfc 8.3.35: Fixed SCSI host create showing wrong link speed on SLI3 HBA ports [SCSI] lpfc 8.3.35: Fixed not checking solicition in progress bit when verifying FCF record for use [SCSI] lpfc 8.3.35: Fixed messages for misconfigured port errors ...
-rw-r--r--Documentation/ABI/testing/sysfs-bus-fcoe12
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas13
-rw-r--r--drivers/scsi/bfa/bfa_core.c85
-rw-r--r--drivers/scsi/bfa/bfa_defs.h61
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h119
-rw-r--r--drivers/scsi/bfa/bfa_fc.h5
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c123
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h13
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c64
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h23
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c155
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c288
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c494
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h63
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c236
-rw-r--r--drivers/scsi/bfa/bfa_modules.h1
-rw-r--r--drivers/scsi/bfa/bfa_port.c32
-rw-r--r--drivers/scsi/bfa/bfa_port.h3
-rw-r--r--drivers/scsi/bfa/bfa_svc.c732
-rw-r--r--drivers/scsi/bfa/bfa_svc.h30
-rw-r--r--drivers/scsi/bfa/bfad.c6
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c375
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h63
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bfa/bfi.h72
-rw-r--r--drivers/scsi/bfa/bfi_ms.h14
-rw-r--r--drivers/scsi/bfa/bfi_reg.h3
-rw-r--r--drivers/scsi/fcoe/fcoe.c14
-rw-r--r--drivers/scsi/fcoe/fcoe.h2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c256
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c39
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c96
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c13
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c27
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h2
-rw-r--r--drivers/scsi/mvumi.c1093
-rw-r--r--drivers/scsi/mvumi.h235
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c5
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_error.c8
-rw-r--r--drivers/scsi/storvsc_drv.c5
-rw-r--r--drivers/scsi/virtio_scsi.c39
-rw-r--r--include/linux/virtio_scsi.h2
-rw-r--r--include/scsi/fc/fc_fcp.h6
-rw-r--r--include/scsi/libfcoe.h2
-rw-r--r--include/scsi/scsi_cmnd.h12
58 files changed, 4274 insertions, 747 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
index 469d09c02f6b..50e2a80ea28f 100644
--- a/Documentation/ABI/testing/sysfs-bus-fcoe
+++ b/Documentation/ABI/testing/sysfs-bus-fcoe
@@ -9,19 +9,19 @@ Attributes:
9 this value will change the dev_loss_tmo for all 9 this value will change the dev_loss_tmo for all
10 FCFs discovered by this controller. 10 FCFs discovered by this controller.
11 11
12 lesb_link_fail: Link Error Status Block (LESB) link failure count. 12 lesb/link_fail: Link Error Status Block (LESB) link failure count.
13 13
14 lesb_vlink_fail: Link Error Status Block (LESB) virtual link 14 lesb/vlink_fail: Link Error Status Block (LESB) virtual link
15 failure count. 15 failure count.
16 16
17 lesb_miss_fka: Link Error Status Block (LESB) missed FCoE 17 lesb/miss_fka: Link Error Status Block (LESB) missed FCoE
18 Initialization Protocol (FIP) Keep-Alives (FKA). 18 Initialization Protocol (FIP) Keep-Alives (FKA).
19 19
20 lesb_symb_err: Link Error Status Block (LESB) symbolic error count. 20 lesb/symb_err: Link Error Status Block (LESB) symbolic error count.
21 21
22 lesb_err_block: Link Error Status Block (LESB) block error count. 22 lesb/err_block: Link Error Status Block (LESB) block error count.
23 23
24 lesb_fcs_error: Link Error Status Block (LESB) Fibre Channel 24 lesb/fcs_error: Link Error Status Block (LESB) Fibre Channel
25 Serivces error count. 25 Serivces error count.
26 26
27Notes: ctlr_X (global increment starting at 0) 27Notes: ctlr_X (global increment starting at 0)
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 3a3079411a3d..da03146c182a 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,16 @@
1Release Date : Mon. Oct 1, 2012 17:00:00 PST 2012 -
2 (emaild-id:megaraidlinux@lsi.com)
3 Adam Radford
4Current Version : 06.504.01.00-rc1
5Old Version : 00.00.06.18-rc1
6 1. Removed un-needed completion_lock spinlock calls.
7 2. Add module param for configurable MSI-X vector count.
8 3. Load io_request DataLength in bytes.
9 4. Add array boundary check for SystemPD.
10 5. Add SystemPD FastPath support.
11 6. Remove duplicate code.
12 7. Version, Changelog, Copyright update.
13-------------------------------------------------------------------------------
1Release Date : Tue. Jun 17, 2012 17:00:00 PST 2012 - 14Release Date : Tue. Jun 17, 2012 17:00:00 PST 2012 -
2 (emaild-id:megaraidlinux@lsi.com) 15 (emaild-id:megaraidlinux@lsi.com)
3 Adam Radford/Kashyap Desai 16 Adam Radford/Kashyap Desai
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index b7c326f7a6d0..342d7d9c0997 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -165,6 +165,16 @@ bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
165 bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg); 165 bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
166} 166}
167 167
168static void
169bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
170{
171 struct bfa_fru_s *fru = BFA_FRU(bfa);
172 struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
173
174 bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg);
175 bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg);
176}
177
168/* 178/*
169 * BFA IOC FC related definitions 179 * BFA IOC FC related definitions
170 */ 180 */
@@ -274,6 +284,15 @@ bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
274 case IOCFC_E_IOC_ENABLED: 284 case IOCFC_E_IOC_ENABLED:
275 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read); 285 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
276 break; 286 break;
287
288 case IOCFC_E_DISABLE:
289 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
290 break;
291
292 case IOCFC_E_STOP:
293 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
294 break;
295
277 case IOCFC_E_IOC_FAILED: 296 case IOCFC_E_IOC_FAILED:
278 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); 297 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
279 break; 298 break;
@@ -298,6 +317,15 @@ bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
298 case IOCFC_E_DCONF_DONE: 317 case IOCFC_E_DCONF_DONE:
299 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait); 318 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
300 break; 319 break;
320
321 case IOCFC_E_DISABLE:
322 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
323 break;
324
325 case IOCFC_E_STOP:
326 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
327 break;
328
301 case IOCFC_E_IOC_FAILED: 329 case IOCFC_E_IOC_FAILED:
302 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); 330 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
303 break; 331 break;
@@ -322,6 +350,15 @@ bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
322 case IOCFC_E_CFG_DONE: 350 case IOCFC_E_CFG_DONE:
323 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done); 351 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
324 break; 352 break;
353
354 case IOCFC_E_DISABLE:
355 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
356 break;
357
358 case IOCFC_E_STOP:
359 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
360 break;
361
325 case IOCFC_E_IOC_FAILED: 362 case IOCFC_E_IOC_FAILED:
326 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed); 363 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
327 break; 364 break;
@@ -433,6 +470,12 @@ bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
433 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe, 470 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
434 bfa_iocfc_stop_cb, iocfc->bfa); 471 bfa_iocfc_stop_cb, iocfc->bfa);
435 break; 472 break;
473
474 case IOCFC_E_IOC_ENABLED:
475 case IOCFC_E_DCONF_DONE:
476 case IOCFC_E_CFG_DONE:
477 break;
478
436 default: 479 default:
437 bfa_sm_fault(iocfc->bfa, event); 480 bfa_sm_fault(iocfc->bfa, event);
438 break; 481 break;
@@ -454,6 +497,15 @@ bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
454 case IOCFC_E_IOC_ENABLED: 497 case IOCFC_E_IOC_ENABLED:
455 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait); 498 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
456 break; 499 break;
500
501 case IOCFC_E_DISABLE:
502 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
503 break;
504
505 case IOCFC_E_STOP:
506 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
507 break;
508
457 case IOCFC_E_IOC_FAILED: 509 case IOCFC_E_IOC_FAILED:
458 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); 510 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
459 511
@@ -493,6 +545,13 @@ bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
493 bfa_iocfc_enable_cb, iocfc->bfa); 545 bfa_iocfc_enable_cb, iocfc->bfa);
494 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE; 546 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
495 break; 547 break;
548 case IOCFC_E_DISABLE:
549 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
550 break;
551
552 case IOCFC_E_STOP:
553 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
554 break;
496 case IOCFC_E_IOC_FAILED: 555 case IOCFC_E_IOC_FAILED:
497 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed); 556 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
498 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE) 557 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
@@ -524,6 +583,10 @@ bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
524 case IOCFC_E_IOC_DISABLED: 583 case IOCFC_E_IOC_DISABLED:
525 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled); 584 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
526 break; 585 break;
586 case IOCFC_E_IOC_ENABLED:
587 case IOCFC_E_DCONF_DONE:
588 case IOCFC_E_CFG_DONE:
589 break;
527 default: 590 default:
528 bfa_sm_fault(iocfc->bfa, event); 591 bfa_sm_fault(iocfc->bfa, event);
529 break; 592 break;
@@ -785,19 +848,20 @@ void
785bfa_isr_enable(struct bfa_s *bfa) 848bfa_isr_enable(struct bfa_s *bfa)
786{ 849{
787 u32 umsk; 850 u32 umsk;
788 int pci_func = bfa_ioc_pcifn(&bfa->ioc); 851 int port_id = bfa_ioc_portid(&bfa->ioc);
789 852
790 bfa_trc(bfa, pci_func); 853 bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc));
854 bfa_trc(bfa, port_id);
791 855
792 bfa_msix_ctrl_install(bfa); 856 bfa_msix_ctrl_install(bfa);
793 857
794 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) { 858 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
795 umsk = __HFN_INT_ERR_MASK_CT2; 859 umsk = __HFN_INT_ERR_MASK_CT2;
796 umsk |= pci_func == 0 ? 860 umsk |= port_id == 0 ?
797 __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2; 861 __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
798 } else { 862 } else {
799 umsk = __HFN_INT_ERR_MASK; 863 umsk = __HFN_INT_ERR_MASK;
800 umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK; 864 umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
801 } 865 }
802 866
803 writel(umsk, bfa->iocfc.bfa_regs.intr_status); 867 writel(umsk, bfa->iocfc.bfa_regs.intr_status);
@@ -930,7 +994,8 @@ bfa_iocfc_send_cfg(void *bfa_arg)
930 cfg_info->single_msix_vec = 1; 994 cfg_info->single_msix_vec = 1;
931 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 995 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
932 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 996 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
933 cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs); 997 cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa,
998 cfg->fwcfg.num_ioim_reqs));
934 cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); 999 cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
935 1000
936 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 1001 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
@@ -1192,10 +1257,14 @@ bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
1192static void 1257static void
1193bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) 1258bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
1194{ 1259{
1260 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1261 struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
1262
1195 bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); 1263 bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
1196 bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); 1264 bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
1197 bfa_rport_res_recfg(bfa, fwcfg->num_rports); 1265 bfa_rport_res_recfg(bfa, fwcfg->num_rports);
1198 bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs); 1266 bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs),
1267 fwcfg->num_ioim_reqs);
1199 bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); 1268 bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
1200} 1269}
1201 1270
@@ -1693,6 +1762,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1693 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa); 1762 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
1694 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa); 1763 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
1695 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa); 1764 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
1765 struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
1696 1766
1697 WARN_ON((cfg == NULL) || (meminfo == NULL)); 1767 WARN_ON((cfg == NULL) || (meminfo == NULL));
1698 1768
@@ -1717,6 +1787,8 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1717 bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo()); 1787 bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
1718 bfa_mem_dma_setup(meminfo, phy_dma, 1788 bfa_mem_dma_setup(meminfo, phy_dma,
1719 bfa_phy_meminfo(cfg->drvcfg.min_cfg)); 1789 bfa_phy_meminfo(cfg->drvcfg.min_cfg));
1790 bfa_mem_dma_setup(meminfo, fru_dma,
1791 bfa_fru_meminfo(cfg->drvcfg.min_cfg));
1720} 1792}
1721 1793
1722/* 1794/*
@@ -1789,6 +1861,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1789 bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg); 1861 bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
1790 bfa_com_diag_attach(bfa); 1862 bfa_com_diag_attach(bfa);
1791 bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg); 1863 bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
1864 bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg);
1792} 1865}
1793 1866
1794/* 1867/*
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index b5a1595cc0a5..0efdf312b42c 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -159,10 +159,13 @@ enum bfa_status {
159 BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */ 159 BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */
160 BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ 160 BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */
161 BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ 161 BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */
162 BFA_STATUS_ERROR_TRL_ENABLED = 87, /* TRL is enabled */
163 BFA_STATUS_ERROR_QOS_ENABLED = 88, /* QoS is enabled */
162 BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */ 164 BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */
163 BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */ 165 BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */
164 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ 166 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
165 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ 167 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
168 BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */
166 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot 169 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
167 * configuration */ 170 * configuration */
168 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */ 171 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
@@ -184,6 +187,17 @@ enum bfa_status {
184 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ 187 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
185 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ 188 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
186 BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */ 189 BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */
190 BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */
191 BFA_STATUS_LOOP_UNSUPP_MEZZ = 231, /* Loop topology is not supported
192 * on mezz cards */
193 BFA_STATUS_INVALID_BW = 233, /* Invalid bandwidth value */
194 BFA_STATUS_QOS_BW_INVALID = 234, /* Invalid QOS bandwidth
195 * configuration */
196 BFA_STATUS_DPORT_ENABLED = 235, /* D-port mode is already enabled */
197 BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */
198 BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */
199 BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */
200 BFA_STATUS_DPORT_ERR = 245, /* D-port mode is enabled */
187 BFA_STATUS_MAX_VAL /* Unknown error code */ 201 BFA_STATUS_MAX_VAL /* Unknown error code */
188}; 202};
189#define bfa_status_t enum bfa_status 203#define bfa_status_t enum bfa_status
@@ -249,6 +263,10 @@ struct bfa_adapter_attr_s {
249 263
250 u8 is_mezz; 264 u8 is_mezz;
251 u8 trunk_capable; 265 u8 trunk_capable;
266 u8 mfg_day; /* manufacturing day */
267 u8 mfg_month; /* manufacturing month */
268 u16 mfg_year; /* manufacturing year */
269 u16 rsvd;
252}; 270};
253 271
254/* 272/*
@@ -499,6 +517,17 @@ struct bfa_ioc_aen_data_s {
499}; 517};
500 518
501/* 519/*
520 * D-port states
521 *
522*/
523enum bfa_dport_state {
524 BFA_DPORT_ST_DISABLED = 0, /* D-port is Disabled */
525 BFA_DPORT_ST_DISABLING = 1, /* D-port is Disabling */
526 BFA_DPORT_ST_ENABLING = 2, /* D-port is Enabling */
527 BFA_DPORT_ST_ENABLED = 3, /* D-port is Enabled */
528};
529
530/*
502 * ---------------------- mfg definitions ------------ 531 * ---------------------- mfg definitions ------------
503 */ 532 */
504 533
@@ -722,7 +751,8 @@ struct bfa_ablk_cfg_pf_s {
722 u8 rsvd[1]; 751 u8 rsvd[1];
723 u16 num_qpairs; 752 u16 num_qpairs;
724 u16 num_vectors; 753 u16 num_vectors;
725 u32 bw; 754 u16 bw_min;
755 u16 bw_max;
726}; 756};
727 757
728struct bfa_ablk_cfg_port_s { 758struct bfa_ablk_cfg_port_s {
@@ -889,11 +919,40 @@ struct sfp_diag_ext_s {
889 u8 ext_status_ctl[2]; 919 u8 ext_status_ctl[2];
890}; 920};
891 921
922/*
923 * Diagnostic: Data Fields -- Address A2h
924 * General Use Fields: User Writable Table - Features's Control Registers
925 * Total 32 bytes
926 */
927struct sfp_usr_eeprom_s {
928 u8 rsvd1[2]; /* 128-129 */
929 u8 ewrap; /* 130 */
930 u8 rsvd2[2]; /* */
931 u8 owrap; /* 133 */
932 u8 rsvd3[2]; /* */
933 u8 prbs; /* 136: PRBS 7 generator */
934 u8 rsvd4[2]; /* */
935 u8 tx_eqz_16; /* 139: TX Equalizer (16xFC) */
936 u8 tx_eqz_8; /* 140: TX Equalizer (8xFC) */
937 u8 rsvd5[2]; /* */
938 u8 rx_emp_16; /* 143: RX Emphasis (16xFC) */
939 u8 rx_emp_8; /* 144: RX Emphasis (8xFC) */
940 u8 rsvd6[2]; /* */
941 u8 tx_eye_adj; /* 147: TX eye Threshold Adjust */
942 u8 rsvd7[3]; /* */
943 u8 tx_eye_qctl; /* 151: TX eye Quality Control */
944 u8 tx_eye_qres; /* 152: TX eye Quality Result */
945 u8 rsvd8[2]; /* */
946 u8 poh[3]; /* 155-157: Power On Hours */
947 u8 rsvd9[2]; /* */
948};
949
892struct sfp_mem_s { 950struct sfp_mem_s {
893 struct sfp_srlid_base_s srlid_base; 951 struct sfp_srlid_base_s srlid_base;
894 struct sfp_srlid_ext_s srlid_ext; 952 struct sfp_srlid_ext_s srlid_ext;
895 struct sfp_diag_base_s diag_base; 953 struct sfp_diag_base_s diag_base;
896 struct sfp_diag_ext_s diag_ext; 954 struct sfp_diag_ext_s diag_ext;
955 struct sfp_usr_eeprom_s usr_eeprom;
897}; 956};
898 957
899/* 958/*
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 36756ce0e58f..ec03c8cd8dac 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -258,6 +258,7 @@ struct bfa_fw_port_lksm_stats_s {
258 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ 258 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
259 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */ 259 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
260 u32 bbsc_lr; /* LKSM LR tx for credit recovery */ 260 u32 bbsc_lr; /* LKSM LR tx for credit recovery */
261 u32 rsvd;
261}; 262};
262 263
263struct bfa_fw_port_snsm_stats_s { 264struct bfa_fw_port_snsm_stats_s {
@@ -270,6 +271,9 @@ struct bfa_fw_port_snsm_stats_s {
270 u32 sync_lost; /* Sync loss count */ 271 u32 sync_lost; /* Sync loss count */
271 u32 sig_lost; /* Signal loss count */ 272 u32 sig_lost; /* Signal loss count */
272 u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */ 273 u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
274 u32 adapt_success; /* SNSM adaptation success */
275 u32 adapt_fails; /* SNSM adaptation failures */
276 u32 adapt_ign_fails; /* SNSM adaptation failures ignored */
273}; 277};
274 278
275struct bfa_fw_port_physm_stats_s { 279struct bfa_fw_port_physm_stats_s {
@@ -324,12 +328,46 @@ struct bfa_fw_fcoe_port_stats_s {
324 struct bfa_fw_fip_stats_s fip_stats; 328 struct bfa_fw_fip_stats_s fip_stats;
325}; 329};
326 330
331/**
332 * @brief LPSM statistics
333 */
334struct bfa_fw_lpsm_stats_s {
335 u32 cls_rx; /* LPSM cls_rx */
336 u32 cls_tx; /* LPSM cls_tx */
337 u32 arbf0_rx; /* LPSM abrf0 rcvd */
338 u32 arbf0_tx; /* LPSM abrf0 xmit */
339 u32 init_rx; /* LPSM loop init start */
340 u32 unexp_hwst; /* LPSM unknown hw state */
341 u32 unexp_frame; /* LPSM unknown_frame */
342 u32 unexp_prim; /* LPSM unexpected primitive */
343 u32 prev_alpa_unavail; /* LPSM prev alpa unavailable */
344 u32 alpa_unavail; /* LPSM alpa not available */
345 u32 lip_rx; /* LPSM lip rcvd */
346 u32 lip_f7f7_rx; /* LPSM lip f7f7 rcvd */
347 u32 lip_f8_rx; /* LPSM lip f8 rcvd */
348 u32 lip_f8f7_rx; /* LPSM lip f8f7 rcvd */
349 u32 lip_other_rx; /* LPSM lip other rcvd */
350 u32 lip_tx; /* LPSM lip xmit */
351 u32 retry_tov; /* LPSM retry TOV */
352 u32 lip_tov; /* LPSM LIP wait TOV */
353 u32 idle_tov; /* LPSM idle wait TOV */
354 u32 arbf0_tov; /* LPSM arbfo wait TOV */
355 u32 stop_loop_tov; /* LPSM stop loop wait TOV */
356 u32 lixa_tov; /* LPSM lisa wait TOV */
357 u32 lixx_tov; /* LPSM lilp/lirp wait TOV */
358 u32 cls_tov; /* LPSM cls wait TOV */
359 u32 sler; /* LPSM SLER recvd */
360 u32 failed; /* LPSM failed */
361 u32 success; /* LPSM online */
362};
363
327/* 364/*
328 * IOC firmware FC uport stats 365 * IOC firmware FC uport stats
329 */ 366 */
330struct bfa_fw_fc_uport_stats_s { 367struct bfa_fw_fc_uport_stats_s {
331 struct bfa_fw_port_snsm_stats_s snsm_stats; 368 struct bfa_fw_port_snsm_stats_s snsm_stats;
332 struct bfa_fw_port_lksm_stats_s lksm_stats; 369 struct bfa_fw_port_lksm_stats_s lksm_stats;
370 struct bfa_fw_lpsm_stats_s lpsm_stats;
333}; 371};
334 372
335/* 373/*
@@ -357,11 +395,6 @@ struct bfa_fw_fcxchg_stats_s {
357 u32 ua_state_inv; 395 u32 ua_state_inv;
358}; 396};
359 397
360struct bfa_fw_lpsm_stats_s {
361 u32 cls_rx;
362 u32 cls_tx;
363};
364
365/* 398/*
366 * Trunk statistics 399 * Trunk statistics
367 */ 400 */
@@ -454,7 +487,6 @@ struct bfa_fw_stats_s {
454 struct bfa_fw_io_stats_s io_stats; 487 struct bfa_fw_io_stats_s io_stats;
455 struct bfa_fw_port_stats_s port_stats; 488 struct bfa_fw_port_stats_s port_stats;
456 struct bfa_fw_fcxchg_stats_s fcxchg_stats; 489 struct bfa_fw_fcxchg_stats_s fcxchg_stats;
457 struct bfa_fw_lpsm_stats_s lpsm_stats;
458 struct bfa_fw_lps_stats_s lps_stats; 490 struct bfa_fw_lps_stats_s lps_stats;
459 struct bfa_fw_trunk_stats_s trunk_stats; 491 struct bfa_fw_trunk_stats_s trunk_stats;
460 struct bfa_fw_advsm_stats_s advsm_stats; 492 struct bfa_fw_advsm_stats_s advsm_stats;
@@ -494,13 +526,23 @@ enum bfa_qos_bw_alloc {
494 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ 526 BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */
495}; 527};
496#pragma pack(1) 528#pragma pack(1)
529
530struct bfa_qos_bw_s {
531 u8 qos_bw_set;
532 u8 high;
533 u8 med;
534 u8 low;
535};
536
497/* 537/*
498 * QoS attribute returned in QoS Query 538 * QoS attribute returned in QoS Query
499 */ 539 */
500struct bfa_qos_attr_s { 540struct bfa_qos_attr_s {
501 u8 state; /* QoS current state */ 541 u8 state; /* QoS current state */
502 u8 rsvd[3]; 542 u8 rsvd1[3];
503 u32 total_bb_cr; /* Total BB Credits */ 543 u32 total_bb_cr; /* Total BB Credits */
544 struct bfa_qos_bw_s qos_bw; /* QOS bw cfg */
545 struct bfa_qos_bw_s qos_bw_op; /* QOS bw operational */
504}; 546};
505 547
506/* 548/*
@@ -692,7 +734,8 @@ enum bfa_port_states {
692 BFA_PORT_ST_FWMISMATCH = 12, 734 BFA_PORT_ST_FWMISMATCH = 12,
693 BFA_PORT_ST_PREBOOT_DISABLED = 13, 735 BFA_PORT_ST_PREBOOT_DISABLED = 13,
694 BFA_PORT_ST_TOGGLING_QWAIT = 14, 736 BFA_PORT_ST_TOGGLING_QWAIT = 14,
695 BFA_PORT_ST_ACQ_ADDR = 15, 737 BFA_PORT_ST_FAA_MISCONFIG = 15,
738 BFA_PORT_ST_DPORT = 16,
696 BFA_PORT_ST_MAX_STATE, 739 BFA_PORT_ST_MAX_STATE,
697}; 740};
698 741
@@ -714,9 +757,11 @@ enum bfa_port_type {
714 */ 757 */
715enum bfa_port_topology { 758enum bfa_port_topology {
716 BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */ 759 BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */
717 BFA_PORT_TOPOLOGY_P2P = 1, /* P2P only */ 760 BFA_PORT_TOPOLOGY_P2P_OLD_VER = 1, /* P2P def for older ver */
718 BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */ 761 BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */
719 BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */ 762 BFA_PORT_TOPOLOGY_AUTO_OLD_VER = 3, /* auto def for older ver */
763 BFA_PORT_TOPOLOGY_AUTO = 4, /* auto topology selection */
764 BFA_PORT_TOPOLOGY_P2P = 5, /* P2P only */
720}; 765};
721 766
722/* 767/*
@@ -760,6 +805,7 @@ enum bfa_port_linkstate_rsn {
760 BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9, 805 BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9,
761 BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10, 806 BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10,
762 BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11, 807 BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11,
808 BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG = 12,
763 809
764 810
765 811
@@ -833,6 +879,19 @@ struct bfa_lunmask_cfg_s {
833 struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG]; 879 struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG];
834}; 880};
835 881
882struct bfa_throttle_cfg_s {
883 u16 is_valid;
884 u16 value;
885 u32 rsvd;
886};
887
888struct bfa_defs_fcpim_throttle_s {
889 u16 max_value;
890 u16 cur_value;
891 u16 cfg_value;
892 u16 rsvd;
893};
894
836/* 895/*
837 * Physical port configuration 896 * Physical port configuration
838 */ 897 */
@@ -851,9 +910,10 @@ struct bfa_port_cfg_s {
851 u8 bb_scn; /* BB_SCN value from FLOGI Exchg */ 910 u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
852 u8 bb_scn_state; /* Config state of BB_SCN */ 911 u8 bb_scn_state; /* Config state of BB_SCN */
853 u8 faa_state; /* FAA enabled/disabled */ 912 u8 faa_state; /* FAA enabled/disabled */
854 u8 rsvd[1]; 913 u8 rsvd1;
855 u16 path_tov; /* device path timeout */ 914 u16 path_tov; /* device path timeout */
856 u16 q_depth; /* SCSI Queue depth */ 915 u16 q_depth; /* SCSI Queue depth */
916 struct bfa_qos_bw_s qos_bw; /* QOS bandwidth */
857}; 917};
858#pragma pack() 918#pragma pack()
859 919
@@ -901,7 +961,7 @@ struct bfa_port_attr_s {
901 961
902 /* FCoE specific */ 962 /* FCoE specific */
903 u16 fcoe_vlan; 963 u16 fcoe_vlan;
904 u8 rsvd1[2]; 964 u8 rsvd1[6];
905}; 965};
906 966
907/* 967/*
@@ -971,6 +1031,13 @@ struct bfa_trunk_vc_attr_s {
971 u16 vc_credits[8]; 1031 u16 vc_credits[8];
972}; 1032};
973 1033
1034struct bfa_fcport_loop_info_s {
1035 u8 myalpa; /* alpa claimed */
1036 u8 alpabm_val; /* alpa bitmap valid or not (1 or 0) */
1037 u8 resvd[6];
1038 struct fc_alpabm_s alpabm; /* alpa bitmap */
1039};
1040
974/* 1041/*
975 * Link state information 1042 * Link state information
976 */ 1043 */
@@ -981,13 +1048,18 @@ struct bfa_port_link_s {
981 u8 speed; /* Link speed (1/2/4/8 G) */ 1048 u8 speed; /* Link speed (1/2/4/8 G) */
982 u32 linkstate_opt; /* Linkstate optional data (debug) */ 1049 u32 linkstate_opt; /* Linkstate optional data (debug) */
983 u8 trunked; /* Trunked or not (1 or 0) */ 1050 u8 trunked; /* Trunked or not (1 or 0) */
984 u8 resvd[3]; 1051 u8 resvd[7];
985 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ 1052 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
986 union { 1053 union {
987 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ 1054 struct bfa_fcport_loop_info_s loop_info;
988 struct bfa_trunk_vc_attr_s trunk_vc_attr; 1055 union {
989 struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */ 1056 struct bfa_qos_vc_attr_s qos_vc_attr;
990 } vc_fcf; 1057 /* VC info from ELP */
1058 struct bfa_trunk_vc_attr_s trunk_vc_attr;
1059 struct bfa_fcport_fcf_s fcf;
1060 /* FCF information (for FCoE) */
1061 } vc_fcf;
1062 } attr;
991}; 1063};
992#pragma pack() 1064#pragma pack()
993 1065
@@ -1112,6 +1184,9 @@ struct bfa_port_fc_stats_s {
1112 u64 tx_frames; /* Tx frames */ 1184 u64 tx_frames; /* Tx frames */
1113 u64 tx_words; /* Tx words */ 1185 u64 tx_words; /* Tx words */
1114 u64 tx_lip; /* Tx LIP */ 1186 u64 tx_lip; /* Tx LIP */
1187 u64 tx_lip_f7f7; /* Tx LIP_F7F7 */
1188 u64 tx_lip_f8f7; /* Tx LIP_F8F7 */
1189 u64 tx_arbf0; /* Tx ARB F0 */
1115 u64 tx_nos; /* Tx NOS */ 1190 u64 tx_nos; /* Tx NOS */
1116 u64 tx_ols; /* Tx OLS */ 1191 u64 tx_ols; /* Tx OLS */
1117 u64 tx_lr; /* Tx LR */ 1192 u64 tx_lr; /* Tx LR */
@@ -1119,6 +1194,9 @@ struct bfa_port_fc_stats_s {
1119 u64 rx_frames; /* Rx frames */ 1194 u64 rx_frames; /* Rx frames */
1120 u64 rx_words; /* Rx words */ 1195 u64 rx_words; /* Rx words */
1121 u64 lip_count; /* Rx LIP */ 1196 u64 lip_count; /* Rx LIP */
1197 u64 rx_lip_f7f7; /* Rx LIP_F7F7 */
1198 u64 rx_lip_f8f7; /* Rx LIP_F8F7 */
1199 u64 rx_arbf0; /* Rx ARB F0 */
1122 u64 nos_count; /* Rx NOS */ 1200 u64 nos_count; /* Rx NOS */
1123 u64 ols_count; /* Rx OLS */ 1201 u64 ols_count; /* Rx OLS */
1124 u64 lr_count; /* Rx LR */ 1202 u64 lr_count; /* Rx LR */
@@ -1140,6 +1218,7 @@ struct bfa_port_fc_stats_s {
1140 u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */ 1218 u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */
1141 u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */ 1219 u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */
1142 u64 bbsc_link_resets; /* Credit Recovery-Link Resets */ 1220 u64 bbsc_link_resets; /* Credit Recovery-Link Resets */
1221 u64 loop_timeouts; /* Loop timeouts */
1143}; 1222};
1144 1223
1145/* 1224/*
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index e0beb4d7e264..bea821b98030 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -24,6 +24,7 @@ typedef u64 wwn_t;
24 24
25#define WWN_NULL (0) 25#define WWN_NULL (0)
26#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ 26#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
27#define FC_ALPA_MAX 128
27 28
28#pragma pack(1) 29#pragma pack(1)
29 30
@@ -1015,6 +1016,10 @@ struct fc_symname_s {
1015 u8 symname[FC_SYMNAME_MAX]; 1016 u8 symname[FC_SYMNAME_MAX];
1016}; 1017};
1017 1018
1019struct fc_alpabm_s {
1020 u8 alpa_bm[FC_ALPA_MAX / 8];
1021};
1022
1018/* 1023/*
1019 * protocol default timeout values 1024 * protocol default timeout values
1020 */ 1025 */
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 273cee90b3b4..dce787f6cca2 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -228,6 +228,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
228 228
229 memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 229 memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
230 230
231 /* For FC AL bb_cr is 0 and altbbcred is 1 */
232 if (!bb_cr)
233 plogi->csp.altbbcred = 1;
234
231 plogi->els_cmd.els_code = els_code; 235 plogi->els_cmd.els_code = els_code;
232 if (els_code == FC_ELS_PLOGI) 236 if (els_code == FC_ELS_PLOGI)
233 fc_els_req_build(fchs, d_id, s_id, ox_id); 237 fc_els_req_build(fchs, d_id, s_id, ox_id);
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 1633963c66ca..27b560962357 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -158,6 +158,7 @@ enum bfa_tskim_event {
158 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ 158 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
159 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ 159 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
160 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ 160 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
161 BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */
161}; 162};
162 163
163/* 164/*
@@ -3036,7 +3037,7 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim)
3036static void 3037static void
3037bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3038bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3038{ 3039{
3039 bfa_trc(tskim->bfa, event); 3040 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3040 3041
3041 switch (event) { 3042 switch (event) {
3042 case BFA_TSKIM_SM_START: 3043 case BFA_TSKIM_SM_START:
@@ -3074,7 +3075,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3074static void 3075static void
3075bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3076bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3076{ 3077{
3077 bfa_trc(tskim->bfa, event); 3078 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3078 3079
3079 switch (event) { 3080 switch (event) {
3080 case BFA_TSKIM_SM_DONE: 3081 case BFA_TSKIM_SM_DONE:
@@ -3110,7 +3111,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3110static void 3111static void
3111bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3112bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3112{ 3113{
3113 bfa_trc(tskim->bfa, event); 3114 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3114 3115
3115 switch (event) { 3116 switch (event) {
3116 case BFA_TSKIM_SM_DONE: 3117 case BFA_TSKIM_SM_DONE:
@@ -3119,6 +3120,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3119 */ 3120 */
3120 break; 3121 break;
3121 3122
3123 case BFA_TSKIM_SM_UTAG:
3122 case BFA_TSKIM_SM_CLEANUP_DONE: 3124 case BFA_TSKIM_SM_CLEANUP_DONE:
3123 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); 3125 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3124 bfa_tskim_cleanup_ios(tskim); 3126 bfa_tskim_cleanup_ios(tskim);
@@ -3138,7 +3140,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3138static void 3140static void
3139bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3141bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3140{ 3142{
3141 bfa_trc(tskim->bfa, event); 3143 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3142 3144
3143 switch (event) { 3145 switch (event) {
3144 case BFA_TSKIM_SM_IOS_DONE: 3146 case BFA_TSKIM_SM_IOS_DONE:
@@ -3170,7 +3172,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3170static void 3172static void
3171bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3173bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3172{ 3174{
3173 bfa_trc(tskim->bfa, event); 3175 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3174 3176
3175 switch (event) { 3177 switch (event) {
3176 case BFA_TSKIM_SM_QRESUME: 3178 case BFA_TSKIM_SM_QRESUME:
@@ -3207,7 +3209,7 @@ static void
3207bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, 3209bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3208 enum bfa_tskim_event event) 3210 enum bfa_tskim_event event)
3209{ 3211{
3210 bfa_trc(tskim->bfa, event); 3212 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3211 3213
3212 switch (event) { 3214 switch (event) {
3213 case BFA_TSKIM_SM_DONE: 3215 case BFA_TSKIM_SM_DONE:
@@ -3238,7 +3240,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3238static void 3240static void
3239bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) 3241bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3240{ 3242{
3241 bfa_trc(tskim->bfa, event); 3243 bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3242 3244
3243 switch (event) { 3245 switch (event) {
3244 case BFA_TSKIM_SM_HCB: 3246 case BFA_TSKIM_SM_HCB:
@@ -3560,6 +3562,8 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3560 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { 3562 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3561 bfa_stats(tskim->itnim, tm_cleanup_comps); 3563 bfa_stats(tskim->itnim, tm_cleanup_comps);
3562 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); 3564 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3565 } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
3566 bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
3563 } else { 3567 } else {
3564 bfa_stats(tskim->itnim, tm_fw_rsps); 3568 bfa_stats(tskim->itnim, tm_fw_rsps);
3565 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); 3569 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
@@ -3699,6 +3703,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3699 struct bfa_mem_dma_s *seg_ptr; 3703 struct bfa_mem_dma_s *seg_ptr;
3700 u16 idx, nsegs, num_io_req; 3704 u16 idx, nsegs, num_io_req;
3701 3705
3706 fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3702 fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; 3707 fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3703 fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs; 3708 fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
3704 fcp->num_itns = cfg->fwcfg.num_rports; 3709 fcp->num_itns = cfg->fwcfg.num_rports;
@@ -3721,6 +3726,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3721 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa); 3726 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3722 } 3727 }
3723 3728
3729 fcp->throttle_update_required = 1;
3724 bfa_fcpim_attach(fcp, bfad, cfg, pcidev); 3730 bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3725 3731
3726 bfa_iotag_attach(fcp); 3732 bfa_iotag_attach(fcp);
@@ -3759,23 +3765,33 @@ bfa_fcp_iocdisable(struct bfa_s *bfa)
3759{ 3765{
3760 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); 3766 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3761 3767
3762 /* Enqueue unused ioim resources to free_q */
3763 list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
3764
3765 bfa_fcpim_iocdisable(fcp); 3768 bfa_fcpim_iocdisable(fcp);
3766} 3769}
3767 3770
3768void 3771void
3769bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw) 3772bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
3770{ 3773{
3771 struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa); 3774 struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
3772 struct list_head *qe; 3775 struct list_head *qe;
3773 int i; 3776 int i;
3774 3777
3778 /* Update io throttle value only once during driver load time */
3779 if (!mod->throttle_update_required)
3780 return;
3781
3775 for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) { 3782 for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3776 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe); 3783 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3777 list_add_tail(qe, &mod->iotag_unused_q); 3784 list_add_tail(qe, &mod->iotag_unused_q);
3778 } 3785 }
3786
3787 if (mod->num_ioim_reqs != num_ioim_fw) {
3788 bfa_trc(bfa, mod->num_ioim_reqs);
3789 bfa_trc(bfa, num_ioim_fw);
3790 }
3791
3792 mod->max_ioim_reqs = max_ioim_fw;
3793 mod->num_ioim_reqs = num_ioim_fw;
3794 mod->throttle_update_required = 0;
3779} 3795}
3780 3796
3781void 3797void
@@ -3833,3 +3849,88 @@ bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3833 3849
3834 bfa_mem_kva_curp(fcp) = (u8 *) iotag; 3850 bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3835} 3851}
3852
3853
3854/**
3855 * To send config req, first try to use throttle value from flash
3856 * If 0, then use driver parameter
3857 * We need to use min(flash_val, drv_val) because
3858 * memory allocation was done based on this cfg'd value
3859 */
3860u16
3861bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
3862{
3863 u16 tmp;
3864 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3865
3866 /*
3867 * If throttle value from flash is already in effect after driver is
3868 * loaded then until next load, always return current value instead
3869 * of actual flash value
3870 */
3871 if (!fcp->throttle_update_required)
3872 return (u16)fcp->num_ioim_reqs;
3873
3874 tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
3875 if (!tmp || (tmp > drv_cfg_param))
3876 tmp = drv_cfg_param;
3877
3878 return tmp;
3879}
3880
3881bfa_status_t
3882bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
3883{
3884 if (!bfa_dconf_get_min_cfg(bfa)) {
3885 BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
3886 BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
3887 return BFA_STATUS_OK;
3888 }
3889
3890 return BFA_STATUS_FAILED;
3891}
3892
3893u16
3894bfa_fcpim_read_throttle(struct bfa_s *bfa)
3895{
3896 struct bfa_throttle_cfg_s *throttle_cfg =
3897 &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
3898
3899 return ((!bfa_dconf_get_min_cfg(bfa)) ?
3900 ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
3901}
3902
3903bfa_status_t
3904bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
3905{
3906 /* in min cfg no commands should run. */
3907 if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3908 (!bfa_dconf_read_data_valid(bfa)))
3909 return BFA_STATUS_FAILED;
3910
3911 bfa_fcpim_write_throttle(bfa, value);
3912
3913 return bfa_dconf_update(bfa);
3914}
3915
3916bfa_status_t
3917bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
3918{
3919 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3920 struct bfa_defs_fcpim_throttle_s throttle;
3921
3922 if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3923 (!bfa_dconf_read_data_valid(bfa)))
3924 return BFA_STATUS_FAILED;
3925
3926 memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
3927
3928 throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
3929 throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
3930 if (!throttle.cfg_value)
3931 throttle.cfg_value = throttle.cur_value;
3932 throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
3933 memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
3934
3935 return BFA_STATUS_OK;
3936}
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 36f26da80f76..e693af6e5930 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -42,7 +42,7 @@ void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
42 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); 42 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
43void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m); 43void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
44void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp); 44void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
45void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw); 45void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw);
46 46
47#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod) 47#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod)
48#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg)) 48#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg))
@@ -51,7 +51,9 @@ void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);
51#define BFA_ITN_FROM_TAG(_fcp, _tag) \ 51#define BFA_ITN_FROM_TAG(_fcp, _tag) \
52 ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1))) 52 ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
53#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \ 53#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
54 bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN) 54 bfa_mem_get_dmabuf_kva(_fcp, (_tag & BFA_IOIM_IOTAG_MASK), \
55 BFI_IOIM_SNSLEN)
56
55 57
56#define BFA_ITNIM_MIN 32 58#define BFA_ITNIM_MIN 32
57#define BFA_ITNIM_MAX 1024 59#define BFA_ITNIM_MAX 1024
@@ -148,6 +150,7 @@ struct bfa_fcp_mod_s {
148 struct list_head iotag_unused_q; /* unused IO resources*/ 150 struct list_head iotag_unused_q; /* unused IO resources*/
149 struct bfa_iotag_s *iotag_arr; 151 struct bfa_iotag_s *iotag_arr;
150 struct bfa_itn_s *itn_arr; 152 struct bfa_itn_s *itn_arr;
153 int max_ioim_reqs;
151 int num_ioim_reqs; 154 int num_ioim_reqs;
152 int num_fwtio_reqs; 155 int num_fwtio_reqs;
153 int num_itns; 156 int num_itns;
@@ -155,6 +158,7 @@ struct bfa_fcp_mod_s {
155 struct bfa_fcpim_s fcpim; 158 struct bfa_fcpim_s fcpim;
156 struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS]; 159 struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS];
157 struct bfa_mem_kva_s kva_seg; 160 struct bfa_mem_kva_s kva_seg;
161 int throttle_update_required;
158}; 162};
159 163
160/* 164/*
@@ -416,5 +420,10 @@ bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
416bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, 420bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
417 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); 421 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
418bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa); 422bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
423u16 bfa_fcpim_read_throttle(struct bfa_s *bfa);
424bfa_status_t bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value);
425bfa_status_t bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value);
426bfa_status_t bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf);
427u16 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param);
419 428
420#endif /* __BFA_FCPIM_H__ */ 429#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index fd3e84d32bd2..d428808fb37e 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -303,16 +303,30 @@ static void
303bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, 303bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
304 enum bfa_fcs_fabric_event event) 304 enum bfa_fcs_fabric_event event)
305{ 305{
306 struct bfa_s *bfa = fabric->fcs->bfa;
307
306 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 308 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
307 bfa_trc(fabric->fcs, event); 309 bfa_trc(fabric->fcs, event);
308 310
309 switch (event) { 311 switch (event) {
310 case BFA_FCS_FABRIC_SM_START: 312 case BFA_FCS_FABRIC_SM_START:
311 if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { 313 if (!bfa_fcport_is_linkup(fabric->fcs->bfa)) {
314 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
315 break;
316 }
317 if (bfa_fcport_get_topology(bfa) ==
318 BFA_PORT_TOPOLOGY_LOOP) {
319 fabric->fab_type = BFA_FCS_FABRIC_LOOP;
320 fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
321 fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
322 bfa_sm_set_state(fabric,
323 bfa_fcs_fabric_sm_online);
324 bfa_fcs_fabric_set_opertype(fabric);
325 bfa_fcs_lport_online(&fabric->bport);
326 } else {
312 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); 327 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
313 bfa_fcs_fabric_login(fabric); 328 bfa_fcs_fabric_login(fabric);
314 } else 329 }
315 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
316 break; 330 break;
317 331
318 case BFA_FCS_FABRIC_SM_LINK_UP: 332 case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -337,16 +351,28 @@ static void
337bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, 351bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
338 enum bfa_fcs_fabric_event event) 352 enum bfa_fcs_fabric_event event)
339{ 353{
354 struct bfa_s *bfa = fabric->fcs->bfa;
355
340 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 356 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
341 bfa_trc(fabric->fcs, event); 357 bfa_trc(fabric->fcs, event);
342 358
343 switch (event) { 359 switch (event) {
344 case BFA_FCS_FABRIC_SM_LINK_UP: 360 case BFA_FCS_FABRIC_SM_LINK_UP:
345 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); 361 if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) {
346 bfa_fcs_fabric_login(fabric); 362 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
363 bfa_fcs_fabric_login(fabric);
364 break;
365 }
366 fabric->fab_type = BFA_FCS_FABRIC_LOOP;
367 fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
368 fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
369 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
370 bfa_fcs_fabric_set_opertype(fabric);
371 bfa_fcs_lport_online(&fabric->bport);
347 break; 372 break;
348 373
349 case BFA_FCS_FABRIC_SM_RETRY_OP: 374 case BFA_FCS_FABRIC_SM_RETRY_OP:
375 case BFA_FCS_FABRIC_SM_LOOPBACK:
350 break; 376 break;
351 377
352 case BFA_FCS_FABRIC_SM_DELETE: 378 case BFA_FCS_FABRIC_SM_DELETE:
@@ -595,14 +621,20 @@ void
595bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, 621bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
596 enum bfa_fcs_fabric_event event) 622 enum bfa_fcs_fabric_event event)
597{ 623{
624 struct bfa_s *bfa = fabric->fcs->bfa;
625
598 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 626 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
599 bfa_trc(fabric->fcs, event); 627 bfa_trc(fabric->fcs, event);
600 628
601 switch (event) { 629 switch (event) {
602 case BFA_FCS_FABRIC_SM_LINK_DOWN: 630 case BFA_FCS_FABRIC_SM_LINK_DOWN:
603 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); 631 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
604 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); 632 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
605 bfa_fcs_fabric_notify_offline(fabric); 633 bfa_fcs_lport_offline(&fabric->bport);
634 } else {
635 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
636 bfa_fcs_fabric_notify_offline(fabric);
637 }
606 break; 638 break;
607 639
608 case BFA_FCS_FABRIC_SM_DELETE: 640 case BFA_FCS_FABRIC_SM_DELETE:
@@ -719,20 +751,29 @@ static void
719bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric, 751bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
720 enum bfa_fcs_fabric_event event) 752 enum bfa_fcs_fabric_event event)
721{ 753{
754 struct bfa_s *bfa = fabric->fcs->bfa;
755
722 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 756 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
723 bfa_trc(fabric->fcs, event); 757 bfa_trc(fabric->fcs, event);
724 758
725 switch (event) { 759 switch (event) {
726 case BFA_FCS_FABRIC_SM_STOPCOMP: 760 case BFA_FCS_FABRIC_SM_STOPCOMP:
727 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); 761 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
728 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT); 762 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
763 } else {
764 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
765 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
766 }
729 break; 767 break;
730 768
731 case BFA_FCS_FABRIC_SM_LINK_UP: 769 case BFA_FCS_FABRIC_SM_LINK_UP:
732 break; 770 break;
733 771
734 case BFA_FCS_FABRIC_SM_LINK_DOWN: 772 case BFA_FCS_FABRIC_SM_LINK_DOWN:
735 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup); 773 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
774 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
775 else
776 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
736 break; 777 break;
737 778
738 default: 779 default:
@@ -975,9 +1016,6 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
975 struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; 1016 struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
976 u8 alpa = 0, bb_scn = 0; 1017 u8 alpa = 0, bb_scn = 0;
977 1018
978 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
979 alpa = bfa_fcport_get_myalpa(bfa);
980
981 if (bfa_fcs_fabric_is_bbscn_enabled(fabric) && 1019 if (bfa_fcs_fabric_is_bbscn_enabled(fabric) &&
982 (!fabric->fcs->bbscn_flogi_rjt)) 1020 (!fabric->fcs->bbscn_flogi_rjt))
983 bb_scn = BFA_FCS_PORT_DEF_BB_SCN; 1021 bb_scn = BFA_FCS_PORT_DEF_BB_SCN;
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 6c4377cb287f..a449706c6bc0 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -118,9 +118,9 @@ struct bfa_fcs_lport_fab_s {
118#define MAX_ALPA_COUNT 127 118#define MAX_ALPA_COUNT 127
119 119
120struct bfa_fcs_lport_loop_s { 120struct bfa_fcs_lport_loop_s {
121 u8 num_alpa; /* Num of ALPA entries in the map */ 121 u8 num_alpa; /* Num of ALPA entries in the map */
122 u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional 122 u8 alpabm_valid; /* alpa bitmap valid or not (1 or 0) */
123 *Map */ 123 u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional Map */
124 struct bfa_fcs_lport_s *port; /* parent port */ 124 struct bfa_fcs_lport_s *port; /* parent port */
125}; 125};
126 126
@@ -175,6 +175,7 @@ enum bfa_fcs_fabric_type {
175 BFA_FCS_FABRIC_UNKNOWN = 0, 175 BFA_FCS_FABRIC_UNKNOWN = 0,
176 BFA_FCS_FABRIC_SWITCHED = 1, 176 BFA_FCS_FABRIC_SWITCHED = 1,
177 BFA_FCS_FABRIC_N2N = 2, 177 BFA_FCS_FABRIC_N2N = 2,
178 BFA_FCS_FABRIC_LOOP = 3,
178}; 179};
179 180
180 181
@@ -350,9 +351,10 @@ void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg,
350 struct bfa_fcxp_s *fcxp_alloced); 351 struct bfa_fcxp_s *fcxp_alloced);
351void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport); 352void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
352void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport); 353void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
353void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport); 354void bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *vport);
354void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, 355void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
355 struct fchs_s *rx_frame, u32 len); 356 struct fchs_s *rx_frame, u32 len);
357void bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port);
356 358
357struct bfa_fcs_vport_s { 359struct bfa_fcs_vport_s {
358 struct list_head qe; /* queue elem */ 360 struct list_head qe; /* queue elem */
@@ -453,6 +455,7 @@ struct bfa_fcs_rport_s {
453 struct bfa_rport_stats_s stats; /* rport stats */ 455 struct bfa_rport_stats_s stats; /* rport stats */
454 enum bfa_rport_function scsi_function; /* Initiator/Target */ 456 enum bfa_rport_function scsi_function; /* Initiator/Target */
455 struct bfa_fcs_rpf_s rpf; /* Rport features module */ 457 struct bfa_fcs_rpf_s rpf; /* Rport features module */
458 bfa_boolean_t scn_online; /* SCN online flag */
456}; 459};
457 460
458static inline struct bfa_rport_s * 461static inline struct bfa_rport_s *
@@ -639,9 +642,9 @@ struct bfa_fcs_fdmi_hba_attr_s {
639 u8 model[16]; 642 u8 model[16];
640 u8 model_desc[256]; 643 u8 model_desc[256];
641 u8 hw_version[8]; 644 u8 hw_version[8];
642 u8 driver_version[8]; 645 u8 driver_version[BFA_VERSION_LEN];
643 u8 option_rom_ver[BFA_VERSION_LEN]; 646 u8 option_rom_ver[BFA_VERSION_LEN];
644 u8 fw_version[8]; 647 u8 fw_version[BFA_VERSION_LEN];
645 u8 os_name[256]; 648 u8 os_name[256];
646 __be32 max_ct_pyld; 649 __be32 max_ct_pyld;
647}; 650};
@@ -733,7 +736,7 @@ enum rport_event {
733 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ 736 RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
734 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */ 737 RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
735 RPSM_EVENT_DELETE = 7, /* RPORT delete request */ 738 RPSM_EVENT_DELETE = 7, /* RPORT delete request */
736 RPSM_EVENT_SCN = 8, /* state change notification */ 739 RPSM_EVENT_FAB_SCN = 8, /* state change notification */
737 RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */ 740 RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
738 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ 741 RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
739 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ 742 RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
@@ -744,7 +747,9 @@ enum rport_event {
744 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ 747 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
745 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ 748 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
746 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */ 749 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
747 RPSM_EVENT_FC4_FCS_ONLINE = 19, /*!< FC-4 FCS online complete */ 750 RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */
751 RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */
752 RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */
748}; 753};
749 754
750/* 755/*
@@ -763,7 +768,7 @@ enum bfa_fcs_itnim_event {
763 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ 768 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
764 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ 769 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
765 BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */ 770 BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
766 BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /*!< bfa rport online event */ 771 BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
767}; 772};
768 773
769/* 774/*
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 3b75f6fb2de1..1224d0462a49 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -23,6 +23,34 @@
23 23
24BFA_TRC_FILE(FCS, PORT); 24BFA_TRC_FILE(FCS, PORT);
25 25
26/*
27 * ALPA to LIXA bitmap mapping
28 *
29 * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31
30 * is for L_bit (login required) and is filled as ALPA 0x00 here.
31 */
32static const u8 loop_alpa_map[] = {
33 0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, /* Word 0 Bits 31..24 */
34 0x17, 0x18, 0x1B, 0x1D, 0x1E, 0x1F, 0x23, 0x25, /* Word 0 Bits 23..16 */
35 0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, /* Word 0 Bits 15..08 */
36 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, /* Word 0 Bits 07..00 */
37
38 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, /* Word 1 Bits 31..24 */
39 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, /* Word 1 Bits 23..16 */
40 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, /* Word 1 Bits 15..08 */
41 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, /* Word 1 Bits 07..00 */
42
43 0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, /* Word 2 Bits 31..24 */
44 0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, /* Word 2 Bits 23..16 */
45 0x9B, 0x9D, 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, /* Word 2 Bits 15..08 */
46 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xB1, 0xB2, /* Word 2 Bits 07..00 */
47
48 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, 0xBC, 0xC3, /* Word 3 Bits 31..24 */
49 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, /* Word 3 Bits 23..16 */
50 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, /* Word 3 Bits 15..08 */
51 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF, /* Word 3 Bits 07..00 */
52};
53
26static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, 54static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
27 struct fchs_s *rx_fchs, u8 reason_code, 55 struct fchs_s *rx_fchs, u8 reason_code,
28 u8 reason_code_expl); 56 u8 reason_code_expl);
@@ -51,6 +79,10 @@ static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port);
51static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port); 79static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port);
52static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port); 80static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port);
53 81
82static void bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port);
83static void bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port);
84static void bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port);
85
54static struct { 86static struct {
55 void (*init) (struct bfa_fcs_lport_s *port); 87 void (*init) (struct bfa_fcs_lport_s *port);
56 void (*online) (struct bfa_fcs_lport_s *port); 88 void (*online) (struct bfa_fcs_lport_s *port);
@@ -62,7 +94,9 @@ static struct {
62 bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online, 94 bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
63 bfa_fcs_lport_fab_offline}, { 95 bfa_fcs_lport_fab_offline}, {
64 bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online, 96 bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
65 bfa_fcs_lport_n2n_offline}, 97 bfa_fcs_lport_n2n_offline}, {
98 bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
99 bfa_fcs_lport_loop_offline},
66 }; 100 };
67 101
68/* 102/*
@@ -1127,7 +1161,7 @@ static void
1127bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port) 1161bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
1128{ 1162{
1129 bfa_fcs_lport_ns_online(port); 1163 bfa_fcs_lport_ns_online(port);
1130 bfa_fcs_lport_scn_online(port); 1164 bfa_fcs_lport_fab_scn_online(port);
1131} 1165}
1132 1166
1133/* 1167/*
@@ -1221,6 +1255,98 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
1221 n2n_port->reply_oxid = 0; 1255 n2n_port->reply_oxid = 0;
1222} 1256}
1223 1257
1258void
1259bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
1260{
1261 int i = 0, j = 0, bit = 0, alpa_bit = 0;
1262 u8 k = 0;
1263 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(port->fcs->bfa);
1264
1265 port->port_topo.ploop.alpabm_valid = fcport->alpabm_valid;
1266 port->pid = fcport->myalpa;
1267 port->pid = bfa_hton3b(port->pid);
1268
1269 for (i = 0; i < (FC_ALPA_MAX / 8); i++) {
1270 for (j = 0, alpa_bit = 0; j < 8; j++, alpa_bit++) {
1271 bfa_trc(port->fcs->bfa, fcport->alpabm.alpa_bm[i]);
1272 bit = (fcport->alpabm.alpa_bm[i] & (1 << (7 - j)));
1273 if (bit) {
1274 port->port_topo.ploop.alpa_pos_map[k] =
1275 loop_alpa_map[(i * 8) + alpa_bit];
1276 k++;
1277 bfa_trc(port->fcs->bfa, k);
1278 bfa_trc(port->fcs->bfa,
1279 port->port_topo.ploop.alpa_pos_map[k]);
1280 }
1281 }
1282 }
1283 port->port_topo.ploop.num_alpa = k;
1284}
1285
1286/*
1287 * Called by fcs/port to initialize Loop topology.
1288 */
1289static void
1290bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port)
1291{
1292}
1293
1294/*
1295 * Called by fcs/port to notify transition to online state.
1296 */
1297static void
1298bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port)
1299{
1300 u8 num_alpa = 0, alpabm_valid = 0;
1301 struct bfa_fcs_rport_s *rport;
1302 u8 *alpa_map = NULL;
1303 int i = 0;
1304 u32 pid;
1305
1306 bfa_fcport_get_loop_attr(port);
1307
1308 num_alpa = port->port_topo.ploop.num_alpa;
1309 alpabm_valid = port->port_topo.ploop.alpabm_valid;
1310 alpa_map = port->port_topo.ploop.alpa_pos_map;
1311
1312 bfa_trc(port->fcs->bfa, port->pid);
1313 bfa_trc(port->fcs->bfa, num_alpa);
1314 if (alpabm_valid == 1) {
1315 for (i = 0; i < num_alpa; i++) {
1316 bfa_trc(port->fcs->bfa, alpa_map[i]);
1317 if (alpa_map[i] != bfa_hton3b(port->pid)) {
1318 pid = alpa_map[i];
1319 bfa_trc(port->fcs->bfa, pid);
1320 rport = bfa_fcs_lport_get_rport_by_pid(port,
1321 bfa_hton3b(pid));
1322 if (!rport)
1323 rport = bfa_fcs_rport_create(port,
1324 bfa_hton3b(pid));
1325 }
1326 }
1327 } else {
1328 for (i = 0; i < MAX_ALPA_COUNT; i++) {
1329 if (alpa_map[i] != port->pid) {
1330 pid = loop_alpa_map[i];
1331 bfa_trc(port->fcs->bfa, pid);
1332 rport = bfa_fcs_lport_get_rport_by_pid(port,
1333 bfa_hton3b(pid));
1334 if (!rport)
1335 rport = bfa_fcs_rport_create(port,
1336 bfa_hton3b(pid));
1337 }
1338 }
1339 }
1340}
1341
1342/*
1343 * Called by fcs/port to notify transition to offline state.
1344 */
1345static void
1346bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port)
1347{
1348}
1349
1224#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2 1350#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
1225 1351
1226/* 1352/*
@@ -1888,13 +2014,10 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1888 sizeof(templen)); 2014 sizeof(templen));
1889 } 2015 }
1890 2016
1891 /*
1892 * f/w Version = driver version
1893 */
1894 attr = (struct fdmi_attr_s *) curr_ptr; 2017 attr = (struct fdmi_attr_s *) curr_ptr;
1895 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); 2018 attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
1896 templen = (u16) strlen(fcs_hba_attr->driver_version); 2019 templen = (u16) strlen(fcs_hba_attr->fw_version);
1897 memcpy(attr->value, fcs_hba_attr->driver_version, templen); 2020 memcpy(attr->value, fcs_hba_attr->fw_version, templen);
1898 templen = fc_roundup(templen, sizeof(u32)); 2021 templen = fc_roundup(templen, sizeof(u32));
1899 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; 2022 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1900 len += templen; 2023 len += templen;
@@ -2296,6 +2419,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2296{ 2419{
2297 struct bfa_fcs_lport_s *port = fdmi->ms->port; 2420 struct bfa_fcs_lport_s *port = fdmi->ms->port;
2298 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; 2421 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
2422 struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
2299 2423
2300 memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); 2424 memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
2301 2425
@@ -2331,7 +2455,9 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2331 sizeof(driver_info->host_os_patch)); 2455 sizeof(driver_info->host_os_patch));
2332 } 2456 }
2333 2457
2334 hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ); 2458 /* Retrieve the max frame size from the port attr */
2459 bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
2460 hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
2335} 2461}
2336 2462
2337static void 2463static void
@@ -2391,7 +2517,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2391 /* 2517 /*
2392 * Max PDU Size. 2518 * Max PDU Size.
2393 */ 2519 */
2394 port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ); 2520 port_attr->max_frm_size = cpu_to_be32(pport_attr.pport_cfg.maxfrsize);
2395 2521
2396 /* 2522 /*
2397 * OS device Name 2523 * OS device Name
@@ -5199,7 +5325,7 @@ bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port)
5199} 5325}
5200 5326
5201void 5327void
5202bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *port) 5328bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *port)
5203{ 5329{
5204 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); 5330 struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
5205 5331
@@ -5621,6 +5747,15 @@ bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
5621} 5747}
5622 5748
5623/* 5749/*
5750 * Let new loop map create missing rports
5751 */
5752void
5753bfa_fcs_lport_lip_scn_online(struct bfa_fcs_lport_s *port)
5754{
5755 bfa_fcs_lport_loop_online(port);
5756}
5757
5758/*
5624 * FCS virtual port state machine 5759 * FCS virtual port state machine
5625 */ 5760 */
5626 5761
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index cc43b2a58ce3..58ac643ba9f3 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -106,9 +106,13 @@ static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
106 enum rport_event event); 106 enum rport_event event);
107static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, 107static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport,
108 enum rport_event event); 108 enum rport_event event);
109static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, 109static void bfa_fcs_rport_sm_adisc_online_sending(
110 enum rport_event event); 110 struct bfa_fcs_rport_s *rport, enum rport_event event);
111static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, 111static void bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
112 enum rport_event event);
113static void bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s
114 *rport, enum rport_event event);
115static void bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
112 enum rport_event event); 116 enum rport_event event);
113static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, 117static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
114 enum rport_event event); 118 enum rport_event event);
@@ -150,8 +154,10 @@ static struct bfa_sm_table_s rport_sm_table[] = {
150 {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE}, 154 {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
151 {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY}, 155 {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
152 {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY}, 156 {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY},
153 {BFA_SM(bfa_fcs_rport_sm_adisc_sending), BFA_RPORT_ADISC}, 157 {BFA_SM(bfa_fcs_rport_sm_adisc_online_sending), BFA_RPORT_ADISC},
154 {BFA_SM(bfa_fcs_rport_sm_adisc), BFA_RPORT_ADISC}, 158 {BFA_SM(bfa_fcs_rport_sm_adisc_online), BFA_RPORT_ADISC},
159 {BFA_SM(bfa_fcs_rport_sm_adisc_offline_sending), BFA_RPORT_ADISC},
160 {BFA_SM(bfa_fcs_rport_sm_adisc_offline), BFA_RPORT_ADISC},
155 {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV}, 161 {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV},
156 {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO}, 162 {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO},
157 {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE}, 163 {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE},
@@ -231,10 +237,19 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
231 bfa_fcs_rport_send_plogiacc(rport, NULL); 237 bfa_fcs_rport_send_plogiacc(rport, NULL);
232 break; 238 break;
233 239
240 case RPSM_EVENT_SCN_OFFLINE:
241 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
242 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
243 bfa_timer_start(rport->fcs->bfa, &rport->timer,
244 bfa_fcs_rport_timeout, rport,
245 bfa_fcs_rport_del_timeout);
246 break;
234 case RPSM_EVENT_ADDRESS_CHANGE: 247 case RPSM_EVENT_ADDRESS_CHANGE:
235 case RPSM_EVENT_SCN: 248 case RPSM_EVENT_FAB_SCN:
236 /* query the NS */ 249 /* query the NS */
237 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 250 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
251 WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
252 BFA_PORT_TOPOLOGY_LOOP));
238 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 253 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
239 rport->ns_retries = 0; 254 rport->ns_retries = 0;
240 bfa_fcs_rport_send_nsdisc(rport, NULL); 255 bfa_fcs_rport_send_nsdisc(rport, NULL);
@@ -280,12 +295,20 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
280 295
281 case RPSM_EVENT_PLOGI_RCVD: 296 case RPSM_EVENT_PLOGI_RCVD:
282 case RPSM_EVENT_PLOGI_COMP: 297 case RPSM_EVENT_PLOGI_COMP:
283 case RPSM_EVENT_SCN: 298 case RPSM_EVENT_FAB_SCN:
284 /* 299 /*
285 * Ignore, SCN is possibly online notification. 300 * Ignore, SCN is possibly online notification.
286 */ 301 */
287 break; 302 break;
288 303
304 case RPSM_EVENT_SCN_OFFLINE:
305 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
306 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
307 bfa_timer_start(rport->fcs->bfa, &rport->timer,
308 bfa_fcs_rport_timeout, rport,
309 bfa_fcs_rport_del_timeout);
310 break;
311
289 case RPSM_EVENT_ADDRESS_CHANGE: 312 case RPSM_EVENT_ADDRESS_CHANGE:
290 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 313 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
291 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 314 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
@@ -346,9 +369,19 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
346 bfa_fcs_rport_send_plogiacc(rport, NULL); 369 bfa_fcs_rport_send_plogiacc(rport, NULL);
347 break; 370 break;
348 371
372 case RPSM_EVENT_SCN_OFFLINE:
373 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
374 bfa_timer_stop(&rport->timer);
375 bfa_timer_start(rport->fcs->bfa, &rport->timer,
376 bfa_fcs_rport_timeout, rport,
377 bfa_fcs_rport_del_timeout);
378 break;
379
349 case RPSM_EVENT_ADDRESS_CHANGE: 380 case RPSM_EVENT_ADDRESS_CHANGE:
350 case RPSM_EVENT_SCN: 381 case RPSM_EVENT_FAB_SCN:
351 bfa_timer_stop(&rport->timer); 382 bfa_timer_stop(&rport->timer);
383 WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
384 BFA_PORT_TOPOLOGY_LOOP));
352 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 385 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
353 rport->ns_retries = 0; 386 rport->ns_retries = 0;
354 bfa_fcs_rport_send_nsdisc(rport, NULL); 387 bfa_fcs_rport_send_nsdisc(rport, NULL);
@@ -422,7 +455,18 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
422 } 455 }
423 break; 456 break;
424 457
425 case RPSM_EVENT_PLOGI_RETRY: 458 case RPSM_EVENT_SCN_ONLINE:
459 break;
460
461 case RPSM_EVENT_SCN_OFFLINE:
462 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
463 bfa_fcxp_discard(rport->fcxp);
464 bfa_timer_start(rport->fcs->bfa, &rport->timer,
465 bfa_fcs_rport_timeout, rport,
466 bfa_fcs_rport_del_timeout);
467 break;
468
469 case RPSM_EVENT_PLOGI_RETRY:
426 rport->plogi_retries = 0; 470 rport->plogi_retries = 0;
427 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); 471 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
428 bfa_timer_start(rport->fcs->bfa, &rport->timer, 472 bfa_timer_start(rport->fcs->bfa, &rport->timer,
@@ -440,8 +484,10 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
440 break; 484 break;
441 485
442 case RPSM_EVENT_ADDRESS_CHANGE: 486 case RPSM_EVENT_ADDRESS_CHANGE:
443 case RPSM_EVENT_SCN: 487 case RPSM_EVENT_FAB_SCN:
444 bfa_fcxp_discard(rport->fcxp); 488 bfa_fcxp_discard(rport->fcxp);
489 WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
490 BFA_PORT_TOPOLOGY_LOOP));
445 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 491 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
446 rport->ns_retries = 0; 492 rport->ns_retries = 0;
447 bfa_fcs_rport_send_nsdisc(rport, NULL); 493 bfa_fcs_rport_send_nsdisc(rport, NULL);
@@ -512,7 +558,8 @@ bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
512 case RPSM_EVENT_PLOGI_COMP: 558 case RPSM_EVENT_PLOGI_COMP:
513 case RPSM_EVENT_LOGO_IMP: 559 case RPSM_EVENT_LOGO_IMP:
514 case RPSM_EVENT_ADDRESS_CHANGE: 560 case RPSM_EVENT_ADDRESS_CHANGE:
515 case RPSM_EVENT_SCN: 561 case RPSM_EVENT_FAB_SCN:
562 case RPSM_EVENT_SCN_OFFLINE:
516 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 563 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
517 bfa_fcs_rport_fcs_offline_action(rport); 564 bfa_fcs_rport_fcs_offline_action(rport);
518 break; 565 break;
@@ -561,9 +608,10 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
561 bfa_fcs_rport_fcs_offline_action(rport); 608 bfa_fcs_rport_fcs_offline_action(rport);
562 break; 609 break;
563 610
564 case RPSM_EVENT_SCN: 611 case RPSM_EVENT_FAB_SCN:
565 case RPSM_EVENT_LOGO_IMP: 612 case RPSM_EVENT_LOGO_IMP:
566 case RPSM_EVENT_ADDRESS_CHANGE: 613 case RPSM_EVENT_ADDRESS_CHANGE:
614 case RPSM_EVENT_SCN_OFFLINE:
567 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 615 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
568 bfa_fcs_rport_fcs_offline_action(rport); 616 bfa_fcs_rport_fcs_offline_action(rport);
569 break; 617 break;
@@ -595,14 +643,15 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
595 bfa_trc(rport->fcs, event); 643 bfa_trc(rport->fcs, event);
596 644
597 switch (event) { 645 switch (event) {
598 case RPSM_EVENT_SCN: 646 case RPSM_EVENT_FAB_SCN:
599 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { 647 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
600 bfa_sm_set_state(rport, 648 bfa_sm_set_state(rport,
601 bfa_fcs_rport_sm_nsquery_sending); 649 bfa_fcs_rport_sm_nsquery_sending);
602 rport->ns_retries = 0; 650 rport->ns_retries = 0;
603 bfa_fcs_rport_send_nsdisc(rport, NULL); 651 bfa_fcs_rport_send_nsdisc(rport, NULL);
604 } else { 652 } else {
605 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); 653 bfa_sm_set_state(rport,
654 bfa_fcs_rport_sm_adisc_online_sending);
606 bfa_fcs_rport_send_adisc(rport, NULL); 655 bfa_fcs_rport_send_adisc(rport, NULL);
607 } 656 }
608 break; 657 break;
@@ -610,6 +659,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
610 case RPSM_EVENT_PLOGI_RCVD: 659 case RPSM_EVENT_PLOGI_RCVD:
611 case RPSM_EVENT_LOGO_IMP: 660 case RPSM_EVENT_LOGO_IMP:
612 case RPSM_EVENT_ADDRESS_CHANGE: 661 case RPSM_EVENT_ADDRESS_CHANGE:
662 case RPSM_EVENT_SCN_OFFLINE:
613 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 663 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
614 bfa_fcs_rport_hal_offline_action(rport); 664 bfa_fcs_rport_hal_offline_action(rport);
615 break; 665 break;
@@ -625,6 +675,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
625 bfa_fcs_rport_hal_offline_action(rport); 675 bfa_fcs_rport_hal_offline_action(rport);
626 break; 676 break;
627 677
678 case RPSM_EVENT_SCN_ONLINE:
628 case RPSM_EVENT_PLOGI_COMP: 679 case RPSM_EVENT_PLOGI_COMP:
629 break; 680 break;
630 681
@@ -656,7 +707,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
656 bfa_fcs_rport_hal_offline_action(rport); 707 bfa_fcs_rport_hal_offline_action(rport);
657 break; 708 break;
658 709
659 case RPSM_EVENT_SCN: 710 case RPSM_EVENT_FAB_SCN:
660 /* 711 /*
661 * ignore SCN, wait for response to query itself 712 * ignore SCN, wait for response to query itself
662 */ 713 */
@@ -696,7 +747,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
696 747
697 switch (event) { 748 switch (event) {
698 case RPSM_EVENT_ACCEPTED: 749 case RPSM_EVENT_ACCEPTED:
699 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); 750 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending);
700 bfa_fcs_rport_send_adisc(rport, NULL); 751 bfa_fcs_rport_send_adisc(rport, NULL);
701 break; 752 break;
702 753
@@ -718,7 +769,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
718 bfa_fcs_rport_hal_offline_action(rport); 769 bfa_fcs_rport_hal_offline_action(rport);
719 break; 770 break;
720 771
721 case RPSM_EVENT_SCN: 772 case RPSM_EVENT_FAB_SCN:
722 break; 773 break;
723 774
724 case RPSM_EVENT_LOGO_RCVD: 775 case RPSM_EVENT_LOGO_RCVD:
@@ -747,7 +798,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
747 * authenticating with rport. FC-4s are paused. 798 * authenticating with rport. FC-4s are paused.
748 */ 799 */
749static void 800static void
750bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, 801bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport,
751 enum rport_event event) 802 enum rport_event event)
752{ 803{
753 bfa_trc(rport->fcs, rport->pwwn); 804 bfa_trc(rport->fcs, rport->pwwn);
@@ -756,7 +807,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
756 807
757 switch (event) { 808 switch (event) {
758 case RPSM_EVENT_FCXP_SENT: 809 case RPSM_EVENT_FCXP_SENT:
759 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc); 810 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online);
760 break; 811 break;
761 812
762 case RPSM_EVENT_DELETE: 813 case RPSM_EVENT_DELETE:
@@ -779,7 +830,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
779 bfa_fcs_rport_hal_offline_action(rport); 830 bfa_fcs_rport_hal_offline_action(rport);
780 break; 831 break;
781 832
782 case RPSM_EVENT_SCN: 833 case RPSM_EVENT_FAB_SCN:
783 break; 834 break;
784 835
785 case RPSM_EVENT_PLOGI_RCVD: 836 case RPSM_EVENT_PLOGI_RCVD:
@@ -798,7 +849,8 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
798 * FC-4s are paused. 849 * FC-4s are paused.
799 */ 850 */
800static void 851static void
801bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) 852bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
853 enum rport_event event)
802{ 854{
803 bfa_trc(rport->fcs, rport->pwwn); 855 bfa_trc(rport->fcs, rport->pwwn);
804 bfa_trc(rport->fcs, rport->pid); 856 bfa_trc(rport->fcs, rport->pid);
@@ -831,7 +883,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
831 bfa_fcs_rport_hal_offline_action(rport); 883 bfa_fcs_rport_hal_offline_action(rport);
832 break; 884 break;
833 885
834 case RPSM_EVENT_SCN: 886 case RPSM_EVENT_FAB_SCN:
835 /* 887 /*
836 * already processing RSCN 888 * already processing RSCN
837 */ 889 */
@@ -856,7 +908,96 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
856} 908}
857 909
858/* 910/*
859 * Rport has sent LOGO. Awaiting FC-4 offline completion callback. 911 * ADISC is being sent for authenticating with rport
912 * Already did offline actions.
913 */
914static void
915bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport,
916 enum rport_event event)
917{
918 bfa_trc(rport->fcs, rport->pwwn);
919 bfa_trc(rport->fcs, rport->pid);
920 bfa_trc(rport->fcs, event);
921
922 switch (event) {
923 case RPSM_EVENT_FCXP_SENT:
924 bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline);
925 break;
926
927 case RPSM_EVENT_DELETE:
928 case RPSM_EVENT_SCN_OFFLINE:
929 case RPSM_EVENT_LOGO_IMP:
930 case RPSM_EVENT_LOGO_RCVD:
931 case RPSM_EVENT_PRLO_RCVD:
932 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
933 bfa_fcxp_walloc_cancel(rport->fcs->bfa,
934 &rport->fcxp_wqe);
935 bfa_timer_start(rport->fcs->bfa, &rport->timer,
936 bfa_fcs_rport_timeout, rport,
937 bfa_fcs_rport_del_timeout);
938 break;
939
940 case RPSM_EVENT_PLOGI_RCVD:
941 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
942 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
943 bfa_fcs_rport_send_plogiacc(rport, NULL);
944 break;
945
946 default:
947 bfa_sm_fault(rport->fcs, event);
948 }
949}
950
951/*
952 * ADISC to rport
953 * Already did offline actions
954 */
955static void
956bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
957 enum rport_event event)
958{
959 bfa_trc(rport->fcs, rport->pwwn);
960 bfa_trc(rport->fcs, rport->pid);
961 bfa_trc(rport->fcs, event);
962
963 switch (event) {
964 case RPSM_EVENT_ACCEPTED:
965 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
966 bfa_fcs_rport_hal_online(rport);
967 break;
968
969 case RPSM_EVENT_PLOGI_RCVD:
970 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
971 bfa_fcxp_discard(rport->fcxp);
972 bfa_fcs_rport_send_plogiacc(rport, NULL);
973 break;
974
975 case RPSM_EVENT_FAILED:
976 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
977 bfa_timer_start(rport->fcs->bfa, &rport->timer,
978 bfa_fcs_rport_timeout, rport,
979 bfa_fcs_rport_del_timeout);
980 break;
981
982 case RPSM_EVENT_DELETE:
983 case RPSM_EVENT_SCN_OFFLINE:
984 case RPSM_EVENT_LOGO_IMP:
985 case RPSM_EVENT_LOGO_RCVD:
986 case RPSM_EVENT_PRLO_RCVD:
987 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
988 bfa_fcxp_discard(rport->fcxp);
989 bfa_timer_start(rport->fcs->bfa, &rport->timer,
990 bfa_fcs_rport_timeout, rport,
991 bfa_fcs_rport_del_timeout);
992 break;
993
994 default:
995 bfa_sm_fault(rport->fcs, event);
996 }
997}
998
999/*
1000 * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
860 */ 1001 */
861static void 1002static void
862bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, 1003bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
@@ -881,6 +1022,8 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
881 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete); 1022 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
882 break; 1023 break;
883 1024
1025 case RPSM_EVENT_SCN_ONLINE:
1026 case RPSM_EVENT_SCN_OFFLINE:
884 case RPSM_EVENT_HCB_ONLINE: 1027 case RPSM_EVENT_HCB_ONLINE:
885 case RPSM_EVENT_LOGO_RCVD: 1028 case RPSM_EVENT_LOGO_RCVD:
886 case RPSM_EVENT_PRLO_RCVD: 1029 case RPSM_EVENT_PRLO_RCVD:
@@ -945,6 +1088,8 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
945 bfa_fcs_rport_hal_offline(rport); 1088 bfa_fcs_rport_hal_offline(rport);
946 break; 1089 break;
947 1090
1091 case RPSM_EVENT_SCN_ONLINE:
1092 break;
948 case RPSM_EVENT_LOGO_RCVD: 1093 case RPSM_EVENT_LOGO_RCVD:
949 /* 1094 /*
950 * Rport is going offline. Just ack the logo 1095 * Rport is going offline. Just ack the logo
@@ -956,8 +1101,9 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
956 bfa_fcs_rport_send_prlo_acc(rport); 1101 bfa_fcs_rport_send_prlo_acc(rport);
957 break; 1102 break;
958 1103
1104 case RPSM_EVENT_SCN_OFFLINE:
959 case RPSM_EVENT_HCB_ONLINE: 1105 case RPSM_EVENT_HCB_ONLINE:
960 case RPSM_EVENT_SCN: 1106 case RPSM_EVENT_FAB_SCN:
961 case RPSM_EVENT_LOGO_IMP: 1107 case RPSM_EVENT_LOGO_IMP:
962 case RPSM_EVENT_ADDRESS_CHANGE: 1108 case RPSM_EVENT_ADDRESS_CHANGE:
963 /* 1109 /*
@@ -1015,6 +1161,19 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
1015 bfa_fcs_rport_sm_nsdisc_sending); 1161 bfa_fcs_rport_sm_nsdisc_sending);
1016 rport->ns_retries = 0; 1162 rport->ns_retries = 0;
1017 bfa_fcs_rport_send_nsdisc(rport, NULL); 1163 bfa_fcs_rport_send_nsdisc(rport, NULL);
1164 } else if (bfa_fcport_get_topology(rport->port->fcs->bfa) ==
1165 BFA_PORT_TOPOLOGY_LOOP) {
1166 if (rport->scn_online) {
1167 bfa_sm_set_state(rport,
1168 bfa_fcs_rport_sm_adisc_offline_sending);
1169 bfa_fcs_rport_send_adisc(rport, NULL);
1170 } else {
1171 bfa_sm_set_state(rport,
1172 bfa_fcs_rport_sm_offline);
1173 bfa_timer_start(rport->fcs->bfa, &rport->timer,
1174 bfa_fcs_rport_timeout, rport,
1175 bfa_fcs_rport_del_timeout);
1176 }
1018 } else { 1177 } else {
1019 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); 1178 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
1020 rport->plogi_retries = 0; 1179 rport->plogi_retries = 0;
@@ -1027,7 +1186,9 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
1027 bfa_fcs_rport_free(rport); 1186 bfa_fcs_rport_free(rport);
1028 break; 1187 break;
1029 1188
1030 case RPSM_EVENT_SCN: 1189 case RPSM_EVENT_SCN_ONLINE:
1190 case RPSM_EVENT_SCN_OFFLINE:
1191 case RPSM_EVENT_FAB_SCN:
1031 case RPSM_EVENT_LOGO_RCVD: 1192 case RPSM_EVENT_LOGO_RCVD:
1032 case RPSM_EVENT_PRLO_RCVD: 1193 case RPSM_EVENT_PRLO_RCVD:
1033 case RPSM_EVENT_PLOGI_RCVD: 1194 case RPSM_EVENT_PLOGI_RCVD:
@@ -1106,6 +1267,8 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1106 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); 1267 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
1107 break; 1268 break;
1108 1269
1270 case RPSM_EVENT_SCN_ONLINE:
1271 case RPSM_EVENT_SCN_OFFLINE:
1109 case RPSM_EVENT_LOGO_RCVD: 1272 case RPSM_EVENT_LOGO_RCVD:
1110 case RPSM_EVENT_PRLO_RCVD: 1273 case RPSM_EVENT_PRLO_RCVD:
1111 /* 1274 /*
@@ -1146,6 +1309,8 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1146 bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending); 1309 bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
1147 break; 1310 break;
1148 1311
1312 case RPSM_EVENT_SCN_ONLINE:
1313 case RPSM_EVENT_SCN_OFFLINE:
1149 case RPSM_EVENT_ADDRESS_CHANGE: 1314 case RPSM_EVENT_ADDRESS_CHANGE:
1150 break; 1315 break;
1151 1316
@@ -1172,7 +1337,9 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1172 bfa_fcs_rport_free(rport); 1337 bfa_fcs_rport_free(rport);
1173 break; 1338 break;
1174 1339
1175 case RPSM_EVENT_SCN: 1340 case RPSM_EVENT_SCN_ONLINE:
1341 case RPSM_EVENT_SCN_OFFLINE:
1342 case RPSM_EVENT_FAB_SCN:
1176 case RPSM_EVENT_ADDRESS_CHANGE: 1343 case RPSM_EVENT_ADDRESS_CHANGE:
1177 break; 1344 break;
1178 1345
@@ -1209,10 +1376,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1209 bfa_fcs_rport_free(rport); 1376 bfa_fcs_rport_free(rport);
1210 break; 1377 break;
1211 1378
1212 case RPSM_EVENT_SCN: 1379 case RPSM_EVENT_FAB_SCN:
1213 case RPSM_EVENT_ADDRESS_CHANGE: 1380 case RPSM_EVENT_ADDRESS_CHANGE:
1214 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1215 bfa_timer_stop(&rport->timer); 1381 bfa_timer_stop(&rport->timer);
1382 WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) !=
1383 BFA_PORT_TOPOLOGY_LOOP));
1384 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1216 rport->ns_retries = 0; 1385 rport->ns_retries = 0;
1217 bfa_fcs_rport_send_nsdisc(rport, NULL); 1386 bfa_fcs_rport_send_nsdisc(rport, NULL);
1218 break; 1387 break;
@@ -1232,6 +1401,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1232 case RPSM_EVENT_LOGO_RCVD: 1401 case RPSM_EVENT_LOGO_RCVD:
1233 case RPSM_EVENT_PRLO_RCVD: 1402 case RPSM_EVENT_PRLO_RCVD:
1234 case RPSM_EVENT_LOGO_IMP: 1403 case RPSM_EVENT_LOGO_IMP:
1404 case RPSM_EVENT_SCN_OFFLINE:
1235 break; 1405 break;
1236 1406
1237 case RPSM_EVENT_PLOGI_COMP: 1407 case RPSM_EVENT_PLOGI_COMP:
@@ -1240,6 +1410,12 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1240 bfa_fcs_rport_fcs_online_action(rport); 1410 bfa_fcs_rport_fcs_online_action(rport);
1241 break; 1411 break;
1242 1412
1413 case RPSM_EVENT_SCN_ONLINE:
1414 bfa_timer_stop(&rport->timer);
1415 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
1416 bfa_fcs_rport_send_plogi(rport, NULL);
1417 break;
1418
1243 case RPSM_EVENT_PLOGI_SEND: 1419 case RPSM_EVENT_PLOGI_SEND:
1244 bfa_timer_stop(&rport->timer); 1420 bfa_timer_stop(&rport->timer);
1245 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); 1421 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
@@ -1280,7 +1456,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1280 bfa_fcs_rport_send_plogiacc(rport, NULL); 1456 bfa_fcs_rport_send_plogiacc(rport, NULL);
1281 break; 1457 break;
1282 1458
1283 case RPSM_EVENT_SCN: 1459 case RPSM_EVENT_FAB_SCN:
1284 case RPSM_EVENT_LOGO_RCVD: 1460 case RPSM_EVENT_LOGO_RCVD:
1285 case RPSM_EVENT_PRLO_RCVD: 1461 case RPSM_EVENT_PRLO_RCVD:
1286 case RPSM_EVENT_PLOGI_SEND: 1462 case RPSM_EVENT_PLOGI_SEND:
@@ -1326,7 +1502,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1326 bfa_fcs_rport_send_nsdisc(rport, NULL); 1502 bfa_fcs_rport_send_nsdisc(rport, NULL);
1327 break; 1503 break;
1328 1504
1329 case RPSM_EVENT_SCN: 1505 case RPSM_EVENT_FAB_SCN:
1330 case RPSM_EVENT_ADDRESS_CHANGE: 1506 case RPSM_EVENT_ADDRESS_CHANGE:
1331 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); 1507 bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
1332 bfa_timer_stop(&rport->timer); 1508 bfa_timer_stop(&rport->timer);
@@ -1439,7 +1615,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1439 case RPSM_EVENT_PRLO_RCVD: 1615 case RPSM_EVENT_PRLO_RCVD:
1440 bfa_fcs_rport_send_prlo_acc(rport); 1616 bfa_fcs_rport_send_prlo_acc(rport);
1441 break; 1617 break;
1442 case RPSM_EVENT_SCN: 1618 case RPSM_EVENT_FAB_SCN:
1443 /* 1619 /*
1444 * ignore, wait for NS query response 1620 * ignore, wait for NS query response
1445 */ 1621 */
@@ -2546,7 +2722,7 @@ void
2546bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) 2722bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
2547{ 2723{
2548 rport->stats.rscns++; 2724 rport->stats.rscns++;
2549 bfa_sm_send_event(rport, RPSM_EVENT_SCN); 2725 bfa_sm_send_event(rport, RPSM_EVENT_FAB_SCN);
2550} 2726}
2551 2727
2552/* 2728/*
@@ -2621,6 +2797,48 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
2621 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data); 2797 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
2622} 2798}
2623 2799
2800void
2801bfa_cb_rport_scn_online(struct bfa_s *bfa)
2802{
2803 struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
2804 struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
2805 struct bfa_fcs_rport_s *rp;
2806 struct list_head *qe;
2807
2808 list_for_each(qe, &port->rport_q) {
2809 rp = (struct bfa_fcs_rport_s *) qe;
2810 bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE);
2811 rp->scn_online = BFA_TRUE;
2812 }
2813
2814 if (bfa_fcs_lport_is_online(port))
2815 bfa_fcs_lport_lip_scn_online(port);
2816}
2817
2818void
2819bfa_cb_rport_scn_no_dev(void *rport)
2820{
2821 struct bfa_fcs_rport_s *rp = rport;
2822
2823 bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
2824 rp->scn_online = BFA_FALSE;
2825}
2826
2827void
2828bfa_cb_rport_scn_offline(struct bfa_s *bfa)
2829{
2830 struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
2831 struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
2832 struct bfa_fcs_rport_s *rp;
2833 struct list_head *qe;
2834
2835 list_for_each(qe, &port->rport_q) {
2836 rp = (struct bfa_fcs_rport_s *) qe;
2837 bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
2838 rp->scn_online = BFA_FALSE;
2839 }
2840}
2841
2624/* 2842/*
2625 * brief 2843 * brief
2626 * This routine is a static BFA callback when there is a QoS priority 2844 * This routine is a static BFA callback when there is a QoS priority
@@ -2808,6 +3026,9 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2808 struct bfa_rport_qos_attr_s qos_attr; 3026 struct bfa_rport_qos_attr_s qos_attr;
2809 struct bfa_fcs_lport_s *port = rport->port; 3027 struct bfa_fcs_lport_s *port = rport->port;
2810 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; 3028 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
3029 struct bfa_port_attr_s port_attr;
3030
3031 bfa_fcport_get_attr(rport->fcs->bfa, &port_attr);
2811 3032
2812 memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); 3033 memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
2813 memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s)); 3034 memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));
@@ -2838,7 +3059,8 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2838 rport_speed = 3059 rport_speed =
2839 bfa_fcport_get_ratelim_speed(rport->fcs->bfa); 3060 bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
2840 3061
2841 if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port)) 3062 if ((bfa_fcs_lport_get_rport_max_speed(port) !=
3063 BFA_PORT_SPEED_UNKNOWN) && (rport_speed < port_attr.speed))
2842 rport_attr->trl_enforced = BFA_TRUE; 3064 rport_attr->trl_enforced = BFA_TRUE;
2843 } 3065 }
2844} 3066}
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 75ca8752b9f4..0116c1032e25 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -731,8 +731,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
731 /* 731 /*
732 * Unlock the hw semaphore. Should be here only once per boot. 732 * Unlock the hw semaphore. Should be here only once per boot.
733 */ 733 */
734 readl(iocpf->ioc->ioc_regs.ioc_sem_reg); 734 bfa_ioc_ownership_reset(iocpf->ioc);
735 writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
736 735
737 /* 736 /*
738 * unlock init semaphore. 737 * unlock init semaphore.
@@ -1751,6 +1750,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1751 attr->card_type = be32_to_cpu(attr->card_type); 1750 attr->card_type = be32_to_cpu(attr->card_type);
1752 attr->maxfrsize = be16_to_cpu(attr->maxfrsize); 1751 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
1753 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); 1752 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
1753 attr->mfg_year = be16_to_cpu(attr->mfg_year);
1754 1754
1755 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1755 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1756} 1756}
@@ -2497,6 +2497,9 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2497 ad_attr->cna_capable = bfa_ioc_is_cna(ioc); 2497 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2498 ad_attr->trunk_capable = (ad_attr->nports > 1) && 2498 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2499 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; 2499 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2500 ad_attr->mfg_day = ioc_attr->mfg_day;
2501 ad_attr->mfg_month = ioc_attr->mfg_month;
2502 ad_attr->mfg_year = ioc_attr->mfg_year;
2500} 2503}
2501 2504
2502enum bfa_ioc_type_e 2505enum bfa_ioc_type_e
@@ -2923,7 +2926,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2923 return; 2926 return;
2924 } 2927 }
2925 2928
2926 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) 2929 if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
2927 bfa_iocpf_timeout(ioc); 2930 bfa_iocpf_timeout(ioc);
2928 else { 2931 else {
2929 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; 2932 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
@@ -3016,7 +3019,6 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3016 struct bfa_ablk_cfg_inst_s *cfg_inst; 3019 struct bfa_ablk_cfg_inst_s *cfg_inst;
3017 int i, j; 3020 int i, j;
3018 u16 be16; 3021 u16 be16;
3019 u32 be32;
3020 3022
3021 for (i = 0; i < BFA_ABLK_MAX; i++) { 3023 for (i = 0; i < BFA_ABLK_MAX; i++) {
3022 cfg_inst = &cfg->inst[i]; 3024 cfg_inst = &cfg->inst[i];
@@ -3027,8 +3029,10 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3027 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); 3029 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3028 be16 = cfg_inst->pf_cfg[j].num_vectors; 3030 be16 = cfg_inst->pf_cfg[j].num_vectors;
3029 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); 3031 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3030 be32 = cfg_inst->pf_cfg[j].bw; 3032 be16 = cfg_inst->pf_cfg[j].bw_min;
3031 cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32); 3033 cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3034 be16 = cfg_inst->pf_cfg[j].bw_max;
3035 cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3032 } 3036 }
3033 } 3037 }
3034} 3038}
@@ -3170,7 +3174,8 @@ bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3170 3174
3171bfa_status_t 3175bfa_status_t
3172bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, 3176bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3173 u8 port, enum bfi_pcifn_class personality, int bw, 3177 u8 port, enum bfi_pcifn_class personality,
3178 u16 bw_min, u16 bw_max,
3174 bfa_ablk_cbfn_t cbfn, void *cbarg) 3179 bfa_ablk_cbfn_t cbfn, void *cbarg)
3175{ 3180{
3176 struct bfi_ablk_h2i_pf_req_s *m; 3181 struct bfi_ablk_h2i_pf_req_s *m;
@@ -3194,7 +3199,8 @@ bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3194 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, 3199 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3195 bfa_ioc_portid(ablk->ioc)); 3200 bfa_ioc_portid(ablk->ioc));
3196 m->pers = cpu_to_be16((u16)personality); 3201 m->pers = cpu_to_be16((u16)personality);
3197 m->bw = cpu_to_be32(bw); 3202 m->bw_min = cpu_to_be16(bw_min);
3203 m->bw_max = cpu_to_be16(bw_max);
3198 m->port = port; 3204 m->port = port;
3199 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); 3205 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3200 3206
@@ -3294,8 +3300,8 @@ bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3294} 3300}
3295 3301
3296bfa_status_t 3302bfa_status_t
3297bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, 3303bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3298 bfa_ablk_cbfn_t cbfn, void *cbarg) 3304 u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3299{ 3305{
3300 struct bfi_ablk_h2i_pf_req_s *m; 3306 struct bfi_ablk_h2i_pf_req_s *m;
3301 3307
@@ -3317,7 +3323,8 @@ bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3317 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, 3323 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3318 bfa_ioc_portid(ablk->ioc)); 3324 bfa_ioc_portid(ablk->ioc));
3319 m->pcifn = (u8)pcifn; 3325 m->pcifn = (u8)pcifn;
3320 m->bw = cpu_to_be32(bw); 3326 m->bw_min = cpu_to_be16(bw_min);
3327 m->bw_max = cpu_to_be16(bw_max);
3321 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); 3328 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3322 3329
3323 return BFA_STATUS_OK; 3330 return BFA_STATUS_OK;
@@ -4680,22 +4687,25 @@ diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4680 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); 4687 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4681 diag->tsensor.temp->ts_junc = rsp->ts_junc; 4688 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4682 diag->tsensor.temp->ts_brd = rsp->ts_brd; 4689 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4683 diag->tsensor.temp->status = BFA_STATUS_OK;
4684 4690
4685 if (rsp->ts_brd) { 4691 if (rsp->ts_brd) {
4692 /* tsensor.temp->status is brd_temp status */
4693 diag->tsensor.temp->status = rsp->status;
4686 if (rsp->status == BFA_STATUS_OK) { 4694 if (rsp->status == BFA_STATUS_OK) {
4687 diag->tsensor.temp->brd_temp = 4695 diag->tsensor.temp->brd_temp =
4688 be16_to_cpu(rsp->brd_temp); 4696 be16_to_cpu(rsp->brd_temp);
4689 } else { 4697 } else
4690 bfa_trc(diag, rsp->status);
4691 diag->tsensor.temp->brd_temp = 0; 4698 diag->tsensor.temp->brd_temp = 0;
4692 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4693 }
4694 } 4699 }
4700
4701 bfa_trc(diag, rsp->status);
4695 bfa_trc(diag, rsp->ts_junc); 4702 bfa_trc(diag, rsp->ts_junc);
4696 bfa_trc(diag, rsp->temp); 4703 bfa_trc(diag, rsp->temp);
4697 bfa_trc(diag, rsp->ts_brd); 4704 bfa_trc(diag, rsp->ts_brd);
4698 bfa_trc(diag, rsp->brd_temp); 4705 bfa_trc(diag, rsp->brd_temp);
4706
4707 /* tsensor status is always good bcos we always have junction temp */
4708 diag->tsensor.status = BFA_STATUS_OK;
4699 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); 4709 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4700 diag->tsensor.lock = 0; 4710 diag->tsensor.lock = 0;
4701} 4711}
@@ -4924,6 +4934,7 @@ bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4924 diag->tsensor.temp = result; 4934 diag->tsensor.temp = result;
4925 diag->tsensor.cbfn = cbfn; 4935 diag->tsensor.cbfn = cbfn;
4926 diag->tsensor.cbarg = cbarg; 4936 diag->tsensor.cbarg = cbarg;
4937 diag->tsensor.status = BFA_STATUS_OK;
4927 4938
4928 /* Send msg to fw */ 4939 /* Send msg to fw */
4929 diag_tempsensor_send(diag); 4940 diag_tempsensor_send(diag);
@@ -5615,7 +5626,7 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5615 } 5626 }
5616 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); 5627 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5617 bfa_timer_start(dconf->bfa, &dconf->timer, 5628 bfa_timer_start(dconf->bfa, &dconf->timer,
5618 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV); 5629 bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5619 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), 5630 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5620 BFA_FLASH_PART_DRV, dconf->instance, 5631 BFA_FLASH_PART_DRV, dconf->instance,
5621 dconf->dconf, 5632 dconf->dconf,
@@ -5655,7 +5666,7 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5655 break; 5666 break;
5656 case BFA_DCONF_SM_TIMEOUT: 5667 case BFA_DCONF_SM_TIMEOUT:
5657 bfa_sm_set_state(dconf, bfa_dconf_sm_ready); 5668 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5658 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED); 5669 bfa_ioc_suspend(&dconf->bfa->ioc);
5659 break; 5670 break;
5660 case BFA_DCONF_SM_EXIT: 5671 case BFA_DCONF_SM_EXIT:
5661 bfa_timer_stop(&dconf->timer); 5672 bfa_timer_stop(&dconf->timer);
@@ -5853,7 +5864,6 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
5853 struct bfa_s *bfa = arg; 5864 struct bfa_s *bfa = arg;
5854 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); 5865 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5855 5866
5856 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5857 if (status == BFA_STATUS_OK) { 5867 if (status == BFA_STATUS_OK) {
5858 bfa_dconf_read_data_valid(bfa) = BFA_TRUE; 5868 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5859 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) 5869 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
@@ -5861,6 +5871,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
5861 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) 5871 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5862 dconf->dconf->hdr.version = BFI_DCONF_VERSION; 5872 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5863 } 5873 }
5874 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5864 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE); 5875 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
5865} 5876}
5866 5877
@@ -5945,3 +5956,448 @@ bfa_dconf_modexit(struct bfa_s *bfa)
5945 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); 5956 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5946 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); 5957 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5947} 5958}
5959
5960/*
5961 * FRU specific functions
5962 */
5963
5964#define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5965#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
5966#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
5967
5968static void
5969bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
5970{
5971 struct bfa_fru_s *fru = cbarg;
5972
5973 bfa_trc(fru, event);
5974
5975 switch (event) {
5976 case BFA_IOC_E_DISABLED:
5977 case BFA_IOC_E_FAILED:
5978 if (fru->op_busy) {
5979 fru->status = BFA_STATUS_IOC_FAILURE;
5980 fru->cbfn(fru->cbarg, fru->status);
5981 fru->op_busy = 0;
5982 }
5983 break;
5984
5985 default:
5986 break;
5987 }
5988}
5989
5990/*
5991 * Send fru write request.
5992 *
5993 * @param[in] cbarg - callback argument
5994 */
5995static void
5996bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
5997{
5998 struct bfa_fru_s *fru = cbarg;
5999 struct bfi_fru_write_req_s *msg =
6000 (struct bfi_fru_write_req_s *) fru->mb.msg;
6001 u32 len;
6002
6003 msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6004 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6005 fru->residue : BFA_FRU_DMA_BUF_SZ;
6006 msg->length = cpu_to_be32(len);
6007
6008 /*
6009 * indicate if it's the last msg of the whole write operation
6010 */
6011 msg->last = (len == fru->residue) ? 1 : 0;
6012
6013 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6014 bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6015
6016 memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6017 bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6018
6019 fru->residue -= len;
6020 fru->offset += len;
6021}
6022
6023/*
6024 * Send fru read request.
6025 *
6026 * @param[in] cbarg - callback argument
6027 */
6028static void
6029bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6030{
6031 struct bfa_fru_s *fru = cbarg;
6032 struct bfi_fru_read_req_s *msg =
6033 (struct bfi_fru_read_req_s *) fru->mb.msg;
6034 u32 len;
6035
6036 msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6037 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6038 fru->residue : BFA_FRU_DMA_BUF_SZ;
6039 msg->length = cpu_to_be32(len);
6040 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6041 bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6042 bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6043}
6044
6045/*
6046 * Flash memory info API.
6047 *
6048 * @param[in] mincfg - minimal cfg variable
6049 */
6050u32
6051bfa_fru_meminfo(bfa_boolean_t mincfg)
6052{
6053 /* min driver doesn't need fru */
6054 if (mincfg)
6055 return 0;
6056
6057 return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6058}
6059
6060/*
6061 * Flash attach API.
6062 *
6063 * @param[in] fru - fru structure
6064 * @param[in] ioc - ioc structure
6065 * @param[in] dev - device structure
6066 * @param[in] trcmod - trace module
6067 * @param[in] logmod - log module
6068 */
6069void
6070bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6071 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6072{
6073 fru->ioc = ioc;
6074 fru->trcmod = trcmod;
6075 fru->cbfn = NULL;
6076 fru->cbarg = NULL;
6077 fru->op_busy = 0;
6078
6079 bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6080 bfa_q_qe_init(&fru->ioc_notify);
6081 bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6082 list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6083
6084 /* min driver doesn't need fru */
6085 if (mincfg) {
6086 fru->dbuf_kva = NULL;
6087 fru->dbuf_pa = 0;
6088 }
6089}
6090
6091/*
6092 * Claim memory for fru
6093 *
6094 * @param[in] fru - fru structure
6095 * @param[in] dm_kva - pointer to virtual memory address
6096 * @param[in] dm_pa - frusical memory address
6097 * @param[in] mincfg - minimal cfg variable
6098 */
6099void
6100bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6101 bfa_boolean_t mincfg)
6102{
6103 if (mincfg)
6104 return;
6105
6106 fru->dbuf_kva = dm_kva;
6107 fru->dbuf_pa = dm_pa;
6108 memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6109 dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6110 dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6111}
6112
6113/*
6114 * Update fru vpd image.
6115 *
6116 * @param[in] fru - fru structure
6117 * @param[in] buf - update data buffer
6118 * @param[in] len - data buffer length
6119 * @param[in] offset - offset relative to starting address
6120 * @param[in] cbfn - callback function
6121 * @param[in] cbarg - callback argument
6122 *
6123 * Return status.
6124 */
6125bfa_status_t
6126bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6127 bfa_cb_fru_t cbfn, void *cbarg)
6128{
6129 bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6130 bfa_trc(fru, len);
6131 bfa_trc(fru, offset);
6132
6133 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6134 return BFA_STATUS_FRU_NOT_PRESENT;
6135
6136 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6137 return BFA_STATUS_CMD_NOTSUPP;
6138
6139 if (!bfa_ioc_is_operational(fru->ioc))
6140 return BFA_STATUS_IOC_NON_OP;
6141
6142 if (fru->op_busy) {
6143 bfa_trc(fru, fru->op_busy);
6144 return BFA_STATUS_DEVBUSY;
6145 }
6146
6147 fru->op_busy = 1;
6148
6149 fru->cbfn = cbfn;
6150 fru->cbarg = cbarg;
6151 fru->residue = len;
6152 fru->offset = 0;
6153 fru->addr_off = offset;
6154 fru->ubuf = buf;
6155
6156 bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6157
6158 return BFA_STATUS_OK;
6159}
6160
6161/*
6162 * Read fru vpd image.
6163 *
6164 * @param[in] fru - fru structure
6165 * @param[in] buf - read data buffer
6166 * @param[in] len - data buffer length
6167 * @param[in] offset - offset relative to starting address
6168 * @param[in] cbfn - callback function
6169 * @param[in] cbarg - callback argument
6170 *
6171 * Return status.
6172 */
6173bfa_status_t
6174bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6175 bfa_cb_fru_t cbfn, void *cbarg)
6176{
6177 bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6178 bfa_trc(fru, len);
6179 bfa_trc(fru, offset);
6180
6181 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6182 return BFA_STATUS_FRU_NOT_PRESENT;
6183
6184 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6185 return BFA_STATUS_CMD_NOTSUPP;
6186
6187 if (!bfa_ioc_is_operational(fru->ioc))
6188 return BFA_STATUS_IOC_NON_OP;
6189
6190 if (fru->op_busy) {
6191 bfa_trc(fru, fru->op_busy);
6192 return BFA_STATUS_DEVBUSY;
6193 }
6194
6195 fru->op_busy = 1;
6196
6197 fru->cbfn = cbfn;
6198 fru->cbarg = cbarg;
6199 fru->residue = len;
6200 fru->offset = 0;
6201 fru->addr_off = offset;
6202 fru->ubuf = buf;
6203 bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6204
6205 return BFA_STATUS_OK;
6206}
6207
6208/*
6209 * Get maximum size fru vpd image.
6210 *
6211 * @param[in] fru - fru structure
6212 * @param[out] size - maximum size of fru vpd data
6213 *
6214 * Return status.
6215 */
6216bfa_status_t
6217bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6218{
6219 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6220 return BFA_STATUS_FRU_NOT_PRESENT;
6221
6222 if (!bfa_ioc_is_operational(fru->ioc))
6223 return BFA_STATUS_IOC_NON_OP;
6224
6225 if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK)
6226 *max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6227 else
6228 return BFA_STATUS_CMD_NOTSUPP;
6229 return BFA_STATUS_OK;
6230}
6231/*
6232 * tfru write.
6233 *
6234 * @param[in] fru - fru structure
6235 * @param[in] buf - update data buffer
6236 * @param[in] len - data buffer length
6237 * @param[in] offset - offset relative to starting address
6238 * @param[in] cbfn - callback function
6239 * @param[in] cbarg - callback argument
6240 *
6241 * Return status.
6242 */
6243bfa_status_t
6244bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6245 bfa_cb_fru_t cbfn, void *cbarg)
6246{
6247 bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6248 bfa_trc(fru, len);
6249 bfa_trc(fru, offset);
6250 bfa_trc(fru, *((u8 *) buf));
6251
6252 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6253 return BFA_STATUS_FRU_NOT_PRESENT;
6254
6255 if (!bfa_ioc_is_operational(fru->ioc))
6256 return BFA_STATUS_IOC_NON_OP;
6257
6258 if (fru->op_busy) {
6259 bfa_trc(fru, fru->op_busy);
6260 return BFA_STATUS_DEVBUSY;
6261 }
6262
6263 fru->op_busy = 1;
6264
6265 fru->cbfn = cbfn;
6266 fru->cbarg = cbarg;
6267 fru->residue = len;
6268 fru->offset = 0;
6269 fru->addr_off = offset;
6270 fru->ubuf = buf;
6271
6272 bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6273
6274 return BFA_STATUS_OK;
6275}
6276
6277/*
6278 * tfru read.
6279 *
6280 * @param[in] fru - fru structure
6281 * @param[in] buf - read data buffer
6282 * @param[in] len - data buffer length
6283 * @param[in] offset - offset relative to starting address
6284 * @param[in] cbfn - callback function
6285 * @param[in] cbarg - callback argument
6286 *
6287 * Return status.
6288 */
6289bfa_status_t
6290bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6291 bfa_cb_fru_t cbfn, void *cbarg)
6292{
6293 bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6294 bfa_trc(fru, len);
6295 bfa_trc(fru, offset);
6296
6297 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6298 return BFA_STATUS_FRU_NOT_PRESENT;
6299
6300 if (!bfa_ioc_is_operational(fru->ioc))
6301 return BFA_STATUS_IOC_NON_OP;
6302
6303 if (fru->op_busy) {
6304 bfa_trc(fru, fru->op_busy);
6305 return BFA_STATUS_DEVBUSY;
6306 }
6307
6308 fru->op_busy = 1;
6309
6310 fru->cbfn = cbfn;
6311 fru->cbarg = cbarg;
6312 fru->residue = len;
6313 fru->offset = 0;
6314 fru->addr_off = offset;
6315 fru->ubuf = buf;
6316 bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6317
6318 return BFA_STATUS_OK;
6319}
6320
6321/*
6322 * Process fru response messages upon receiving interrupts.
6323 *
6324 * @param[in] fruarg - fru structure
6325 * @param[in] msg - message structure
6326 */
6327void
6328bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6329{
6330 struct bfa_fru_s *fru = fruarg;
6331 struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6332 u32 status;
6333
6334 bfa_trc(fru, msg->mh.msg_id);
6335
6336 if (!fru->op_busy) {
6337 /*
6338 * receiving response after ioc failure
6339 */
6340 bfa_trc(fru, 0x9999);
6341 return;
6342 }
6343
6344 switch (msg->mh.msg_id) {
6345 case BFI_FRUVPD_I2H_WRITE_RSP:
6346 case BFI_TFRU_I2H_WRITE_RSP:
6347 status = be32_to_cpu(rsp->status);
6348 bfa_trc(fru, status);
6349
6350 if (status != BFA_STATUS_OK || fru->residue == 0) {
6351 fru->status = status;
6352 fru->op_busy = 0;
6353 if (fru->cbfn)
6354 fru->cbfn(fru->cbarg, fru->status);
6355 } else {
6356 bfa_trc(fru, fru->offset);
6357 if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6358 bfa_fru_write_send(fru,
6359 BFI_FRUVPD_H2I_WRITE_REQ);
6360 else
6361 bfa_fru_write_send(fru,
6362 BFI_TFRU_H2I_WRITE_REQ);
6363 }
6364 break;
6365 case BFI_FRUVPD_I2H_READ_RSP:
6366 case BFI_TFRU_I2H_READ_RSP:
6367 status = be32_to_cpu(rsp->status);
6368 bfa_trc(fru, status);
6369
6370 if (status != BFA_STATUS_OK) {
6371 fru->status = status;
6372 fru->op_busy = 0;
6373 if (fru->cbfn)
6374 fru->cbfn(fru->cbarg, fru->status);
6375 } else {
6376 u32 len = be32_to_cpu(rsp->length);
6377
6378 bfa_trc(fru, fru->offset);
6379 bfa_trc(fru, len);
6380
6381 memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6382 fru->residue -= len;
6383 fru->offset += len;
6384
6385 if (fru->residue == 0) {
6386 fru->status = status;
6387 fru->op_busy = 0;
6388 if (fru->cbfn)
6389 fru->cbfn(fru->cbarg, fru->status);
6390 } else {
6391 if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6392 bfa_fru_read_send(fru,
6393 BFI_FRUVPD_H2I_READ_REQ);
6394 else
6395 bfa_fru_read_send(fru,
6396 BFI_TFRU_H2I_READ_REQ);
6397 }
6398 }
6399 break;
6400 default:
6401 WARN_ON(1);
6402 }
6403}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index b2856f96567c..23a90e7b7107 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -702,6 +702,55 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy,
702void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); 702void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
703 703
704/* 704/*
705 * FRU module specific
706 */
707typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status);
708
709struct bfa_fru_s {
710 struct bfa_ioc_s *ioc; /* back pointer to ioc */
711 struct bfa_trc_mod_s *trcmod; /* trace module */
712 u8 op_busy; /* operation busy flag */
713 u8 rsv[3];
714 u32 residue; /* residual length */
715 u32 offset; /* offset */
716 bfa_status_t status; /* status */
717 u8 *dbuf_kva; /* dma buf virtual address */
718 u64 dbuf_pa; /* dma buf physical address */
719 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
720 bfa_cb_fru_t cbfn; /* user callback function */
721 void *cbarg; /* user callback arg */
722 u8 *ubuf; /* user supplied buffer */
723 struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */
724 u32 addr_off; /* fru address offset */
725 struct bfa_mbox_cmd_s mb; /* mailbox */
726 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
727 struct bfa_mem_dma_s fru_dma;
728};
729
730#define BFA_FRU(__bfa) (&(__bfa)->modules.fru)
731#define BFA_MEM_FRU_DMA(__bfa) (&(BFA_FRU(__bfa)->fru_dma))
732
733bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru,
734 void *buf, u32 len, u32 offset,
735 bfa_cb_fru_t cbfn, void *cbarg);
736bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru,
737 void *buf, u32 len, u32 offset,
738 bfa_cb_fru_t cbfn, void *cbarg);
739bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size);
740bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru,
741 void *buf, u32 len, u32 offset,
742 bfa_cb_fru_t cbfn, void *cbarg);
743bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru,
744 void *buf, u32 len, u32 offset,
745 bfa_cb_fru_t cbfn, void *cbarg);
746u32 bfa_fru_meminfo(bfa_boolean_t mincfg);
747void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc,
748 void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
749void bfa_fru_memclaim(struct bfa_fru_s *fru,
750 u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
751void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg);
752
753/*
705 * Driver Config( dconf) specific 754 * Driver Config( dconf) specific
706 */ 755 */
707#define BFI_DCONF_SIGNATURE 0xabcdabcd 756#define BFI_DCONF_SIGNATURE 0xabcdabcd
@@ -716,6 +765,7 @@ struct bfa_dconf_hdr_s {
716struct bfa_dconf_s { 765struct bfa_dconf_s {
717 struct bfa_dconf_hdr_s hdr; 766 struct bfa_dconf_hdr_s hdr;
718 struct bfa_lunmask_cfg_s lun_mask; 767 struct bfa_lunmask_cfg_s lun_mask;
768 struct bfa_throttle_cfg_s throttle_cfg;
719}; 769};
720#pragma pack() 770#pragma pack()
721 771
@@ -738,6 +788,8 @@ struct bfa_dconf_mod_s {
738#define bfa_dconf_read_data_valid(__bfa) \ 788#define bfa_dconf_read_data_valid(__bfa) \
739 (BFA_DCONF_MOD(__bfa)->read_data_valid) 789 (BFA_DCONF_MOD(__bfa)->read_data_valid)
740#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */ 790#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */
791#define bfa_dconf_get_min_cfg(__bfa) \
792 (BFA_DCONF_MOD(__bfa)->min_cfg)
741 793
742void bfa_dconf_modinit(struct bfa_s *bfa); 794void bfa_dconf_modinit(struct bfa_s *bfa);
743void bfa_dconf_modexit(struct bfa_s *bfa); 795void bfa_dconf_modexit(struct bfa_s *bfa);
@@ -761,7 +813,8 @@ bfa_status_t bfa_dconf_update(struct bfa_s *bfa);
761#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) 813#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
762#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) 814#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
763#define bfa_ioc_speed_sup(__ioc) \ 815#define bfa_ioc_speed_sup(__ioc) \
764 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) 816 ((bfa_ioc_is_cna(__ioc)) ? BFA_PORT_SPEED_10GBPS : \
817 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop))
765#define bfa_ioc_get_nports(__ioc) \ 818#define bfa_ioc_get_nports(__ioc) \
766 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) 819 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
767 820
@@ -885,12 +938,12 @@ bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
885 enum bfa_mode_s mode, int max_pf, int max_vf, 938 enum bfa_mode_s mode, int max_pf, int max_vf,
886 bfa_ablk_cbfn_t cbfn, void *cbarg); 939 bfa_ablk_cbfn_t cbfn, void *cbarg);
887bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn, 940bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
888 u8 port, enum bfi_pcifn_class personality, int bw, 941 u8 port, enum bfi_pcifn_class personality,
889 bfa_ablk_cbfn_t cbfn, void *cbarg); 942 u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
890bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn, 943bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
891 bfa_ablk_cbfn_t cbfn, void *cbarg); 944 bfa_ablk_cbfn_t cbfn, void *cbarg);
892bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw, 945bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn,
893 bfa_ablk_cbfn_t cbfn, void *cbarg); 946 u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
894bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, 947bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
895 bfa_ablk_cbfn_t cbfn, void *cbarg); 948 bfa_ablk_cbfn_t cbfn, void *cbarg);
896bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, 949bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 2eb0c6a2938d..de4e726a1263 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -57,13 +57,6 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
57 u32 usecnt; 57 u32 usecnt;
58 struct bfi_ioc_image_hdr_s fwhdr; 58 struct bfi_ioc_image_hdr_s fwhdr;
59 59
60 /*
61 * If bios boot (flash based) -- do not increment usage count
62 */
63 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
64 BFA_IOC_FWIMG_MINSZ)
65 return BFA_TRUE;
66
67 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 60 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
68 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 61 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
69 62
@@ -115,13 +108,6 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
115 u32 usecnt; 108 u32 usecnt;
116 109
117 /* 110 /*
118 * If bios boot (flash based) -- do not decrement usage count
119 */
120 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
121 BFA_IOC_FWIMG_MINSZ)
122 return;
123
124 /*
125 * decrement usage count 111 * decrement usage count
126 */ 112 */
127 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 113 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@@ -400,13 +386,12 @@ static void
400bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) 386bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
401{ 387{
402 388
403 if (bfa_ioc_is_cna(ioc)) { 389 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
404 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 390 writel(0, ioc->ioc_regs.ioc_usage_reg);
405 writel(0, ioc->ioc_regs.ioc_usage_reg); 391 readl(ioc->ioc_regs.ioc_usage_sem_reg);
406 readl(ioc->ioc_regs.ioc_usage_sem_reg); 392 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
407 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
408 }
409 393
394 writel(0, ioc->ioc_regs.ioc_fail_sync);
410 /* 395 /*
411 * Read the hw sem reg to make sure that it is locked 396 * Read the hw sem reg to make sure that it is locked
412 * before we clear it. If it is not locked, writing 1 397 * before we clear it. If it is not locked, writing 1
@@ -759,25 +744,6 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
759void 744void
760bfa_ioc_ct2_mac_reset(void __iomem *rb) 745bfa_ioc_ct2_mac_reset(void __iomem *rb)
761{ 746{
762 u32 r32;
763
764 bfa_ioc_ct2_sclk_init(rb);
765 bfa_ioc_ct2_lclk_init(rb);
766
767 /*
768 * release soft reset on s_clk & l_clk
769 */
770 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
771 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
772 (rb + CT2_APP_PLL_SCLK_CTL_REG));
773
774 /*
775 * release soft reset on s_clk & l_clk
776 */
777 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
778 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
779 (rb + CT2_APP_PLL_LCLK_CTL_REG));
780
781 /* put port0, port1 MAC & AHB in reset */ 747 /* put port0, port1 MAC & AHB in reset */
782 writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), 748 writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
783 rb + CT2_CSI_MAC_CONTROL_REG(0)); 749 rb + CT2_CSI_MAC_CONTROL_REG(0));
@@ -785,8 +751,21 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
785 rb + CT2_CSI_MAC_CONTROL_REG(1)); 751 rb + CT2_CSI_MAC_CONTROL_REG(1));
786} 752}
787 753
754static void
755bfa_ioc_ct2_enable_flash(void __iomem *rb)
756{
757 u32 r32;
758
759 r32 = readl((rb + PSS_GPIO_OUT_REG));
760 writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
761 r32 = readl((rb + PSS_GPIO_OE_REG));
762 writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
763}
764
788#define CT2_NFC_MAX_DELAY 1000 765#define CT2_NFC_MAX_DELAY 1000
789#define CT2_NFC_VER_VALID 0x143 766#define CT2_NFC_PAUSE_MAX_DELAY 4000
767#define CT2_NFC_VER_VALID 0x147
768#define CT2_NFC_STATE_RUNNING 0x20000001
790#define BFA_IOC_PLL_POLL 1000000 769#define BFA_IOC_PLL_POLL 1000000
791 770
792static bfa_boolean_t 771static bfa_boolean_t
@@ -802,6 +781,20 @@ bfa_ioc_ct2_nfc_halted(void __iomem *rb)
802} 781}
803 782
804static void 783static void
784bfa_ioc_ct2_nfc_halt(void __iomem *rb)
785{
786 int i;
787
788 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
789 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
790 if (bfa_ioc_ct2_nfc_halted(rb))
791 break;
792 udelay(1000);
793 }
794 WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
795}
796
797static void
805bfa_ioc_ct2_nfc_resume(void __iomem *rb) 798bfa_ioc_ct2_nfc_resume(void __iomem *rb)
806{ 799{
807 u32 r32; 800 u32 r32;
@@ -817,105 +810,142 @@ bfa_ioc_ct2_nfc_resume(void __iomem *rb)
817 WARN_ON(1); 810 WARN_ON(1);
818} 811}
819 812
820bfa_status_t 813static void
821bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) 814bfa_ioc_ct2_clk_reset(void __iomem *rb)
822{ 815{
823 u32 wgn, r32, nfc_ver, i; 816 u32 r32;
824 817
825 wgn = readl(rb + CT2_WGN_STATUS); 818 bfa_ioc_ct2_sclk_init(rb);
826 nfc_ver = readl(rb + CT2_RSC_GPR15_REG); 819 bfa_ioc_ct2_lclk_init(rb);
827 820
828 if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) && 821 /*
829 (nfc_ver >= CT2_NFC_VER_VALID)) { 822 * release soft reset on s_clk & l_clk
830 if (bfa_ioc_ct2_nfc_halted(rb)) 823 */
831 bfa_ioc_ct2_nfc_resume(rb); 824 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
825 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
826 (rb + CT2_APP_PLL_SCLK_CTL_REG));
832 827
833 writel(__RESET_AND_START_SCLK_LCLK_PLLS, 828 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
834 rb + CT2_CSI_FW_CTL_SET_REG); 829 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
830 (rb + CT2_APP_PLL_LCLK_CTL_REG));
835 831
836 for (i = 0; i < BFA_IOC_PLL_POLL; i++) { 832}
837 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
838 if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
839 break;
840 }
841 833
842 WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); 834static void
835bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
836{
837 u32 r32, i;
843 838
844 for (i = 0; i < BFA_IOC_PLL_POLL; i++) { 839 r32 = readl((rb + PSS_CTL_REG));
845 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); 840 r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
846 if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) 841 writel(r32, (rb + PSS_CTL_REG));
847 break; 842
848 } 843 writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);
849 844
850 WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); 845 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
846 r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
847
848 if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
849 break;
850 }
851 WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
852
853 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
854 r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
855
856 if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
857 break;
858 }
859 WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
860
861 r32 = readl(rb + CT2_CSI_FW_CTL_REG);
862 WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
863}
864
865static void
866bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
867{
868 u32 r32;
869 int i;
870
871 if (bfa_ioc_ct2_nfc_halted(rb))
872 bfa_ioc_ct2_nfc_resume(rb);
873 for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
874 r32 = readl(rb + CT2_NFC_STS_REG);
875 if (r32 == CT2_NFC_STATE_RUNNING)
876 return;
851 udelay(1000); 877 udelay(1000);
878 }
852 879
853 r32 = readl(rb + CT2_CSI_FW_CTL_REG); 880 r32 = readl(rb + CT2_NFC_STS_REG);
854 WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); 881 WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING));
855 } else { 882}
856 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
857 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
858 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
859 if (r32 & __NFC_CONTROLLER_HALTED)
860 break;
861 udelay(1000);
862 }
863 883
864 bfa_ioc_ct2_mac_reset(rb); 884bfa_status_t
865 bfa_ioc_ct2_sclk_init(rb); 885bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
866 bfa_ioc_ct2_lclk_init(rb); 886{
887 u32 wgn, r32, nfc_ver;
867 888
868 /* 889 wgn = readl(rb + CT2_WGN_STATUS);
869 * release soft reset on s_clk & l_clk
870 */
871 r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
872 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
873 (rb + CT2_APP_PLL_SCLK_CTL_REG));
874 890
891 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
875 /* 892 /*
876 * release soft reset on s_clk & l_clk 893 * If flash is corrupted, enable flash explicitly
877 */ 894 */
878 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); 895 bfa_ioc_ct2_clk_reset(rb);
879 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, 896 bfa_ioc_ct2_enable_flash(rb);
880 (rb + CT2_APP_PLL_LCLK_CTL_REG));
881 }
882 897
883 /* 898 bfa_ioc_ct2_mac_reset(rb);
884 * Announce flash device presence, if flash was corrupted. 899
885 */ 900 bfa_ioc_ct2_clk_reset(rb);
886 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { 901 bfa_ioc_ct2_enable_flash(rb);
887 r32 = readl(rb + PSS_GPIO_OUT_REG); 902
888 writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); 903 } else {
889 r32 = readl(rb + PSS_GPIO_OE_REG); 904 nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
890 writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); 905
906 if ((nfc_ver >= CT2_NFC_VER_VALID) &&
907 (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
908
909 bfa_ioc_ct2_wait_till_nfc_running(rb);
910
911 bfa_ioc_ct2_nfc_clk_reset(rb);
912 } else {
913 bfa_ioc_ct2_nfc_halt(rb);
914
915 bfa_ioc_ct2_clk_reset(rb);
916 bfa_ioc_ct2_mac_reset(rb);
917 bfa_ioc_ct2_clk_reset(rb);
918
919 }
891 } 920 }
892 921
893 /* 922 /*
894 * Mask the interrupts and clear any 923 * Mask the interrupts and clear any
895 * pending interrupts. 924 * pending interrupts left by BIOS/EFI
896 */ 925 */
926
897 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); 927 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
898 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); 928 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
899 929
900 /* For first time initialization, no need to clear interrupts */ 930 /* For first time initialization, no need to clear interrupts */
901 r32 = readl(rb + HOST_SEM5_REG); 931 r32 = readl(rb + HOST_SEM5_REG);
902 if (r32 & 0x1) { 932 if (r32 & 0x1) {
903 r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); 933 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
904 if (r32 == 1) { 934 if (r32 == 1) {
905 writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT); 935 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
906 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 936 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
907 } 937 }
908 r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); 938 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
909 if (r32 == 1) { 939 if (r32 == 1) {
910 writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT); 940 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
911 readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); 941 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
912 } 942 }
913 } 943 }
914 944
915 bfa_ioc_ct2_mem_init(rb); 945 bfa_ioc_ct2_mem_init(rb);
916 946
917 writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG); 947 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
918 writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG); 948 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
919 949
920 return BFA_STATUS_OK; 950 return BFA_STATUS_OK;
921} 951}
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 189fff71e3c2..a14c784ff3fc 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -45,6 +45,7 @@ struct bfa_modules_s {
45 struct bfa_diag_s diag_mod; /* diagnostics module */ 45 struct bfa_diag_s diag_mod; /* diagnostics module */
46 struct bfa_phy_s phy; /* phy module */ 46 struct bfa_phy_s phy; /* phy module */
47 struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */ 47 struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */
48 struct bfa_fru_s fru; /* fru module */
48}; 49};
49 50
50/* 51/*
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index 95e4ad8759ac..8ea7697deb9b 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -250,6 +250,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
250 return BFA_STATUS_IOC_FAILURE; 250 return BFA_STATUS_IOC_FAILURE;
251 } 251 }
252 252
253 /* if port is d-port enabled, return error */
254 if (port->dport_enabled) {
255 bfa_trc(port, BFA_STATUS_DPORT_ERR);
256 return BFA_STATUS_DPORT_ERR;
257 }
258
253 if (port->endis_pending) { 259 if (port->endis_pending) {
254 bfa_trc(port, BFA_STATUS_DEVBUSY); 260 bfa_trc(port, BFA_STATUS_DEVBUSY);
255 return BFA_STATUS_DEVBUSY; 261 return BFA_STATUS_DEVBUSY;
@@ -300,6 +306,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
300 return BFA_STATUS_IOC_FAILURE; 306 return BFA_STATUS_IOC_FAILURE;
301 } 307 }
302 308
309 /* if port is d-port enabled, return error */
310 if (port->dport_enabled) {
311 bfa_trc(port, BFA_STATUS_DPORT_ERR);
312 return BFA_STATUS_DPORT_ERR;
313 }
314
303 if (port->endis_pending) { 315 if (port->endis_pending) {
304 bfa_trc(port, BFA_STATUS_DEVBUSY); 316 bfa_trc(port, BFA_STATUS_DEVBUSY);
305 return BFA_STATUS_DEVBUSY; 317 return BFA_STATUS_DEVBUSY;
@@ -431,6 +443,10 @@ bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
431 port->endis_cbfn = NULL; 443 port->endis_cbfn = NULL;
432 port->endis_pending = BFA_FALSE; 444 port->endis_pending = BFA_FALSE;
433 } 445 }
446
447 /* clear D-port mode */
448 if (port->dport_enabled)
449 bfa_port_set_dportenabled(port, BFA_FALSE);
434 break; 450 break;
435 default: 451 default:
436 break; 452 break;
@@ -467,6 +483,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
467 port->stats_cbfn = NULL; 483 port->stats_cbfn = NULL;
468 port->endis_cbfn = NULL; 484 port->endis_cbfn = NULL;
469 port->pbc_disabled = BFA_FALSE; 485 port->pbc_disabled = BFA_FALSE;
486 port->dport_enabled = BFA_FALSE;
470 487
471 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); 488 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
472 bfa_q_qe_init(&port->ioc_notify); 489 bfa_q_qe_init(&port->ioc_notify);
@@ -483,6 +500,21 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
483} 500}
484 501
485/* 502/*
503 * bfa_port_set_dportenabled();
504 *
505 * Port module- set pbc disabled flag
506 *
507 * @param[in] port - Pointer to the Port module data structure
508 *
509 * @return void
510 */
511void
512bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled)
513{
514 port->dport_enabled = enabled;
515}
516
517/*
486 * CEE module specific definitions 518 * CEE module specific definitions
487 */ 519 */
488 520
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 947f897328d6..2fcab6bc6280 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -45,6 +45,7 @@ struct bfa_port_s {
45 bfa_status_t endis_status; 45 bfa_status_t endis_status;
46 struct bfa_ioc_notify_s ioc_notify; 46 struct bfa_ioc_notify_s ioc_notify;
47 bfa_boolean_t pbc_disabled; 47 bfa_boolean_t pbc_disabled;
48 bfa_boolean_t dport_enabled;
48 struct bfa_mem_dma_s port_dma; 49 struct bfa_mem_dma_s port_dma;
49}; 50};
50 51
@@ -66,6 +67,8 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port,
66u32 bfa_port_meminfo(void); 67u32 bfa_port_meminfo(void);
67void bfa_port_mem_claim(struct bfa_port_s *port, 68void bfa_port_mem_claim(struct bfa_port_s *port,
68 u8 *dma_kva, u64 dma_pa); 69 u8 *dma_kva, u64 dma_pa);
70void bfa_port_set_dportenabled(struct bfa_port_s *port,
71 bfa_boolean_t enabled);
69 72
70/* 73/*
71 * CEE declaration 74 * CEE declaration
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index b2538d60db34..299c1c889b33 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -67,6 +67,9 @@ enum bfa_fcport_sm_event {
67 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ 67 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
68 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ 68 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
69 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 69 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
70 BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
71 BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
72 BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguratin */
70}; 73};
71 74
72/* 75/*
@@ -197,6 +200,10 @@ static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
197 enum bfa_fcport_sm_event event); 200 enum bfa_fcport_sm_event event);
198static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, 201static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
199 enum bfa_fcport_sm_event event); 202 enum bfa_fcport_sm_event event);
203static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
204 enum bfa_fcport_sm_event event);
205static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
206 enum bfa_fcport_sm_event event);
200 207
201static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, 208static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
202 enum bfa_fcport_ln_sm_event event); 209 enum bfa_fcport_ln_sm_event event);
@@ -226,6 +233,8 @@ static struct bfa_sm_table_s hal_port_sm_table[] = {
226 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, 233 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
227 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, 234 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
228 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, 235 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
236 {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
237 {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
229}; 238};
230 239
231 240
@@ -1244,6 +1253,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1244 * Just ignore 1253 * Just ignore
1245 */ 1254 */
1246 break; 1255 break;
1256 case BFA_LPS_SM_SET_N2N_PID:
1257 /*
1258 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
1259 * this event. Ignore this event.
1260 */
1261 break;
1247 1262
1248 default: 1263 default:
1249 bfa_sm_fault(lps->bfa, event); 1264 bfa_sm_fault(lps->bfa, event);
@@ -1261,6 +1276,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1261 1276
1262 switch (event) { 1277 switch (event) {
1263 case BFA_LPS_SM_FWRSP: 1278 case BFA_LPS_SM_FWRSP:
1279 case BFA_LPS_SM_OFFLINE:
1264 if (lps->status == BFA_STATUS_OK) { 1280 if (lps->status == BFA_STATUS_OK) {
1265 bfa_sm_set_state(lps, bfa_lps_sm_online); 1281 bfa_sm_set_state(lps, bfa_lps_sm_online);
1266 if (lps->fdisc) 1282 if (lps->fdisc)
@@ -1289,7 +1305,6 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1289 bfa_lps_login_comp(lps); 1305 bfa_lps_login_comp(lps);
1290 break; 1306 break;
1291 1307
1292 case BFA_LPS_SM_OFFLINE:
1293 case BFA_LPS_SM_DELETE: 1308 case BFA_LPS_SM_DELETE:
1294 bfa_sm_set_state(lps, bfa_lps_sm_init); 1309 bfa_sm_set_state(lps, bfa_lps_sm_init);
1295 break; 1310 break;
@@ -2169,6 +2184,12 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2169 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 2184 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2170 break; 2185 break;
2171 2186
2187 case BFA_FCPORT_SM_FAA_MISCONFIG:
2188 bfa_fcport_reset_linkinfo(fcport);
2189 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2190 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2191 break;
2192
2172 default: 2193 default:
2173 bfa_sm_fault(fcport->bfa, event); 2194 bfa_sm_fault(fcport->bfa, event);
2174 } 2195 }
@@ -2225,6 +2246,12 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2225 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 2246 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2226 break; 2247 break;
2227 2248
2249 case BFA_FCPORT_SM_FAA_MISCONFIG:
2250 bfa_fcport_reset_linkinfo(fcport);
2251 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2252 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2253 break;
2254
2228 default: 2255 default:
2229 bfa_sm_fault(fcport->bfa, event); 2256 bfa_sm_fault(fcport->bfa, event);
2230 } 2257 }
@@ -2250,11 +2277,11 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2250 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 2277 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2251 2278
2252 bfa_trc(fcport->bfa, 2279 bfa_trc(fcport->bfa,
2253 pevent->link_state.vc_fcf.fcf.fipenabled); 2280 pevent->link_state.attr.vc_fcf.fcf.fipenabled);
2254 bfa_trc(fcport->bfa, 2281 bfa_trc(fcport->bfa,
2255 pevent->link_state.vc_fcf.fcf.fipfailed); 2282 pevent->link_state.attr.vc_fcf.fcf.fipfailed);
2256 2283
2257 if (pevent->link_state.vc_fcf.fcf.fipfailed) 2284 if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
2258 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2285 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2259 BFA_PL_EID_FIP_FCF_DISC, 0, 2286 BFA_PL_EID_FIP_FCF_DISC, 0,
2260 "FIP FCF Discovery Failed"); 2287 "FIP FCF Discovery Failed");
@@ -2311,6 +2338,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2311 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); 2338 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2312 break; 2339 break;
2313 2340
2341 case BFA_FCPORT_SM_FAA_MISCONFIG:
2342 bfa_fcport_reset_linkinfo(fcport);
2343 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2344 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2345 break;
2346
2314 default: 2347 default:
2315 bfa_sm_fault(fcport->bfa, event); 2348 bfa_sm_fault(fcport->bfa, event);
2316 } 2349 }
@@ -2404,6 +2437,12 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2404 } 2437 }
2405 break; 2438 break;
2406 2439
2440 case BFA_FCPORT_SM_FAA_MISCONFIG:
2441 bfa_fcport_reset_linkinfo(fcport);
2442 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2443 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2444 break;
2445
2407 default: 2446 default:
2408 bfa_sm_fault(fcport->bfa, event); 2447 bfa_sm_fault(fcport->bfa, event);
2409 } 2448 }
@@ -2449,6 +2488,12 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2449 bfa_reqq_wcancel(&fcport->reqq_wait); 2488 bfa_reqq_wcancel(&fcport->reqq_wait);
2450 break; 2489 break;
2451 2490
2491 case BFA_FCPORT_SM_FAA_MISCONFIG:
2492 bfa_fcport_reset_linkinfo(fcport);
2493 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2494 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2495 break;
2496
2452 default: 2497 default:
2453 bfa_sm_fault(fcport->bfa, event); 2498 bfa_sm_fault(fcport->bfa, event);
2454 } 2499 }
@@ -2600,6 +2645,10 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2600 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); 2645 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2601 break; 2646 break;
2602 2647
2648 case BFA_FCPORT_SM_DPORTENABLE:
2649 bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
2650 break;
2651
2603 default: 2652 default:
2604 bfa_sm_fault(fcport->bfa, event); 2653 bfa_sm_fault(fcport->bfa, event);
2605 } 2654 }
@@ -2680,6 +2729,81 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2680 } 2729 }
2681} 2730}
2682 2731
2732static void
2733bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
2734{
2735 bfa_trc(fcport->bfa, event);
2736
2737 switch (event) {
2738 case BFA_FCPORT_SM_DPORTENABLE:
2739 case BFA_FCPORT_SM_DISABLE:
2740 case BFA_FCPORT_SM_ENABLE:
2741 case BFA_FCPORT_SM_START:
2742 /*
2743 * Ignore event for a port that is dport
2744 */
2745 break;
2746
2747 case BFA_FCPORT_SM_STOP:
2748 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2749 break;
2750
2751 case BFA_FCPORT_SM_HWFAIL:
2752 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2753 break;
2754
2755 case BFA_FCPORT_SM_DPORTDISABLE:
2756 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2757 break;
2758
2759 default:
2760 bfa_sm_fault(fcport->bfa, event);
2761 }
2762}
2763
2764static void
2765bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
2766 enum bfa_fcport_sm_event event)
2767{
2768 bfa_trc(fcport->bfa, event);
2769
2770 switch (event) {
2771 case BFA_FCPORT_SM_DPORTENABLE:
2772 case BFA_FCPORT_SM_ENABLE:
2773 case BFA_FCPORT_SM_START:
2774 /*
2775 * Ignore event for a port as there is FAA misconfig
2776 */
2777 break;
2778
2779 case BFA_FCPORT_SM_DISABLE:
2780 if (bfa_fcport_send_disable(fcport))
2781 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2782 else
2783 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2784
2785 bfa_fcport_reset_linkinfo(fcport);
2786 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2787 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2788 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2789 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2790 break;
2791
2792 case BFA_FCPORT_SM_STOP:
2793 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2794 break;
2795
2796 case BFA_FCPORT_SM_HWFAIL:
2797 bfa_fcport_reset_linkinfo(fcport);
2798 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2799 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2800 break;
2801
2802 default:
2803 bfa_sm_fault(fcport->bfa, event);
2804 }
2805}
2806
2683/* 2807/*
2684 * Link state is down 2808 * Link state is down
2685 */ 2809 */
@@ -2943,6 +3067,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2943 */ 3067 */
2944 do_gettimeofday(&tv); 3068 do_gettimeofday(&tv);
2945 fcport->stats_reset_time = tv.tv_sec; 3069 fcport->stats_reset_time = tv.tv_sec;
3070 fcport->stats_dma_ready = BFA_FALSE;
2946 3071
2947 /* 3072 /*
2948 * initialize and set default configuration 3073 * initialize and set default configuration
@@ -2953,6 +3078,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2953 port_cfg->maxfrsize = 0; 3078 port_cfg->maxfrsize = 0;
2954 3079
2955 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; 3080 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
3081 port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
3082 port_cfg->qos_bw.med = BFA_QOS_BW_MED;
3083 port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
2956 3084
2957 INIT_LIST_HEAD(&fcport->stats_pending_q); 3085 INIT_LIST_HEAD(&fcport->stats_pending_q);
2958 INIT_LIST_HEAD(&fcport->statsclr_pending_q); 3086 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
@@ -2996,6 +3124,21 @@ bfa_fcport_iocdisable(struct bfa_s *bfa)
2996 bfa_trunk_iocdisable(bfa); 3124 bfa_trunk_iocdisable(bfa);
2997} 3125}
2998 3126
3127/*
3128 * Update loop info in fcport for SCN online
3129 */
3130static void
3131bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3132 struct bfa_fcport_loop_info_s *loop_info)
3133{
3134 fcport->myalpa = loop_info->myalpa;
3135 fcport->alpabm_valid =
3136 loop_info->alpabm_val;
3137 memcpy(fcport->alpabm.alpa_bm,
3138 loop_info->alpabm.alpa_bm,
3139 sizeof(struct fc_alpabm_s));
3140}
3141
2999static void 3142static void
3000bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) 3143bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
3001{ 3144{
@@ -3005,12 +3148,15 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
3005 fcport->speed = pevent->link_state.speed; 3148 fcport->speed = pevent->link_state.speed;
3006 fcport->topology = pevent->link_state.topology; 3149 fcport->topology = pevent->link_state.topology;
3007 3150
3008 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) 3151 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
3009 fcport->myalpa = 0; 3152 bfa_fcport_update_loop_info(fcport,
3153 &pevent->link_state.attr.loop_info);
3154 return;
3155 }
3010 3156
3011 /* QoS Details */ 3157 /* QoS Details */
3012 fcport->qos_attr = pevent->link_state.qos_attr; 3158 fcport->qos_attr = pevent->link_state.qos_attr;
3013 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; 3159 fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
3014 3160
3015 /* 3161 /*
3016 * update trunk state if applicable 3162 * update trunk state if applicable
@@ -3019,7 +3165,8 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
3019 trunk->attr.state = BFA_TRUNK_DISABLED; 3165 trunk->attr.state = BFA_TRUNK_DISABLED;
3020 3166
3021 /* update FCoE specific */ 3167 /* update FCoE specific */
3022 fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan); 3168 fcport->fcoe_vlan =
3169 be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);
3023 3170
3024 bfa_trc(fcport->bfa, fcport->speed); 3171 bfa_trc(fcport->bfa, fcport->speed);
3025 bfa_trc(fcport->bfa, fcport->topology); 3172 bfa_trc(fcport->bfa, fcport->topology);
@@ -3453,6 +3600,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3453 case BFI_FCPORT_I2H_ENABLE_RSP: 3600 case BFI_FCPORT_I2H_ENABLE_RSP:
3454 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) { 3601 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3455 3602
3603 fcport->stats_dma_ready = BFA_TRUE;
3456 if (fcport->use_flash_cfg) { 3604 if (fcport->use_flash_cfg) {
3457 fcport->cfg = i2hmsg.penable_rsp->port_cfg; 3605 fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3458 fcport->cfg.maxfrsize = 3606 fcport->cfg.maxfrsize =
@@ -3468,6 +3616,8 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3468 else 3616 else
3469 fcport->trunk.attr.state = 3617 fcport->trunk.attr.state =
3470 BFA_TRUNK_DISABLED; 3618 BFA_TRUNK_DISABLED;
3619 fcport->qos_attr.qos_bw =
3620 i2hmsg.penable_rsp->port_cfg.qos_bw;
3471 fcport->use_flash_cfg = BFA_FALSE; 3621 fcport->use_flash_cfg = BFA_FALSE;
3472 } 3622 }
3473 3623
@@ -3476,6 +3626,9 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3476 else 3626 else
3477 fcport->qos_attr.state = BFA_QOS_DISABLED; 3627 fcport->qos_attr.state = BFA_QOS_DISABLED;
3478 3628
3629 fcport->qos_attr.qos_bw_op =
3630 i2hmsg.penable_rsp->port_cfg.qos_bw;
3631
3479 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); 3632 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3480 } 3633 }
3481 break; 3634 break;
@@ -3488,8 +3641,17 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3488 case BFI_FCPORT_I2H_EVENT: 3641 case BFI_FCPORT_I2H_EVENT:
3489 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) 3642 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3490 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); 3643 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3491 else 3644 else {
3492 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); 3645 if (i2hmsg.event->link_state.linkstate_rsn ==
3646 BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
3647 bfa_sm_send_event(fcport,
3648 BFA_FCPORT_SM_FAA_MISCONFIG);
3649 else
3650 bfa_sm_send_event(fcport,
3651 BFA_FCPORT_SM_LINKDOWN);
3652 }
3653 fcport->qos_attr.qos_bw_op =
3654 i2hmsg.event->link_state.qos_attr.qos_bw_op;
3493 break; 3655 break;
3494 3656
3495 case BFI_FCPORT_I2H_TRUNK_SCN: 3657 case BFI_FCPORT_I2H_TRUNK_SCN:
@@ -3609,6 +3771,9 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3609 3771
3610 if (fcport->cfg.trunked == BFA_TRUE) 3772 if (fcport->cfg.trunked == BFA_TRUE)
3611 return BFA_STATUS_TRUNK_ENABLED; 3773 return BFA_STATUS_TRUNK_ENABLED;
3774 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3775 (speed == BFA_PORT_SPEED_16GBPS))
3776 return BFA_STATUS_UNSUPP_SPEED;
3612 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { 3777 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3613 bfa_trc(bfa, fcport->speed_sup); 3778 bfa_trc(bfa, fcport->speed_sup);
3614 return BFA_STATUS_UNSUPP_SPEED; 3779 return BFA_STATUS_UNSUPP_SPEED;
@@ -3663,7 +3828,26 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3663 3828
3664 switch (topology) { 3829 switch (topology) {
3665 case BFA_PORT_TOPOLOGY_P2P: 3830 case BFA_PORT_TOPOLOGY_P2P:
3831 break;
3832
3666 case BFA_PORT_TOPOLOGY_LOOP: 3833 case BFA_PORT_TOPOLOGY_LOOP:
3834 if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3835 (fcport->qos_attr.state != BFA_QOS_DISABLED))
3836 return BFA_STATUS_ERROR_QOS_ENABLED;
3837 if (fcport->cfg.ratelimit != BFA_FALSE)
3838 return BFA_STATUS_ERROR_TRL_ENABLED;
3839 if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3840 (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3841 return BFA_STATUS_ERROR_TRUNK_ENABLED;
3842 if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3843 (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3844 return BFA_STATUS_UNSUPP_SPEED;
3845 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3846 return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3847 if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3848 return BFA_STATUS_DPORT_ERR;
3849 break;
3850
3667 case BFA_PORT_TOPOLOGY_AUTO: 3851 case BFA_PORT_TOPOLOGY_AUTO:
3668 break; 3852 break;
3669 3853
@@ -3686,6 +3870,17 @@ bfa_fcport_get_topology(struct bfa_s *bfa)
3686 return fcport->topology; 3870 return fcport->topology;
3687} 3871}
3688 3872
3873/**
3874 * Get config topology.
3875 */
3876enum bfa_port_topology
3877bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3878{
3879 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3880
3881 return fcport->cfg.topology;
3882}
3883
3689bfa_status_t 3884bfa_status_t
3690bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) 3885bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3691{ 3886{
@@ -3761,9 +3956,11 @@ bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3761u8 3956u8
3762bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) 3957bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3763{ 3958{
3764 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3959 if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3960 return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3765 3961
3766 return fcport->cfg.rx_bbcredit; 3962 else
3963 return 0;
3767} 3964}
3768 3965
3769void 3966void
@@ -3850,8 +4047,9 @@ bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3850{ 4047{
3851 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 4048 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3852 4049
3853 if (bfa_ioc_is_disabled(&bfa->ioc)) 4050 if (!bfa_iocfc_is_operational(bfa) ||
3854 return BFA_STATUS_IOC_DISABLED; 4051 !fcport->stats_dma_ready)
4052 return BFA_STATUS_IOC_NON_OP;
3855 4053
3856 if (!list_empty(&fcport->statsclr_pending_q)) 4054 if (!list_empty(&fcport->statsclr_pending_q))
3857 return BFA_STATUS_DEVBUSY; 4055 return BFA_STATUS_DEVBUSY;
@@ -3876,6 +4074,10 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3876{ 4074{
3877 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 4075 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3878 4076
4077 if (!bfa_iocfc_is_operational(bfa) ||
4078 !fcport->stats_dma_ready)
4079 return BFA_STATUS_IOC_NON_OP;
4080
3879 if (!list_empty(&fcport->stats_pending_q)) 4081 if (!list_empty(&fcport->stats_pending_q))
3880 return BFA_STATUS_DEVBUSY; 4082 return BFA_STATUS_DEVBUSY;
3881 4083
@@ -3905,6 +4107,40 @@ bfa_fcport_is_disabled(struct bfa_s *bfa)
3905} 4107}
3906 4108
3907bfa_boolean_t 4109bfa_boolean_t
4110bfa_fcport_is_dport(struct bfa_s *bfa)
4111{
4112 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4113
4114 return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4115 BFA_PORT_ST_DPORT);
4116}
4117
4118bfa_status_t
4119bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4120{
4121 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4122 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4123
4124 bfa_trc(bfa, ioc_type);
4125
4126 if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4127 return BFA_STATUS_QOS_BW_INVALID;
4128
4129 if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4130 return BFA_STATUS_QOS_BW_INVALID;
4131
4132 if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4133 (qos_bw->low > qos_bw->high))
4134 return BFA_STATUS_QOS_BW_INVALID;
4135
4136 if ((ioc_type == BFA_IOC_TYPE_FC) &&
4137 (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4138 fcport->cfg.qos_bw = *qos_bw;
4139
4140 return BFA_STATUS_OK;
4141}
4142
4143bfa_boolean_t
3908bfa_fcport_is_ratelim(struct bfa_s *bfa) 4144bfa_fcport_is_ratelim(struct bfa_s *bfa)
3909{ 4145{
3910 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 4146 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
@@ -3981,6 +4217,26 @@ bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3981 return fcport->cfg.trunked; 4217 return fcport->cfg.trunked;
3982} 4218}
3983 4219
4220void
4221bfa_fcport_dportenable(struct bfa_s *bfa)
4222{
4223 /*
4224 * Assume caller check for port is in disable state
4225 */
4226 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4227 bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4228}
4229
4230void
4231bfa_fcport_dportdisable(struct bfa_s *bfa)
4232{
4233 /*
4234 * Assume caller check for port is in disable state
4235 */
4236 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4237 bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4238}
4239
3984/* 4240/*
3985 * Rport State machine functions 4241 * Rport State machine functions
3986 */ 4242 */
@@ -4707,6 +4963,21 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4707 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN); 4963 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4708 break; 4964 break;
4709 4965
4966 case BFI_RPORT_I2H_LIP_SCN_ONLINE:
4967 bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
4968 &msg.lip_scn->loop_info);
4969 bfa_cb_rport_scn_online(bfa);
4970 break;
4971
4972 case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
4973 bfa_cb_rport_scn_offline(bfa);
4974 break;
4975
4976 case BFI_RPORT_I2H_NO_DEV:
4977 rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
4978 bfa_cb_rport_scn_no_dev(rp->rport_drv);
4979 break;
4980
4710 default: 4981 default:
4711 bfa_trc(bfa, m->mhdr.msg_id); 4982 bfa_trc(bfa, m->mhdr.msg_id);
4712 WARN_ON(1); 4983 WARN_ON(1);
@@ -5348,6 +5619,37 @@ bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5348} 5619}
5349 5620
5350/* 5621/*
5622 * Dport forward declaration
5623 */
5624
5625/*
5626 * BFA DPORT state machine events
5627 */
5628enum bfa_dport_sm_event {
5629 BFA_DPORT_SM_ENABLE = 1, /* dport enable event */
5630 BFA_DPORT_SM_DISABLE = 2, /* dport disable event */
5631 BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */
5632 BFA_DPORT_SM_QRESUME = 4, /* CQ space available */
5633 BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */
5634};
5635
5636static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5637 enum bfa_dport_sm_event event);
5638static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5639 enum bfa_dport_sm_event event);
5640static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5641 enum bfa_dport_sm_event event);
5642static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5643 enum bfa_dport_sm_event event);
5644static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5645 enum bfa_dport_sm_event event);
5646static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5647 enum bfa_dport_sm_event event);
5648static void bfa_dport_qresume(void *cbarg);
5649static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5650 bfi_diag_dport_rsp_t *msg);
5651
5652/*
5351 * BFA fcdiag module 5653 * BFA fcdiag module
5352 */ 5654 */
5353#define BFA_DIAG_QTEST_TOV 1000 /* msec */ 5655#define BFA_DIAG_QTEST_TOV 1000 /* msec */
@@ -5377,15 +5679,24 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5377 struct bfa_pcidev_s *pcidev) 5679 struct bfa_pcidev_s *pcidev)
5378{ 5680{
5379 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 5681 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5682 struct bfa_dport_s *dport = &fcdiag->dport;
5683
5380 fcdiag->bfa = bfa; 5684 fcdiag->bfa = bfa;
5381 fcdiag->trcmod = bfa->trcmod; 5685 fcdiag->trcmod = bfa->trcmod;
5382 /* The common DIAG attach bfa_diag_attach() will do all memory claim */ 5686 /* The common DIAG attach bfa_diag_attach() will do all memory claim */
5687 dport->bfa = bfa;
5688 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
5689 bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
5690 dport->cbfn = NULL;
5691 dport->cbarg = NULL;
5383} 5692}
5384 5693
5385static void 5694static void
5386bfa_fcdiag_iocdisable(struct bfa_s *bfa) 5695bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5387{ 5696{
5388 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 5697 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5698 struct bfa_dport_s *dport = &fcdiag->dport;
5699
5389 bfa_trc(fcdiag, fcdiag->lb.lock); 5700 bfa_trc(fcdiag, fcdiag->lb.lock);
5390 if (fcdiag->lb.lock) { 5701 if (fcdiag->lb.lock) {
5391 fcdiag->lb.status = BFA_STATUS_IOC_FAILURE; 5702 fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
@@ -5393,6 +5704,8 @@ bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5393 fcdiag->lb.lock = 0; 5704 fcdiag->lb.lock = 0;
5394 bfa_fcdiag_set_busy_status(fcdiag); 5705 bfa_fcdiag_set_busy_status(fcdiag);
5395 } 5706 }
5707
5708 bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
5396} 5709}
5397 5710
5398static void 5711static void
@@ -5577,6 +5890,9 @@ bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5577 case BFI_DIAG_I2H_QTEST: 5890 case BFI_DIAG_I2H_QTEST:
5578 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); 5891 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5579 break; 5892 break;
5893 case BFI_DIAG_I2H_DPORT:
5894 bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg);
5895 break;
5580 default: 5896 default:
5581 bfa_trc(fcdiag, msg->mhdr.msg_id); 5897 bfa_trc(fcdiag, msg->mhdr.msg_id);
5582 WARN_ON(1); 5898 WARN_ON(1);
@@ -5646,12 +5962,18 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5646 } 5962 }
5647 } 5963 }
5648 5964
5965 /*
5966 * For CT2, 1G is not supported
5967 */
5968 if ((speed == BFA_PORT_SPEED_1GBPS) &&
5969 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
5970 bfa_trc(fcdiag, speed);
5971 return BFA_STATUS_UNSUPP_SPEED;
5972 }
5973
5649 /* For Mezz card, port speed entered needs to be checked */ 5974 /* For Mezz card, port speed entered needs to be checked */
5650 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { 5975 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5651 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) { 5976 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5652 if ((speed == BFA_PORT_SPEED_1GBPS) &&
5653 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
5654 return BFA_STATUS_UNSUPP_SPEED;
5655 if (!(speed == BFA_PORT_SPEED_1GBPS || 5977 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5656 speed == BFA_PORT_SPEED_2GBPS || 5978 speed == BFA_PORT_SPEED_2GBPS ||
5657 speed == BFA_PORT_SPEED_4GBPS || 5979 speed == BFA_PORT_SPEED_4GBPS ||
@@ -5764,3 +6086,379 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
5764 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); 6086 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5765 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK; 6087 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
5766} 6088}
6089
6090/*
6091 * D-port
6092 */
6093static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
6094 enum bfi_dport_req req);
6095static void
6096bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6097{
6098 if (dport->cbfn != NULL) {
6099 dport->cbfn(dport->cbarg, bfa_status);
6100 dport->cbfn = NULL;
6101 dport->cbarg = NULL;
6102 }
6103}
6104
6105static void
6106bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6107{
6108 bfa_trc(dport->bfa, event);
6109
6110 switch (event) {
6111 case BFA_DPORT_SM_ENABLE:
6112 bfa_fcport_dportenable(dport->bfa);
6113 if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
6114 bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6115 else
6116 bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
6117 break;
6118
6119 case BFA_DPORT_SM_DISABLE:
6120 /* Already disabled */
6121 break;
6122
6123 case BFA_DPORT_SM_HWFAIL:
6124 /* ignore */
6125 break;
6126
6127 default:
6128 bfa_sm_fault(dport->bfa, event);
6129 }
6130}
6131
6132static void
6133bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
6134 enum bfa_dport_sm_event event)
6135{
6136 bfa_trc(dport->bfa, event);
6137
6138 switch (event) {
6139 case BFA_DPORT_SM_QRESUME:
6140 bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6141 bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
6142 break;
6143
6144 case BFA_DPORT_SM_HWFAIL:
6145 bfa_reqq_wcancel(&dport->reqq_wait);
6146 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6147 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6148 break;
6149
6150 default:
6151 bfa_sm_fault(dport->bfa, event);
6152 }
6153}
6154
6155static void
6156bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6157{
6158 bfa_trc(dport->bfa, event);
6159
6160 switch (event) {
6161 case BFA_DPORT_SM_FWRSP:
6162 bfa_sm_set_state(dport, bfa_dport_sm_enabled);
6163 break;
6164
6165 case BFA_DPORT_SM_HWFAIL:
6166 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6167 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6168 break;
6169
6170 default:
6171 bfa_sm_fault(dport->bfa, event);
6172 }
6173}
6174
6175static void
6176bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6177{
6178 bfa_trc(dport->bfa, event);
6179
6180 switch (event) {
6181 case BFA_DPORT_SM_ENABLE:
6182 /* Already enabled */
6183 break;
6184
6185 case BFA_DPORT_SM_DISABLE:
6186 bfa_fcport_dportdisable(dport->bfa);
6187 if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
6188 bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6189 else
6190 bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
6191 break;
6192
6193 case BFA_DPORT_SM_HWFAIL:
6194 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6195 break;
6196
6197 default:
6198 bfa_sm_fault(dport->bfa, event);
6199 }
6200}
6201
6202static void
6203bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
6204 enum bfa_dport_sm_event event)
6205{
6206 bfa_trc(dport->bfa, event);
6207
6208 switch (event) {
6209 case BFA_DPORT_SM_QRESUME:
6210 bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6211 bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
6212 break;
6213
6214 case BFA_DPORT_SM_HWFAIL:
6215 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6216 bfa_reqq_wcancel(&dport->reqq_wait);
6217 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6218 break;
6219
6220 default:
6221 bfa_sm_fault(dport->bfa, event);
6222 }
6223}
6224
6225static void
6226bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6227{
6228 bfa_trc(dport->bfa, event);
6229
6230 switch (event) {
6231 case BFA_DPORT_SM_FWRSP:
6232 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6233 break;
6234
6235 case BFA_DPORT_SM_HWFAIL:
6236 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6237 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6238 break;
6239
6240 default:
6241 bfa_sm_fault(dport->bfa, event);
6242 }
6243}
6244
6245
6246static bfa_boolean_t
6247bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6248{
6249 struct bfi_diag_dport_req_s *m;
6250
6251 /*
6252 * Increment message tag before queue check, so that responses to old
6253 * requests are discarded.
6254 */
6255 dport->msgtag++;
6256
6257 /*
6258 * check for room in queue to send request now
6259 */
6260 m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6261 if (!m) {
6262 bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6263 return BFA_FALSE;
6264 }
6265
6266 bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6267 bfa_fn_lpu(dport->bfa));
6268 m->req = req;
6269 m->msgtag = dport->msgtag;
6270
6271 /*
6272 * queue I/O message to firmware
6273 */
6274 bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6275
6276 return BFA_TRUE;
6277}
6278
6279static void
6280bfa_dport_qresume(void *cbarg)
6281{
6282 struct bfa_dport_s *dport = cbarg;
6283
6284 bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6285}
6286
6287static void
6288bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg)
6289{
6290 bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
6291 bfa_cb_fcdiag_dport(dport, msg->status);
6292}
6293
6294/*
6295 * Dport enable
6296 *
6297 * @param[in] *bfa - bfa data struct
6298 */
6299bfa_status_t
6300bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6301{
6302 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6303 struct bfa_dport_s *dport = &fcdiag->dport;
6304
6305 /*
6306 * Dport is not support in MEZZ card
6307 */
6308 if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6309 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6310 return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6311 }
6312
6313 /*
6314 * Check to see if IOC is down
6315 */
6316 if (!bfa_iocfc_is_operational(bfa))
6317 return BFA_STATUS_IOC_NON_OP;
6318
6319 /* if port is PBC disabled, return error */
6320 if (bfa_fcport_is_pbcdisabled(bfa)) {
6321 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6322 return BFA_STATUS_PBC;
6323 }
6324
6325 /*
6326 * Check if port mode is FC port
6327 */
6328 if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6329 bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6330 return BFA_STATUS_CMD_NOTSUPP_CNA;
6331 }
6332
6333 /*
6334 * Check if port is in LOOP mode
6335 */
6336 if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6337 (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6338 bfa_trc(dport->bfa, 0);
6339 return BFA_STATUS_TOPOLOGY_LOOP;
6340 }
6341
6342 /*
6343 * Check if port is TRUNK mode
6344 */
6345 if (bfa_fcport_is_trunk_enabled(bfa)) {
6346 bfa_trc(dport->bfa, 0);
6347 return BFA_STATUS_ERROR_TRUNK_ENABLED;
6348 }
6349
6350 /*
6351 * Check to see if port is disable or in dport state
6352 */
6353 if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6354 (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6355 bfa_trc(dport->bfa, 0);
6356 return BFA_STATUS_PORT_NOT_DISABLED;
6357 }
6358
6359 /*
6360 * Check if dport is busy
6361 */
6362 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6363 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6364 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6365 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) {
6366 return BFA_STATUS_DEVBUSY;
6367 }
6368
6369 /*
6370 * Check if dport is already enabled
6371 */
6372 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6373 bfa_trc(dport->bfa, 0);
6374 return BFA_STATUS_DPORT_ENABLED;
6375 }
6376
6377 dport->cbfn = cbfn;
6378 dport->cbarg = cbarg;
6379
6380 bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6381 return BFA_STATUS_OK;
6382}
6383
6384/*
6385 * Dport disable
6386 *
6387 * @param[in] *bfa - bfa data struct
6388 */
6389bfa_status_t
6390bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6391{
6392 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6393 struct bfa_dport_s *dport = &fcdiag->dport;
6394
6395 if (bfa_ioc_is_disabled(&bfa->ioc))
6396 return BFA_STATUS_IOC_DISABLED;
6397
6398 /* if port is PBC disabled, return error */
6399 if (bfa_fcport_is_pbcdisabled(bfa)) {
6400 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6401 return BFA_STATUS_PBC;
6402 }
6403
6404 /*
6405 * Check to see if port is disable or in dport state
6406 */
6407 if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6408 (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6409 bfa_trc(dport->bfa, 0);
6410 return BFA_STATUS_PORT_NOT_DISABLED;
6411 }
6412
6413 /*
6414 * Check if dport is busy
6415 */
6416 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6417 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6418 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6419 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
6420 return BFA_STATUS_DEVBUSY;
6421
6422 /*
6423 * Check if dport is already disabled
6424 */
6425 if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6426 bfa_trc(dport->bfa, 0);
6427 return BFA_STATUS_DPORT_DISABLED;
6428 }
6429
6430 dport->cbfn = cbfn;
6431 dport->cbarg = cbarg;
6432
6433 bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6434 return BFA_STATUS_OK;
6435}
6436
6437/*
6438 * Get D-port state
6439 *
6440 * @param[in] *bfa - bfa data struct
6441 */
6442
6443bfa_status_t
6444bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state)
6445{
6446 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6447 struct bfa_dport_s *dport = &fcdiag->dport;
6448
6449 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled))
6450 *state = BFA_DPORT_ST_ENABLED;
6451 else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6452 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait))
6453 *state = BFA_DPORT_ST_ENABLING;
6454 else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled))
6455 *state = BFA_DPORT_ST_DISABLED;
6456 else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6457 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
6458 *state = BFA_DPORT_ST_DISABLING;
6459 else {
6460 bfa_trc(dport->bfa, BFA_STATUS_EINVAL);
6461 return BFA_STATUS_EINVAL;
6462 }
6463 return BFA_STATUS_OK;
6464}
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 1abcf7c51661..8d7fbecfcb22 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -474,8 +474,10 @@ struct bfa_fcport_s {
474 /* supported speeds */ 474 /* supported speeds */
475 enum bfa_port_speed speed; /* current speed */ 475 enum bfa_port_speed speed; /* current speed */
476 enum bfa_port_topology topology; /* current topology */ 476 enum bfa_port_topology topology; /* current topology */
477 u8 myalpa; /* my ALPA in LOOP topology */
478 u8 rsvd[3]; 477 u8 rsvd[3];
478 u8 myalpa; /* my ALPA in LOOP topology */
479 u8 alpabm_valid; /* alpa bitmap valid or not */
480 struct fc_alpabm_s alpabm; /* alpa bitmap */
479 struct bfa_port_cfg_s cfg; /* current port configuration */ 481 struct bfa_port_cfg_s cfg; /* current port configuration */
480 bfa_boolean_t use_flash_cfg; /* get port cfg from flash */ 482 bfa_boolean_t use_flash_cfg; /* get port cfg from flash */
481 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ 483 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
@@ -512,6 +514,7 @@ struct bfa_fcport_s {
512 struct bfa_fcport_trunk_s trunk; 514 struct bfa_fcport_trunk_s trunk;
513 u16 fcoe_vlan; 515 u16 fcoe_vlan;
514 struct bfa_mem_dma_s fcport_dma; 516 struct bfa_mem_dma_s fcport_dma;
517 bfa_boolean_t stats_dma_ready;
515}; 518};
516 519
517#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) 520#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
@@ -534,6 +537,7 @@ enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
534bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, 537bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
535 enum bfa_port_topology topo); 538 enum bfa_port_topology topo);
536enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); 539enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
540enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa);
537bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); 541bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
538bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); 542bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
539u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); 543u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
@@ -547,6 +551,9 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
547 void (*event_cbfn) (void *cbarg, 551 void (*event_cbfn) (void *cbarg,
548 enum bfa_port_linkstate event), void *event_cbarg); 552 enum bfa_port_linkstate event), void *event_cbarg);
549bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); 553bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
554bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa);
555bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa,
556 struct bfa_qos_bw_s *qos_bw);
550enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); 557enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
551 558
552void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn); 559void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn);
@@ -560,6 +567,8 @@ bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
560 struct bfa_cb_pending_q_s *cb); 567 struct bfa_cb_pending_q_s *cb);
561bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); 568bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
562bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); 569bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
570void bfa_fcport_dportenable(struct bfa_s *bfa);
571void bfa_fcport_dportdisable(struct bfa_s *bfa);
563bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); 572bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
564void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); 573void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
565 574
@@ -575,6 +584,9 @@ void bfa_cb_rport_offline(void *rport);
575void bfa_cb_rport_qos_scn_flowid(void *rport, 584void bfa_cb_rport_qos_scn_flowid(void *rport,
576 struct bfa_rport_qos_attr_s old_qos_attr, 585 struct bfa_rport_qos_attr_s old_qos_attr,
577 struct bfa_rport_qos_attr_s new_qos_attr); 586 struct bfa_rport_qos_attr_s new_qos_attr);
587void bfa_cb_rport_scn_online(struct bfa_s *bfa);
588void bfa_cb_rport_scn_offline(struct bfa_s *bfa);
589void bfa_cb_rport_scn_no_dev(void *rp);
578void bfa_cb_rport_qos_scn_prio(void *rport, 590void bfa_cb_rport_qos_scn_prio(void *rport,
579 struct bfa_rport_qos_attr_s old_qos_attr, 591 struct bfa_rport_qos_attr_s old_qos_attr,
580 struct bfa_rport_qos_attr_s new_qos_attr); 592 struct bfa_rport_qos_attr_s new_qos_attr);
@@ -697,11 +709,21 @@ struct bfa_fcdiag_lb_s {
697 u32 status; 709 u32 status;
698}; 710};
699 711
712struct bfa_dport_s {
713 struct bfa_s *bfa; /* Back pointer to BFA */
714 bfa_sm_t sm; /* finite state machine */
715 u32 msgtag; /* firmware msg tag for reply */
716 struct bfa_reqq_wait_s reqq_wait;
717 bfa_cb_diag_t cbfn;
718 void *cbarg;
719};
720
700struct bfa_fcdiag_s { 721struct bfa_fcdiag_s {
701 struct bfa_s *bfa; /* Back pointer to BFA */ 722 struct bfa_s *bfa; /* Back pointer to BFA */
702 struct bfa_trc_mod_s *trcmod; 723 struct bfa_trc_mod_s *trcmod;
703 struct bfa_fcdiag_lb_s lb; 724 struct bfa_fcdiag_lb_s lb;
704 struct bfa_fcdiag_qtest_s qtest; 725 struct bfa_fcdiag_qtest_s qtest;
726 struct bfa_dport_s dport;
705}; 727};
706 728
707#define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag) 729#define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag)
@@ -717,5 +739,11 @@ bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
717 u32 queue, struct bfa_diag_qtest_result_s *result, 739 u32 queue, struct bfa_diag_qtest_result_s *result,
718 bfa_cb_diag_t cbfn, void *cbarg); 740 bfa_cb_diag_t cbfn, void *cbarg);
719bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa); 741bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
742bfa_status_t bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
743 void *cbarg);
744bfa_status_t bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
745 void *cbarg);
746bfa_status_t bfa_dport_get_state(struct bfa_s *bfa,
747 enum bfa_dport_state *state);
720 748
721#endif /* __BFA_SVC_H__ */ 749#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index c37494916a1a..895b0e516e07 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
63u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; 63u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
64u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; 64u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
65 65
66#define BFAD_FW_FILE_CB "cbfw.bin" 66#define BFAD_FW_FILE_CB "cbfw-3.1.0.0.bin"
67#define BFAD_FW_FILE_CT "ctfw.bin" 67#define BFAD_FW_FILE_CT "ctfw-3.1.0.0.bin"
68#define BFAD_FW_FILE_CT2 "ct2fw.bin" 68#define BFAD_FW_FILE_CT2 "ct2fw-3.1.0.0.bin"
69 69
70static u32 *bfad_load_fwimg(struct pci_dev *pdev); 70static u32 *bfad_load_fwimg(struct pci_dev *pdev);
71static void bfad_free_fwimg(void); 71static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 0afa39076cef..555e7db94a1c 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -33,7 +33,7 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
33 /* If IOC is not in disabled state - return */ 33 /* If IOC is not in disabled state - return */
34 if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { 34 if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
35 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 35 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
36 iocmd->status = BFA_STATUS_IOC_FAILURE; 36 iocmd->status = BFA_STATUS_OK;
37 return rc; 37 return rc;
38 } 38 }
39 39
@@ -54,6 +54,12 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
54 unsigned long flags; 54 unsigned long flags;
55 55
56 spin_lock_irqsave(&bfad->bfad_lock, flags); 56 spin_lock_irqsave(&bfad->bfad_lock, flags);
57 if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
58 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
59 iocmd->status = BFA_STATUS_OK;
60 return rc;
61 }
62
57 if (bfad->disable_active) { 63 if (bfad->disable_active) {
58 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 64 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
59 return -EBUSY; 65 return -EBUSY;
@@ -101,9 +107,10 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
101 107
102 /* set adapter hw path */ 108 /* set adapter hw path */
103 strcpy(iocmd->adapter_hwpath, bfad->pci_name); 109 strcpy(iocmd->adapter_hwpath, bfad->pci_name);
104 i = strlen(iocmd->adapter_hwpath) - 1; 110 for (i = 0; iocmd->adapter_hwpath[i] != ':' && i < BFA_STRING_32; i++)
105 while (iocmd->adapter_hwpath[i] != '.') 111 ;
106 i--; 112 for (; iocmd->adapter_hwpath[++i] != ':' && i < BFA_STRING_32; )
113 ;
107 iocmd->adapter_hwpath[i] = '\0'; 114 iocmd->adapter_hwpath[i] = '\0';
108 iocmd->status = BFA_STATUS_OK; 115 iocmd->status = BFA_STATUS_OK;
109 return 0; 116 return 0;
@@ -880,6 +887,19 @@ out:
880} 887}
881 888
882int 889int
890bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
891{
892 struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
893 unsigned long flags;
894
895 spin_lock_irqsave(&bfad->bfad_lock, flags);
896 iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
897 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
898
899 return 0;
900}
901
902int
883bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) 903bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
884{ 904{
885 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; 905 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
@@ -888,16 +908,22 @@ bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
888 908
889 spin_lock_irqsave(&bfad->bfad_lock, flags); 909 spin_lock_irqsave(&bfad->bfad_lock, flags);
890 910
891 if (cmd == IOCMD_RATELIM_ENABLE) 911 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
892 fcport->cfg.ratelimit = BFA_TRUE; 912 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
893 else if (cmd == IOCMD_RATELIM_DISABLE) 913 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
894 fcport->cfg.ratelimit = BFA_FALSE; 914 else {
915 if (cmd == IOCMD_RATELIM_ENABLE)
916 fcport->cfg.ratelimit = BFA_TRUE;
917 else if (cmd == IOCMD_RATELIM_DISABLE)
918 fcport->cfg.ratelimit = BFA_FALSE;
895 919
896 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) 920 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
897 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; 921 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
922
923 iocmd->status = BFA_STATUS_OK;
924 }
898 925
899 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 926 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
900 iocmd->status = BFA_STATUS_OK;
901 927
902 return 0; 928 return 0;
903} 929}
@@ -919,8 +945,13 @@ bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
919 return 0; 945 return 0;
920 } 946 }
921 947
922 fcport->cfg.trl_def_speed = iocmd->speed; 948 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
923 iocmd->status = BFA_STATUS_OK; 949 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
950 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
951 else {
952 fcport->cfg.trl_def_speed = iocmd->speed;
953 iocmd->status = BFA_STATUS_OK;
954 }
924 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 955 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
925 956
926 return 0; 957 return 0;
@@ -1167,8 +1198,8 @@ bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
1167 spin_lock_irqsave(&bfad->bfad_lock, flags); 1198 spin_lock_irqsave(&bfad->bfad_lock, flags);
1168 iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk, 1199 iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
1169 &iocmd->pcifn_id, iocmd->port, 1200 &iocmd->pcifn_id, iocmd->port,
1170 iocmd->pcifn_class, iocmd->bandwidth, 1201 iocmd->pcifn_class, iocmd->bw_min,
1171 bfad_hcb_comp, &fcomp); 1202 iocmd->bw_max, bfad_hcb_comp, &fcomp);
1172 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1203 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1173 if (iocmd->status != BFA_STATUS_OK) 1204 if (iocmd->status != BFA_STATUS_OK)
1174 goto out; 1205 goto out;
@@ -1211,8 +1242,8 @@ bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
1211 init_completion(&fcomp.comp); 1242 init_completion(&fcomp.comp);
1212 spin_lock_irqsave(&bfad->bfad_lock, flags); 1243 spin_lock_irqsave(&bfad->bfad_lock, flags);
1213 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, 1244 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
1214 iocmd->pcifn_id, iocmd->bandwidth, 1245 iocmd->pcifn_id, iocmd->bw_min,
1215 bfad_hcb_comp, &fcomp); 1246 iocmd->bw_max, bfad_hcb_comp, &fcomp);
1216 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1247 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1217 bfa_trc(bfad, iocmd->status); 1248 bfa_trc(bfad, iocmd->status);
1218 if (iocmd->status != BFA_STATUS_OK) 1249 if (iocmd->status != BFA_STATUS_OK)
@@ -1736,6 +1767,52 @@ bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1736} 1767}
1737 1768
1738int 1769int
1770bfad_iocmd_diag_cfg_dport(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1771{
1772 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1773 unsigned long flags;
1774 struct bfad_hal_comp fcomp;
1775
1776 init_completion(&fcomp.comp);
1777 spin_lock_irqsave(&bfad->bfad_lock, flags);
1778 if (cmd == IOCMD_DIAG_DPORT_ENABLE)
1779 iocmd->status = bfa_dport_enable(&bfad->bfa,
1780 bfad_hcb_comp, &fcomp);
1781 else if (cmd == IOCMD_DIAG_DPORT_DISABLE)
1782 iocmd->status = bfa_dport_disable(&bfad->bfa,
1783 bfad_hcb_comp, &fcomp);
1784 else {
1785 bfa_trc(bfad, 0);
1786 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1787 return -EINVAL;
1788 }
1789 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1790
1791 if (iocmd->status != BFA_STATUS_OK)
1792 bfa_trc(bfad, iocmd->status);
1793 else {
1794 wait_for_completion(&fcomp.comp);
1795 iocmd->status = fcomp.status;
1796 }
1797
1798 return 0;
1799}
1800
1801int
1802bfad_iocmd_diag_dport_get_state(struct bfad_s *bfad, void *pcmd)
1803{
1804 struct bfa_bsg_diag_dport_get_state_s *iocmd =
1805 (struct bfa_bsg_diag_dport_get_state_s *)pcmd;
1806 unsigned long flags;
1807
1808 spin_lock_irqsave(&bfad->bfad_lock, flags);
1809 iocmd->status = bfa_dport_get_state(&bfad->bfa, &iocmd->state);
1810 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1811
1812 return 0;
1813}
1814
1815int
1739bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) 1816bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1740{ 1817{
1741 struct bfa_bsg_phy_attr_s *iocmd = 1818 struct bfa_bsg_phy_attr_s *iocmd =
@@ -2052,7 +2129,7 @@ bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2052 init_completion(&fcomp.comp); 2129 init_completion(&fcomp.comp);
2053 spin_lock_irqsave(&bfad->bfad_lock, flags); 2130 spin_lock_irqsave(&bfad->bfad_lock, flags);
2054 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), 2131 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2055 BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), 2132 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2056 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, 2133 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2057 bfad_hcb_comp, &fcomp); 2134 bfad_hcb_comp, &fcomp);
2058 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2135 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2074,7 +2151,7 @@ bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2074 init_completion(&fcomp.comp); 2151 init_completion(&fcomp.comp);
2075 spin_lock_irqsave(&bfad->bfad_lock, flags); 2152 spin_lock_irqsave(&bfad->bfad_lock, flags);
2076 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), 2153 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2077 BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), 2154 BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2078 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, 2155 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2079 bfad_hcb_comp, &fcomp); 2156 bfad_hcb_comp, &fcomp);
2080 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2157 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2161,22 +2238,31 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2161 2238
2162 spin_lock_irqsave(&bfad->bfad_lock, flags); 2239 spin_lock_irqsave(&bfad->bfad_lock, flags);
2163 2240
2164 if (v_cmd == IOCMD_TRUNK_ENABLE) { 2241 if (bfa_fcport_is_dport(&bfad->bfa))
2165 trunk->attr.state = BFA_TRUNK_OFFLINE; 2242 return BFA_STATUS_DPORT_ERR;
2166 bfa_fcport_disable(&bfad->bfa);
2167 fcport->cfg.trunked = BFA_TRUE;
2168 } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2169 trunk->attr.state = BFA_TRUNK_DISABLED;
2170 bfa_fcport_disable(&bfad->bfa);
2171 fcport->cfg.trunked = BFA_FALSE;
2172 }
2173 2243
2174 if (!bfa_fcport_is_disabled(&bfad->bfa)) 2244 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2175 bfa_fcport_enable(&bfad->bfa); 2245 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2246 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2247 else {
2248 if (v_cmd == IOCMD_TRUNK_ENABLE) {
2249 trunk->attr.state = BFA_TRUNK_OFFLINE;
2250 bfa_fcport_disable(&bfad->bfa);
2251 fcport->cfg.trunked = BFA_TRUE;
2252 } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2253 trunk->attr.state = BFA_TRUNK_DISABLED;
2254 bfa_fcport_disable(&bfad->bfa);
2255 fcport->cfg.trunked = BFA_FALSE;
2256 }
2257
2258 if (!bfa_fcport_is_disabled(&bfad->bfa))
2259 bfa_fcport_enable(&bfad->bfa);
2260
2261 iocmd->status = BFA_STATUS_OK;
2262 }
2176 2263
2177 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2264 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2178 2265
2179 iocmd->status = BFA_STATUS_OK;
2180 return 0; 2266 return 0;
2181} 2267}
2182 2268
@@ -2189,12 +2275,17 @@ bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2189 unsigned long flags; 2275 unsigned long flags;
2190 2276
2191 spin_lock_irqsave(&bfad->bfad_lock, flags); 2277 spin_lock_irqsave(&bfad->bfad_lock, flags);
2192 memcpy((void *)&iocmd->attr, (void *)&trunk->attr, 2278 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2193 sizeof(struct bfa_trunk_attr_s)); 2279 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2194 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa); 2280 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2281 else {
2282 memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2283 sizeof(struct bfa_trunk_attr_s));
2284 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2285 iocmd->status = BFA_STATUS_OK;
2286 }
2195 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2287 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2196 2288
2197 iocmd->status = BFA_STATUS_OK;
2198 return 0; 2289 return 0;
2199} 2290}
2200 2291
@@ -2207,14 +2298,22 @@ bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2207 2298
2208 spin_lock_irqsave(&bfad->bfad_lock, flags); 2299 spin_lock_irqsave(&bfad->bfad_lock, flags);
2209 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { 2300 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2210 if (v_cmd == IOCMD_QOS_ENABLE) 2301 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2211 fcport->cfg.qos_enabled = BFA_TRUE; 2302 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2212 else if (v_cmd == IOCMD_QOS_DISABLE) 2303 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2213 fcport->cfg.qos_enabled = BFA_FALSE; 2304 else {
2305 if (v_cmd == IOCMD_QOS_ENABLE)
2306 fcport->cfg.qos_enabled = BFA_TRUE;
2307 else if (v_cmd == IOCMD_QOS_DISABLE) {
2308 fcport->cfg.qos_enabled = BFA_FALSE;
2309 fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
2310 fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
2311 fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
2312 }
2313 }
2214 } 2314 }
2215 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2315 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2216 2316
2217 iocmd->status = BFA_STATUS_OK;
2218 return 0; 2317 return 0;
2219} 2318}
2220 2319
@@ -2226,11 +2325,21 @@ bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2226 unsigned long flags; 2325 unsigned long flags;
2227 2326
2228 spin_lock_irqsave(&bfad->bfad_lock, flags); 2327 spin_lock_irqsave(&bfad->bfad_lock, flags);
2229 iocmd->attr.state = fcport->qos_attr.state; 2328 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2230 iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr); 2329 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2330 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2331 else {
2332 iocmd->attr.state = fcport->qos_attr.state;
2333 iocmd->attr.total_bb_cr =
2334 be32_to_cpu(fcport->qos_attr.total_bb_cr);
2335 iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
2336 iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
2337 iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
2338 iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
2339 iocmd->status = BFA_STATUS_OK;
2340 }
2231 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2341 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2232 2342
2233 iocmd->status = BFA_STATUS_OK;
2234 return 0; 2343 return 0;
2235} 2344}
2236 2345
@@ -2274,6 +2383,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2274 struct bfad_hal_comp fcomp; 2383 struct bfad_hal_comp fcomp;
2275 unsigned long flags; 2384 unsigned long flags;
2276 struct bfa_cb_pending_q_s cb_qe; 2385 struct bfa_cb_pending_q_s cb_qe;
2386 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2277 2387
2278 init_completion(&fcomp.comp); 2388 init_completion(&fcomp.comp);
2279 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2389 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
@@ -2281,7 +2391,11 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2281 2391
2282 spin_lock_irqsave(&bfad->bfad_lock, flags); 2392 spin_lock_irqsave(&bfad->bfad_lock, flags);
2283 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); 2393 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2284 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); 2394 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2395 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2396 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2397 else
2398 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2285 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2399 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2286 if (iocmd->status != BFA_STATUS_OK) { 2400 if (iocmd->status != BFA_STATUS_OK) {
2287 bfa_trc(bfad, iocmd->status); 2401 bfa_trc(bfad, iocmd->status);
@@ -2300,6 +2414,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2300 struct bfad_hal_comp fcomp; 2414 struct bfad_hal_comp fcomp;
2301 unsigned long flags; 2415 unsigned long flags;
2302 struct bfa_cb_pending_q_s cb_qe; 2416 struct bfa_cb_pending_q_s cb_qe;
2417 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2303 2418
2304 init_completion(&fcomp.comp); 2419 init_completion(&fcomp.comp);
2305 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, 2420 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
@@ -2307,7 +2422,11 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2307 2422
2308 spin_lock_irqsave(&bfad->bfad_lock, flags); 2423 spin_lock_irqsave(&bfad->bfad_lock, flags);
2309 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); 2424 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2310 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); 2425 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2426 (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2427 iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2428 else
2429 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2311 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2430 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2312 if (iocmd->status != BFA_STATUS_OK) { 2431 if (iocmd->status != BFA_STATUS_OK) {
2313 bfa_trc(bfad, iocmd->status); 2432 bfa_trc(bfad, iocmd->status);
@@ -2435,6 +2554,139 @@ bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2435 return 0; 2554 return 0;
2436} 2555}
2437 2556
2557int
2558bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
2559{
2560 struct bfa_bsg_fcpim_throttle_s *iocmd =
2561 (struct bfa_bsg_fcpim_throttle_s *)cmd;
2562 unsigned long flags;
2563
2564 spin_lock_irqsave(&bfad->bfad_lock, flags);
2565 iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
2566 (void *)&iocmd->throttle);
2567 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2568
2569 return 0;
2570}
2571
2572int
2573bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
2574{
2575 struct bfa_bsg_fcpim_throttle_s *iocmd =
2576 (struct bfa_bsg_fcpim_throttle_s *)cmd;
2577 unsigned long flags;
2578
2579 spin_lock_irqsave(&bfad->bfad_lock, flags);
2580 iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
2581 iocmd->throttle.cfg_value);
2582 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2583
2584 return 0;
2585}
2586
2587int
2588bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
2589{
2590 struct bfa_bsg_tfru_s *iocmd =
2591 (struct bfa_bsg_tfru_s *)cmd;
2592 struct bfad_hal_comp fcomp;
2593 unsigned long flags = 0;
2594
2595 init_completion(&fcomp.comp);
2596 spin_lock_irqsave(&bfad->bfad_lock, flags);
2597 iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
2598 &iocmd->data, iocmd->len, iocmd->offset,
2599 bfad_hcb_comp, &fcomp);
2600 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2601 if (iocmd->status == BFA_STATUS_OK) {
2602 wait_for_completion(&fcomp.comp);
2603 iocmd->status = fcomp.status;
2604 }
2605
2606 return 0;
2607}
2608
2609int
2610bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
2611{
2612 struct bfa_bsg_tfru_s *iocmd =
2613 (struct bfa_bsg_tfru_s *)cmd;
2614 struct bfad_hal_comp fcomp;
2615 unsigned long flags = 0;
2616
2617 init_completion(&fcomp.comp);
2618 spin_lock_irqsave(&bfad->bfad_lock, flags);
2619 iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
2620 &iocmd->data, iocmd->len, iocmd->offset,
2621 bfad_hcb_comp, &fcomp);
2622 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2623 if (iocmd->status == BFA_STATUS_OK) {
2624 wait_for_completion(&fcomp.comp);
2625 iocmd->status = fcomp.status;
2626 }
2627
2628 return 0;
2629}
2630
2631int
2632bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
2633{
2634 struct bfa_bsg_fruvpd_s *iocmd =
2635 (struct bfa_bsg_fruvpd_s *)cmd;
2636 struct bfad_hal_comp fcomp;
2637 unsigned long flags = 0;
2638
2639 init_completion(&fcomp.comp);
2640 spin_lock_irqsave(&bfad->bfad_lock, flags);
2641 iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
2642 &iocmd->data, iocmd->len, iocmd->offset,
2643 bfad_hcb_comp, &fcomp);
2644 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2645 if (iocmd->status == BFA_STATUS_OK) {
2646 wait_for_completion(&fcomp.comp);
2647 iocmd->status = fcomp.status;
2648 }
2649
2650 return 0;
2651}
2652
2653int
2654bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
2655{
2656 struct bfa_bsg_fruvpd_s *iocmd =
2657 (struct bfa_bsg_fruvpd_s *)cmd;
2658 struct bfad_hal_comp fcomp;
2659 unsigned long flags = 0;
2660
2661 init_completion(&fcomp.comp);
2662 spin_lock_irqsave(&bfad->bfad_lock, flags);
2663 iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
2664 &iocmd->data, iocmd->len, iocmd->offset,
2665 bfad_hcb_comp, &fcomp);
2666 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2667 if (iocmd->status == BFA_STATUS_OK) {
2668 wait_for_completion(&fcomp.comp);
2669 iocmd->status = fcomp.status;
2670 }
2671
2672 return 0;
2673}
2674
2675int
2676bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
2677{
2678 struct bfa_bsg_fruvpd_max_size_s *iocmd =
2679 (struct bfa_bsg_fruvpd_max_size_s *)cmd;
2680 unsigned long flags = 0;
2681
2682 spin_lock_irqsave(&bfad->bfad_lock, flags);
2683 iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
2684 &iocmd->max_size);
2685 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2686
2687 return 0;
2688}
2689
2438static int 2690static int
2439bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, 2691bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2440 unsigned int payload_len) 2692 unsigned int payload_len)
@@ -2660,6 +2912,13 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2660 case IOCMD_DIAG_LB_STAT: 2912 case IOCMD_DIAG_LB_STAT:
2661 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); 2913 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
2662 break; 2914 break;
2915 case IOCMD_DIAG_DPORT_ENABLE:
2916 case IOCMD_DIAG_DPORT_DISABLE:
2917 rc = bfad_iocmd_diag_cfg_dport(bfad, cmd, iocmd);
2918 break;
2919 case IOCMD_DIAG_DPORT_GET_STATE:
2920 rc = bfad_iocmd_diag_dport_get_state(bfad, iocmd);
2921 break;
2663 case IOCMD_PHY_GET_ATTR: 2922 case IOCMD_PHY_GET_ATTR:
2664 rc = bfad_iocmd_phy_get_attr(bfad, iocmd); 2923 rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
2665 break; 2924 break;
@@ -2741,6 +3000,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2741 case IOCMD_QOS_RESET_STATS: 3000 case IOCMD_QOS_RESET_STATS:
2742 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); 3001 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
2743 break; 3002 break;
3003 case IOCMD_QOS_SET_BW:
3004 rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
3005 break;
2744 case IOCMD_VF_GET_STATS: 3006 case IOCMD_VF_GET_STATS:
2745 rc = bfad_iocmd_vf_get_stats(bfad, iocmd); 3007 rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
2746 break; 3008 break;
@@ -2759,6 +3021,29 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2759 case IOCMD_FCPIM_LUNMASK_DELETE: 3021 case IOCMD_FCPIM_LUNMASK_DELETE:
2760 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); 3022 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
2761 break; 3023 break;
3024 case IOCMD_FCPIM_THROTTLE_QUERY:
3025 rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
3026 break;
3027 case IOCMD_FCPIM_THROTTLE_SET:
3028 rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
3029 break;
3030 /* TFRU */
3031 case IOCMD_TFRU_READ:
3032 rc = bfad_iocmd_tfru_read(bfad, iocmd);
3033 break;
3034 case IOCMD_TFRU_WRITE:
3035 rc = bfad_iocmd_tfru_write(bfad, iocmd);
3036 break;
3037 /* FRU */
3038 case IOCMD_FRUVPD_READ:
3039 rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
3040 break;
3041 case IOCMD_FRUVPD_UPDATE:
3042 rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
3043 break;
3044 case IOCMD_FRUVPD_GET_MAX_SIZE:
3045 rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
3046 break;
2762 default: 3047 default:
2763 rc = -EINVAL; 3048 rc = -EINVAL;
2764 break; 3049 break;
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 8c569ddb750d..15e1fc8e796b 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -141,6 +141,17 @@ enum {
141 IOCMD_FCPIM_LUNMASK_QUERY, 141 IOCMD_FCPIM_LUNMASK_QUERY,
142 IOCMD_FCPIM_LUNMASK_ADD, 142 IOCMD_FCPIM_LUNMASK_ADD,
143 IOCMD_FCPIM_LUNMASK_DELETE, 143 IOCMD_FCPIM_LUNMASK_DELETE,
144 IOCMD_DIAG_DPORT_ENABLE,
145 IOCMD_DIAG_DPORT_DISABLE,
146 IOCMD_DIAG_DPORT_GET_STATE,
147 IOCMD_QOS_SET_BW,
148 IOCMD_FCPIM_THROTTLE_QUERY,
149 IOCMD_FCPIM_THROTTLE_SET,
150 IOCMD_TFRU_READ,
151 IOCMD_TFRU_WRITE,
152 IOCMD_FRUVPD_READ,
153 IOCMD_FRUVPD_UPDATE,
154 IOCMD_FRUVPD_GET_MAX_SIZE,
144}; 155};
145 156
146struct bfa_bsg_gen_s { 157struct bfa_bsg_gen_s {
@@ -463,7 +474,8 @@ struct bfa_bsg_pcifn_s {
463 bfa_status_t status; 474 bfa_status_t status;
464 u16 bfad_num; 475 u16 bfad_num;
465 u16 pcifn_id; 476 u16 pcifn_id;
466 u32 bandwidth; 477 u16 bw_min;
478 u16 bw_max;
467 u8 port; 479 u8 port;
468 enum bfi_pcifn_class pcifn_class; 480 enum bfi_pcifn_class pcifn_class;
469 u8 rsvd[1]; 481 u8 rsvd[1];
@@ -613,6 +625,13 @@ struct bfa_bsg_diag_lb_stat_s {
613 u16 rsvd; 625 u16 rsvd;
614}; 626};
615 627
628struct bfa_bsg_diag_dport_get_state_s {
629 bfa_status_t status;
630 u16 bfad_num;
631 u16 rsvd;
632 enum bfa_dport_state state;
633};
634
616struct bfa_bsg_phy_attr_s { 635struct bfa_bsg_phy_attr_s {
617 bfa_status_t status; 636 bfa_status_t status;
618 u16 bfad_num; 637 u16 bfad_num;
@@ -694,6 +713,13 @@ struct bfa_bsg_qos_vc_attr_s {
694 struct bfa_qos_vc_attr_s attr; 713 struct bfa_qos_vc_attr_s attr;
695}; 714};
696 715
716struct bfa_bsg_qos_bw_s {
717 bfa_status_t status;
718 u16 bfad_num;
719 u16 rsvd;
720 struct bfa_qos_bw_s qos_bw;
721};
722
697struct bfa_bsg_vf_stats_s { 723struct bfa_bsg_vf_stats_s {
698 bfa_status_t status; 724 bfa_status_t status;
699 u16 bfad_num; 725 u16 bfad_num;
@@ -722,6 +748,41 @@ struct bfa_bsg_fcpim_lunmask_s {
722 struct scsi_lun lun; 748 struct scsi_lun lun;
723}; 749};
724 750
751struct bfa_bsg_fcpim_throttle_s {
752 bfa_status_t status;
753 u16 bfad_num;
754 u16 vf_id;
755 struct bfa_defs_fcpim_throttle_s throttle;
756};
757
758#define BFA_TFRU_DATA_SIZE 64
759#define BFA_MAX_FRUVPD_TRANSFER_SIZE 0x1000
760
761struct bfa_bsg_tfru_s {
762 bfa_status_t status;
763 u16 bfad_num;
764 u16 rsvd;
765 u32 offset;
766 u32 len;
767 u8 data[BFA_TFRU_DATA_SIZE];
768};
769
770struct bfa_bsg_fruvpd_s {
771 bfa_status_t status;
772 u16 bfad_num;
773 u16 rsvd;
774 u32 offset;
775 u32 len;
776 u8 data[BFA_MAX_FRUVPD_TRANSFER_SIZE];
777};
778
779struct bfa_bsg_fruvpd_max_size_s {
780 bfa_status_t status;
781 u16 bfad_num;
782 u16 rsvd;
783 u32 max_size;
784};
785
725struct bfa_bsg_fcpt_s { 786struct bfa_bsg_fcpt_s {
726 bfa_status_t status; 787 bfa_status_t status;
727 u16 vf_id; 788 u16 vf_id;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 1840651ce1d4..0c64a04f01fa 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -57,7 +57,7 @@
57#ifdef BFA_DRIVER_VERSION 57#ifdef BFA_DRIVER_VERSION
58#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 58#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
59#else 59#else
60#define BFAD_DRIVER_VERSION "3.1.2.0" 60#define BFAD_DRIVER_VERSION "3.1.2.1"
61#endif 61#endif
62 62
63#define BFAD_PROTO_NAME FCPI_NAME 63#define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index b2ba0b2e91b2..57b146bca18c 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -210,7 +210,8 @@ enum bfi_mclass {
210 BFI_MC_PORT = 21, /* Physical port */ 210 BFI_MC_PORT = 21, /* Physical port */
211 BFI_MC_SFP = 22, /* SFP module */ 211 BFI_MC_SFP = 22, /* SFP module */
212 BFI_MC_PHY = 25, /* External PHY message class */ 212 BFI_MC_PHY = 25, /* External PHY message class */
213 BFI_MC_MAX = 32 213 BFI_MC_FRU = 34,
214 BFI_MC_MAX = 35
214}; 215};
215 216
216#define BFI_IOC_MAX_CQS 4 217#define BFI_IOC_MAX_CQS 4
@@ -288,6 +289,9 @@ struct bfi_ioc_attr_s {
288 char optrom_version[BFA_VERSION_LEN]; 289 char optrom_version[BFA_VERSION_LEN];
289 struct bfa_mfg_vpd_s vpd; 290 struct bfa_mfg_vpd_s vpd;
290 u32 card_type; /* card type */ 291 u32 card_type; /* card type */
292 u8 mfg_day; /* manufacturing day */
293 u8 mfg_month; /* manufacturing month */
294 u16 mfg_year; /* manufacturing year */
291}; 295};
292 296
293/* 297/*
@@ -687,7 +691,8 @@ struct bfi_ablk_h2i_pf_req_s {
687 u8 pcifn; 691 u8 pcifn;
688 u8 port; 692 u8 port;
689 u16 pers; 693 u16 pers;
690 u32 bw; 694 u16 bw_min; /* percent BW @ max speed */
695 u16 bw_max; /* percent BW @ max speed */
691}; 696};
692 697
693/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */ 698/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */
@@ -957,6 +962,7 @@ enum bfi_diag_h2i {
957 BFI_DIAG_H2I_TEMPSENSOR = 4, 962 BFI_DIAG_H2I_TEMPSENSOR = 4,
958 BFI_DIAG_H2I_LEDTEST = 5, 963 BFI_DIAG_H2I_LEDTEST = 5,
959 BFI_DIAG_H2I_QTEST = 6, 964 BFI_DIAG_H2I_QTEST = 6,
965 BFI_DIAG_H2I_DPORT = 7,
960}; 966};
961 967
962enum bfi_diag_i2h { 968enum bfi_diag_i2h {
@@ -966,6 +972,7 @@ enum bfi_diag_i2h {
966 BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR), 972 BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR),
967 BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST), 973 BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
968 BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST), 974 BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST),
975 BFI_DIAG_I2H_DPORT = BFA_I2HM(BFI_DIAG_H2I_DPORT),
969}; 976};
970 977
971#define BFI_DIAG_MAX_SGES 2 978#define BFI_DIAG_MAX_SGES 2
@@ -1052,6 +1059,23 @@ struct bfi_diag_qtest_req_s {
1052#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s 1059#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s
1053 1060
1054/* 1061/*
1062 * D-port test
1063 */
1064enum bfi_dport_req {
1065 BFI_DPORT_DISABLE = 0, /* disable dport request */
1066 BFI_DPORT_ENABLE = 1, /* enable dport request */
1067};
1068
1069struct bfi_diag_dport_req_s {
1070 struct bfi_mhdr_s mh; /* 4 bytes */
1071 u8 req; /* request 1: enable 0: disable */
1072 u8 status; /* reply status */
1073 u8 rsvd[2];
1074 u32 msgtag; /* msgtag for reply */
1075};
1076#define bfi_diag_dport_rsp_t struct bfi_diag_dport_req_s
1077
1078/*
1055 * PHY module specific 1079 * PHY module specific
1056 */ 1080 */
1057enum bfi_phy_h2i_msgs_e { 1081enum bfi_phy_h2i_msgs_e {
@@ -1147,6 +1171,50 @@ struct bfi_phy_write_rsp_s {
1147 u32 length; 1171 u32 length;
1148}; 1172};
1149 1173
1174enum bfi_fru_h2i_msgs {
1175 BFI_FRUVPD_H2I_WRITE_REQ = 1,
1176 BFI_FRUVPD_H2I_READ_REQ = 2,
1177 BFI_TFRU_H2I_WRITE_REQ = 3,
1178 BFI_TFRU_H2I_READ_REQ = 4,
1179};
1180
1181enum bfi_fru_i2h_msgs {
1182 BFI_FRUVPD_I2H_WRITE_RSP = BFA_I2HM(1),
1183 BFI_FRUVPD_I2H_READ_RSP = BFA_I2HM(2),
1184 BFI_TFRU_I2H_WRITE_RSP = BFA_I2HM(3),
1185 BFI_TFRU_I2H_READ_RSP = BFA_I2HM(4),
1186};
1187
1188/*
1189 * FRU write request
1190 */
1191struct bfi_fru_write_req_s {
1192 struct bfi_mhdr_s mh; /* Common msg header */
1193 u8 last;
1194 u8 rsv[3];
1195 u32 offset;
1196 u32 length;
1197 struct bfi_alen_s alen;
1198};
1199
1200/*
1201 * FRU read request
1202 */
1203struct bfi_fru_read_req_s {
1204 struct bfi_mhdr_s mh; /* Common msg header */
1205 u32 offset;
1206 u32 length;
1207 struct bfi_alen_s alen;
1208};
1209
1210/*
1211 * FRU response
1212 */
1213struct bfi_fru_rsp_s {
1214 struct bfi_mhdr_s mh; /* Common msg header */
1215 u32 status;
1216 u32 length;
1217};
1150#pragma pack() 1218#pragma pack()
1151 1219
1152#endif /* __BFI_H__ */ 1220#endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index d4220e13cafa..5ae2c167b2c8 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -426,6 +426,7 @@ struct bfi_lps_login_req_s {
426 u8 auth_en; 426 u8 auth_en;
427 u8 lps_role; 427 u8 lps_role;
428 u8 bb_scn; 428 u8 bb_scn;
429 u32 vvl_flag;
429}; 430};
430 431
431struct bfi_lps_login_rsp_s { 432struct bfi_lps_login_rsp_s {
@@ -499,6 +500,9 @@ enum bfi_rport_i2h_msgs {
499 BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1), 500 BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
500 BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2), 501 BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
501 BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3), 502 BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3),
503 BFI_RPORT_I2H_LIP_SCN_ONLINE = BFA_I2HM(4),
504 BFI_RPORT_I2H_LIP_SCN_OFFLINE = BFA_I2HM(5),
505 BFI_RPORT_I2H_NO_DEV = BFA_I2HM(6),
502}; 506};
503 507
504struct bfi_rport_create_req_s { 508struct bfi_rport_create_req_s {
@@ -551,6 +555,14 @@ struct bfi_rport_qos_scn_s {
551 struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */ 555 struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */
552}; 556};
553 557
558struct bfi_rport_lip_scn_s {
559 struct bfi_mhdr_s mh; /*!< common msg header */
560 u16 bfa_handle; /*!< host rport handle */
561 u8 status; /*!< scn online status */
562 u8 rsvd;
563 struct bfa_fcport_loop_info_s loop_info;
564};
565
554union bfi_rport_h2i_msg_u { 566union bfi_rport_h2i_msg_u {
555 struct bfi_msg_s *msg; 567 struct bfi_msg_s *msg;
556 struct bfi_rport_create_req_s *create_req; 568 struct bfi_rport_create_req_s *create_req;
@@ -563,6 +575,7 @@ union bfi_rport_i2h_msg_u {
563 struct bfi_rport_create_rsp_s *create_rsp; 575 struct bfi_rport_create_rsp_s *create_rsp;
564 struct bfi_rport_delete_rsp_s *delete_rsp; 576 struct bfi_rport_delete_rsp_s *delete_rsp;
565 struct bfi_rport_qos_scn_s *qos_scn_evt; 577 struct bfi_rport_qos_scn_s *qos_scn_evt;
578 struct bfi_rport_lip_scn_s *lip_scn;
566}; 579};
567 580
568/* 581/*
@@ -828,6 +841,7 @@ enum bfi_tskim_status {
828 */ 841 */
829 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ 842 BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */
830 BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */ 843 BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */
844 BFI_TSKIM_STS_UTAG = 12, /* unknown tag for request */
831}; 845};
832 846
833struct bfi_tskim_rsp_s { 847struct bfi_tskim_rsp_s {
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index ed5f159e1867..99133bcf53f9 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -338,6 +338,7 @@ enum {
338#define __A2T_AHB_LOAD 0x00000800 338#define __A2T_AHB_LOAD 0x00000800
339#define __WGN_READY 0x00000400 339#define __WGN_READY 0x00000400
340#define __GLBL_PF_VF_CFG_RDY 0x00000200 340#define __GLBL_PF_VF_CFG_RDY 0x00000200
341#define CT2_NFC_STS_REG 0x00027410
341#define CT2_NFC_CSR_CLR_REG 0x00027420 342#define CT2_NFC_CSR_CLR_REG 0x00027420
342#define CT2_NFC_CSR_SET_REG 0x00027424 343#define CT2_NFC_CSR_SET_REG 0x00027424
343#define __HALT_NFC_CONTROLLER 0x00000002 344#define __HALT_NFC_CONTROLLER 0x00000002
@@ -355,6 +356,8 @@ enum {
355 (CT2_CSI_MAC0_CONTROL_REG + \ 356 (CT2_CSI_MAC0_CONTROL_REG + \
356 (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG)) 357 (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
357 358
359#define CT2_NFC_FLASH_STS_REG 0x00014834
360#define __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS 0x00000020
358/* 361/*
359 * Name semaphore registers based on usage 362 * Name semaphore registers based on usage
360 */ 363 */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 078d262ac7cc..666b7ac4475f 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1643,7 +1643,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1643 skb_reset_network_header(skb); 1643 skb_reset_network_header(skb);
1644 skb->mac_len = elen; 1644 skb->mac_len = elen;
1645 skb->protocol = htons(ETH_P_FCOE); 1645 skb->protocol = htons(ETH_P_FCOE);
1646 skb->priority = port->priority; 1646 skb->priority = fcoe->priority;
1647 1647
1648 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && 1648 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
1649 fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { 1649 fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
@@ -1917,7 +1917,6 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1917 struct fcoe_ctlr *ctlr; 1917 struct fcoe_ctlr *ctlr;
1918 struct fcoe_interface *fcoe; 1918 struct fcoe_interface *fcoe;
1919 struct net_device *netdev; 1919 struct net_device *netdev;
1920 struct fcoe_port *port;
1921 int prio; 1920 int prio;
1922 1921
1923 if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE) 1922 if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
@@ -1946,10 +1945,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1946 entry->app.protocol == ETH_P_FCOE) 1945 entry->app.protocol == ETH_P_FCOE)
1947 ctlr->priority = prio; 1946 ctlr->priority = prio;
1948 1947
1949 if (entry->app.protocol == ETH_P_FCOE) { 1948 if (entry->app.protocol == ETH_P_FCOE)
1950 port = lport_priv(ctlr->lp); 1949 fcoe->priority = prio;
1951 port->priority = prio;
1952 }
1953 1950
1954 return NOTIFY_OK; 1951 return NOTIFY_OK;
1955} 1952}
@@ -2180,7 +2177,6 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2180 u8 fup, up; 2177 u8 fup, up;
2181 struct net_device *netdev = fcoe->realdev; 2178 struct net_device *netdev = fcoe->realdev;
2182 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe); 2179 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2183 struct fcoe_port *port = lport_priv(ctlr->lp);
2184 struct dcb_app app = { 2180 struct dcb_app app = {
2185 .priority = 0, 2181 .priority = 0,
2186 .protocol = ETH_P_FCOE 2182 .protocol = ETH_P_FCOE
@@ -2202,8 +2198,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2202 fup = dcb_getapp(netdev, &app); 2198 fup = dcb_getapp(netdev, &app);
2203 } 2199 }
2204 2200
2205 port->priority = ffs(up) ? ffs(up) - 1 : 0; 2201 fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
2206 ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority; 2202 ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
2207 } 2203 }
2208#endif 2204#endif
2209} 2205}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index a624add4f8ec..b42dc32cb5eb 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -71,6 +71,7 @@ do { \
71 * @oem: The offload exchange manager for all local port 71 * @oem: The offload exchange manager for all local port
72 * instances associated with this port 72 * instances associated with this port
73 * @removed: Indicates fcoe interface removed from net device 73 * @removed: Indicates fcoe interface removed from net device
74 * @priority: Priority for the FCoE packet (DCB)
74 * This structure is 1:1 with a net device. 75 * This structure is 1:1 with a net device.
75 */ 76 */
76struct fcoe_interface { 77struct fcoe_interface {
@@ -81,6 +82,7 @@ struct fcoe_interface {
81 struct packet_type fip_packet_type; 82 struct packet_type fip_packet_type;
82 struct fc_exch_mgr *oem; 83 struct fc_exch_mgr *oem;
83 u8 removed; 84 u8 removed;
85 u8 priority;
84}; 86};
85 87
86#define fcoe_to_ctlr(x) \ 88#define fcoe_to_ctlr(x) \
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 14243fa5f8e8..fcb9d0b20ee4 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -851,7 +851,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
851 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); 851 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
852 if (flags & FCP_RSP_LEN_VAL) { 852 if (flags & FCP_RSP_LEN_VAL) {
853 respl = ntohl(rp_ex->fr_rsp_len); 853 respl = ntohl(rp_ex->fr_rsp_len);
854 if (respl != sizeof(*fc_rp_info)) 854 if ((respl != FCP_RESP_RSP_INFO_LEN4) &&
855 (respl != FCP_RESP_RSP_INFO_LEN8))
855 goto len_err; 856 goto len_err;
856 if (fsp->wait_for_comp) { 857 if (fsp->wait_for_comp) {
857 /* Abuse cdb_status for rsp code */ 858 /* Abuse cdb_status for rsp code */
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a184c2443a64..69b59935b53f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -27,6 +27,8 @@
27 27
28struct lpfc_sli2_slim; 28struct lpfc_sli2_slim;
29 29
30#define ELX_MODEL_NAME_SIZE 80
31
30#define LPFC_PCI_DEV_LP 0x1 32#define LPFC_PCI_DEV_LP 0x1
31#define LPFC_PCI_DEV_OC 0x2 33#define LPFC_PCI_DEV_OC 0x2
32 34
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b032562aa0d9..ad16e54ac383 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3935,6 +3935,12 @@ MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
3935# - Only meaningful if BG is turned on (lpfc_enable_bg=1). 3935# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
3936# - Allows you to ultimately specify which profiles to use 3936# - Allows you to ultimately specify which profiles to use
3937# - Default will result in registering capabilities for all profiles. 3937# - Default will result in registering capabilities for all profiles.
3938# - SHOST_DIF_TYPE1_PROTECTION 1
3939# HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
3940# - SHOST_DIX_TYPE0_PROTECTION 8
3941# HBA supports DIX Type 0: Host to HBA protection only
3942# - SHOST_DIX_TYPE1_PROTECTION 16
3943# HBA supports DIX Type 1: Host to HBA Type 1 protection
3938# 3944#
3939*/ 3945*/
3940unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION | 3946unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
@@ -3947,7 +3953,7 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
3947/* 3953/*
3948# lpfc_prot_guard: i 3954# lpfc_prot_guard: i
3949# - Bit mask of protection guard types to register with the SCSI mid-layer 3955# - Bit mask of protection guard types to register with the SCSI mid-layer
3950# - Guard types are currently either 1) IP checksum 2) T10-DIF CRC 3956# - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
3951# - Allows you to ultimately specify which profiles to use 3957# - Allows you to ultimately specify which profiles to use
3952# - Default will result in registering capabilities for all guard types 3958# - Default will result in registering capabilities for all guard types
3953# 3959#
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index e470c489de07..4380a44000bc 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -467,3 +467,4 @@ int lpfc_sli4_read_config(struct lpfc_hba *);
467void lpfc_sli4_node_prep(struct lpfc_hba *); 467void lpfc_sli4_node_prep(struct lpfc_hba *);
468int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); 468int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
469void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); 469void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
470uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index cfe533bc9790..f19e9b6f9f13 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -809,6 +809,8 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
809 phba->fc_ratov = FF_DEF_RATOV; 809 phba->fc_ratov = FF_DEF_RATOV;
810 rc = memcmp(&vport->fc_portname, &sp->portName, 810 rc = memcmp(&vport->fc_portname, &sp->portName,
811 sizeof(vport->fc_portname)); 811 sizeof(vport->fc_portname));
812 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
813
812 if (rc >= 0) { 814 if (rc >= 0) {
813 /* This side will initiate the PLOGI */ 815 /* This side will initiate the PLOGI */
814 spin_lock_irq(shost->host_lock); 816 spin_lock_irq(shost->host_lock);
@@ -3160,7 +3162,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3160 retry = 1; 3162 retry = 1;
3161 break; 3163 break;
3162 } 3164 }
3163 if (cmd == ELS_CMD_PLOGI) { 3165 if ((cmd == ELS_CMD_PLOGI) ||
3166 (cmd == ELS_CMD_PRLI)) {
3164 delay = 1000; 3167 delay = 1000;
3165 maxretry = lpfc_max_els_tries + 1; 3168 maxretry = lpfc_max_els_tries + 1;
3166 retry = 1; 3169 retry = 1;
@@ -3305,7 +3308,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3305 ndlp->nlp_prev_state = ndlp->nlp_state; 3308 ndlp->nlp_prev_state = ndlp->nlp_state;
3306 if (cmd == ELS_CMD_PRLI) 3309 if (cmd == ELS_CMD_PRLI)
3307 lpfc_nlp_set_state(vport, ndlp, 3310 lpfc_nlp_set_state(vport, ndlp,
3308 NLP_STE_REG_LOGIN_ISSUE); 3311 NLP_STE_PRLI_ISSUE);
3309 else 3312 else
3310 lpfc_nlp_set_state(vport, ndlp, 3313 lpfc_nlp_set_state(vport, ndlp,
3311 NLP_STE_NPR_NODE); 3314 NLP_STE_NPR_NODE);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e9845d2ecf10..d7096ad94d3f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1506,9 +1506,10 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1506 } 1506 }
1507 } 1507 }
1508 1508
1509 /* If FCF not available return 0 */ 1509 /* FCF not valid/available or solicitation in progress */
1510 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || 1510 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1511 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) 1511 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
1512 bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
1512 return 0; 1513 return 0;
1513 1514
1514 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { 1515 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
@@ -1842,6 +1843,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1842 "\tFCF_Index : x%x\n" 1843 "\tFCF_Index : x%x\n"
1843 "\tFCF_Avail : x%x\n" 1844 "\tFCF_Avail : x%x\n"
1844 "\tFCF_Valid : x%x\n" 1845 "\tFCF_Valid : x%x\n"
1846 "\tFCF_SOL : x%x\n"
1845 "\tFIP_Priority : x%x\n" 1847 "\tFIP_Priority : x%x\n"
1846 "\tMAC_Provider : x%x\n" 1848 "\tMAC_Provider : x%x\n"
1847 "\tLowest VLANID : x%x\n" 1849 "\tLowest VLANID : x%x\n"
@@ -1852,6 +1854,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1852 bf_get(lpfc_fcf_record_fcf_index, fcf_record), 1854 bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1853 bf_get(lpfc_fcf_record_fcf_avail, fcf_record), 1855 bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1854 bf_get(lpfc_fcf_record_fcf_valid, fcf_record), 1856 bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1857 bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
1855 fcf_record->fip_priority, 1858 fcf_record->fip_priority,
1856 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), 1859 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1857 vlan_id, 1860 vlan_id,
@@ -2185,12 +2188,14 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2185 new_fcf_record)); 2188 new_fcf_record));
2186 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2189 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2187 "2781 FCF (x%x) failed connection " 2190 "2781 FCF (x%x) failed connection "
2188 "list check: (x%x/x%x)\n", 2191 "list check: (x%x/x%x/%x)\n",
2189 bf_get(lpfc_fcf_record_fcf_index, 2192 bf_get(lpfc_fcf_record_fcf_index,
2190 new_fcf_record), 2193 new_fcf_record),
2191 bf_get(lpfc_fcf_record_fcf_avail, 2194 bf_get(lpfc_fcf_record_fcf_avail,
2192 new_fcf_record), 2195 new_fcf_record),
2193 bf_get(lpfc_fcf_record_fcf_valid, 2196 bf_get(lpfc_fcf_record_fcf_valid,
2197 new_fcf_record),
2198 bf_get(lpfc_fcf_record_fcf_sol,
2194 new_fcf_record)); 2199 new_fcf_record));
2195 if ((phba->fcf.fcf_flag & FCF_IN_USE) && 2200 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2196 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2201 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 834b699cac76..2cdeb5434fb7 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1305,6 +1305,11 @@ struct lpfc_mbx_mq_create_ext {
1305#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK 1305#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
1306#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 1306#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
1307#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap 1307#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap
1308#define LPFC_EVT_CODE_LINK_NO_LINK 0x0
1309#define LPFC_EVT_CODE_LINK_10_MBIT 0x1
1310#define LPFC_EVT_CODE_LINK_100_MBIT 0x2
1311#define LPFC_EVT_CODE_LINK_1_GBIT 0x3
1312#define LPFC_EVT_CODE_LINK_10_GBIT 0x4
1308#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE 1313#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE
1309#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001 1314#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001
1310#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap 1315#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap
@@ -1314,6 +1319,13 @@ struct lpfc_mbx_mq_create_ext {
1314#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC 1319#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC
1315#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001 1320#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001
1316#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap 1321#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap
1322#define LPFC_EVT_CODE_FC_NO_LINK 0x0
1323#define LPFC_EVT_CODE_FC_1_GBAUD 0x1
1324#define LPFC_EVT_CODE_FC_2_GBAUD 0x2
1325#define LPFC_EVT_CODE_FC_4_GBAUD 0x4
1326#define LPFC_EVT_CODE_FC_8_GBAUD 0x8
1327#define LPFC_EVT_CODE_FC_10_GBAUD 0xA
1328#define LPFC_EVT_CODE_FC_16_GBAUD 0x10
1317#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI 1329#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI
1318#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001 1330#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001
1319#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap 1331#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap
@@ -1695,8 +1707,14 @@ struct fcf_record {
1695#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF 1707#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
1696#define lpfc_fcf_record_fc_map_2_WORD word7 1708#define lpfc_fcf_record_fc_map_2_WORD word7
1697#define lpfc_fcf_record_fcf_valid_SHIFT 24 1709#define lpfc_fcf_record_fcf_valid_SHIFT 24
1698#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF 1710#define lpfc_fcf_record_fcf_valid_MASK 0x00000001
1699#define lpfc_fcf_record_fcf_valid_WORD word7 1711#define lpfc_fcf_record_fcf_valid_WORD word7
1712#define lpfc_fcf_record_fcf_fc_SHIFT 25
1713#define lpfc_fcf_record_fcf_fc_MASK 0x00000001
1714#define lpfc_fcf_record_fcf_fc_WORD word7
1715#define lpfc_fcf_record_fcf_sol_SHIFT 31
1716#define lpfc_fcf_record_fcf_sol_MASK 0x00000001
1717#define lpfc_fcf_record_fcf_sol_WORD word7
1700 uint32_t word8; 1718 uint32_t word8;
1701#define lpfc_fcf_record_fcf_index_SHIFT 0 1719#define lpfc_fcf_record_fcf_index_SHIFT 0
1702#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF 1720#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 8a55a586dd65..7dc4218d9c4c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1892,8 +1892,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1892 max_speed = 4; 1892 max_speed = 4;
1893 else if (phba->lmt & LMT_2Gb) 1893 else if (phba->lmt & LMT_2Gb)
1894 max_speed = 2; 1894 max_speed = 2;
1895 else 1895 else if (phba->lmt & LMT_1Gb)
1896 max_speed = 1; 1896 max_speed = 1;
1897 else
1898 max_speed = 0;
1897 1899
1898 vp = &phba->vpd; 1900 vp = &phba->vpd;
1899 1901
@@ -2078,9 +2080,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2078 if (descp && descp[0] == '\0') { 2080 if (descp && descp[0] == '\0') {
2079 if (oneConnect) 2081 if (oneConnect)
2080 snprintf(descp, 255, 2082 snprintf(descp, 255,
2081 "Emulex OneConnect %s, %s Initiator, Port %s", 2083 "Emulex OneConnect %s, %s Initiator %s",
2082 m.name, m.function, 2084 m.name, m.function,
2083 phba->Port); 2085 phba->Port);
2086 else if (max_speed == 0)
2087 snprintf(descp, 255,
2088 "Emulex %s %s %s ",
2089 m.name, m.bus, m.function);
2084 else 2090 else
2085 snprintf(descp, 255, 2091 snprintf(descp, 255,
2086 "Emulex %s %d%s %s %s", 2092 "Emulex %s %d%s %s %s",
@@ -3502,6 +3508,119 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3502} 3508}
3503 3509
3504/** 3510/**
3511 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
3512 * @phba: pointer to lpfc hba data structure.
3513 *
3514 * This routine is to get an SLI3 FC port's link speed in Mbps.
3515 *
3516 * Return: link speed in terms of Mbps.
3517 **/
3518uint32_t
3519lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3520{
3521 uint32_t link_speed;
3522
3523 if (!lpfc_is_link_up(phba))
3524 return 0;
3525
3526 switch (phba->fc_linkspeed) {
3527 case LPFC_LINK_SPEED_1GHZ:
3528 link_speed = 1000;
3529 break;
3530 case LPFC_LINK_SPEED_2GHZ:
3531 link_speed = 2000;
3532 break;
3533 case LPFC_LINK_SPEED_4GHZ:
3534 link_speed = 4000;
3535 break;
3536 case LPFC_LINK_SPEED_8GHZ:
3537 link_speed = 8000;
3538 break;
3539 case LPFC_LINK_SPEED_10GHZ:
3540 link_speed = 10000;
3541 break;
3542 case LPFC_LINK_SPEED_16GHZ:
3543 link_speed = 16000;
3544 break;
3545 default:
3546 link_speed = 0;
3547 }
3548 return link_speed;
3549}
3550
3551/**
3552 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3553 * @phba: pointer to lpfc hba data structure.
3554 * @evt_code: asynchronous event code.
3555 * @speed_code: asynchronous event link speed code.
3556 *
3557 * This routine is to parse the giving SLI4 async event link speed code into
3558 * value of Mbps for the link speed.
3559 *
3560 * Return: link speed in terms of Mbps.
3561 **/
3562static uint32_t
3563lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
3564 uint8_t speed_code)
3565{
3566 uint32_t port_speed;
3567
3568 switch (evt_code) {
3569 case LPFC_TRAILER_CODE_LINK:
3570 switch (speed_code) {
3571 case LPFC_EVT_CODE_LINK_NO_LINK:
3572 port_speed = 0;
3573 break;
3574 case LPFC_EVT_CODE_LINK_10_MBIT:
3575 port_speed = 10;
3576 break;
3577 case LPFC_EVT_CODE_LINK_100_MBIT:
3578 port_speed = 100;
3579 break;
3580 case LPFC_EVT_CODE_LINK_1_GBIT:
3581 port_speed = 1000;
3582 break;
3583 case LPFC_EVT_CODE_LINK_10_GBIT:
3584 port_speed = 10000;
3585 break;
3586 default:
3587 port_speed = 0;
3588 }
3589 break;
3590 case LPFC_TRAILER_CODE_FC:
3591 switch (speed_code) {
3592 case LPFC_EVT_CODE_FC_NO_LINK:
3593 port_speed = 0;
3594 break;
3595 case LPFC_EVT_CODE_FC_1_GBAUD:
3596 port_speed = 1000;
3597 break;
3598 case LPFC_EVT_CODE_FC_2_GBAUD:
3599 port_speed = 2000;
3600 break;
3601 case LPFC_EVT_CODE_FC_4_GBAUD:
3602 port_speed = 4000;
3603 break;
3604 case LPFC_EVT_CODE_FC_8_GBAUD:
3605 port_speed = 8000;
3606 break;
3607 case LPFC_EVT_CODE_FC_10_GBAUD:
3608 port_speed = 10000;
3609 break;
3610 case LPFC_EVT_CODE_FC_16_GBAUD:
3611 port_speed = 16000;
3612 break;
3613 default:
3614 port_speed = 0;
3615 }
3616 break;
3617 default:
3618 port_speed = 0;
3619 }
3620 return port_speed;
3621}
3622
3623/**
3505 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3624 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3506 * @phba: pointer to lpfc hba data structure. 3625 * @phba: pointer to lpfc hba data structure.
3507 * @acqe_link: pointer to the async link completion queue entry. 3626 * @acqe_link: pointer to the async link completion queue entry.
@@ -3558,7 +3677,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3558 3677
3559 /* Keep the link status for extra SLI4 state machine reference */ 3678 /* Keep the link status for extra SLI4 state machine reference */
3560 phba->sli4_hba.link_state.speed = 3679 phba->sli4_hba.link_state.speed =
3561 bf_get(lpfc_acqe_link_speed, acqe_link); 3680 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
3681 bf_get(lpfc_acqe_link_speed, acqe_link));
3562 phba->sli4_hba.link_state.duplex = 3682 phba->sli4_hba.link_state.duplex =
3563 bf_get(lpfc_acqe_link_duplex, acqe_link); 3683 bf_get(lpfc_acqe_link_duplex, acqe_link);
3564 phba->sli4_hba.link_state.status = 3684 phba->sli4_hba.link_state.status =
@@ -3570,7 +3690,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3570 phba->sli4_hba.link_state.fault = 3690 phba->sli4_hba.link_state.fault =
3571 bf_get(lpfc_acqe_link_fault, acqe_link); 3691 bf_get(lpfc_acqe_link_fault, acqe_link);
3572 phba->sli4_hba.link_state.logical_speed = 3692 phba->sli4_hba.link_state.logical_speed =
3573 bf_get(lpfc_acqe_logical_link_speed, acqe_link); 3693 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
3694
3574 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3695 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3575 "2900 Async FC/FCoE Link event - Speed:%dGBit " 3696 "2900 Async FC/FCoE Link event - Speed:%dGBit "
3576 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 3697 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
@@ -3580,7 +3701,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3580 phba->sli4_hba.link_state.status, 3701 phba->sli4_hba.link_state.status,
3581 phba->sli4_hba.link_state.type, 3702 phba->sli4_hba.link_state.type,
3582 phba->sli4_hba.link_state.number, 3703 phba->sli4_hba.link_state.number,
3583 phba->sli4_hba.link_state.logical_speed * 10, 3704 phba->sli4_hba.link_state.logical_speed,
3584 phba->sli4_hba.link_state.fault); 3705 phba->sli4_hba.link_state.fault);
3585 /* 3706 /*
3586 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3707 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
@@ -3652,7 +3773,8 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3652 } 3773 }
3653 /* Keep the link status for extra SLI4 state machine reference */ 3774 /* Keep the link status for extra SLI4 state machine reference */
3654 phba->sli4_hba.link_state.speed = 3775 phba->sli4_hba.link_state.speed =
3655 bf_get(lpfc_acqe_fc_la_speed, acqe_fc); 3776 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
3777 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
3656 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 3778 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3657 phba->sli4_hba.link_state.topology = 3779 phba->sli4_hba.link_state.topology =
3658 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 3780 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
@@ -3665,7 +3787,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3665 phba->sli4_hba.link_state.fault = 3787 phba->sli4_hba.link_state.fault =
3666 bf_get(lpfc_acqe_link_fault, acqe_fc); 3788 bf_get(lpfc_acqe_link_fault, acqe_fc);
3667 phba->sli4_hba.link_state.logical_speed = 3789 phba->sli4_hba.link_state.logical_speed =
3668 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); 3790 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
3669 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3791 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3670 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 3792 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3671 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 3793 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
@@ -3675,7 +3797,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3675 phba->sli4_hba.link_state.status, 3797 phba->sli4_hba.link_state.status,
3676 phba->sli4_hba.link_state.type, 3798 phba->sli4_hba.link_state.type,
3677 phba->sli4_hba.link_state.number, 3799 phba->sli4_hba.link_state.number,
3678 phba->sli4_hba.link_state.logical_speed * 10, 3800 phba->sli4_hba.link_state.logical_speed,
3679 phba->sli4_hba.link_state.fault); 3801 phba->sli4_hba.link_state.fault);
3680 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3802 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3681 if (!pmb) { 3803 if (!pmb) {
@@ -3783,14 +3905,18 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3783 case LPFC_SLI_EVENT_STATUS_VALID: 3905 case LPFC_SLI_EVENT_STATUS_VALID:
3784 return; /* no message if the sfp is okay */ 3906 return; /* no message if the sfp is okay */
3785 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 3907 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
3786 sprintf(message, "Not installed"); 3908 sprintf(message, "Optics faulted/incorrectly installed/not " \
3909 "installed - Reseat optics, if issue not "
3910 "resolved, replace.");
3787 break; 3911 break;
3788 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 3912 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
3789 sprintf(message, 3913 sprintf(message,
3790 "Optics of two types installed"); 3914 "Optics of two types installed - Remove one optic or " \
3915 "install matching pair of optics.");
3791 break; 3916 break;
3792 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 3917 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
3793 sprintf(message, "Incompatible optics"); 3918 sprintf(message, "Incompatible optics - Replace with " \
3919 "compatible optics for card to function.");
3794 break; 3920 break;
3795 default: 3921 default:
3796 /* firmware is reporting a status we don't know about */ 3922 /* firmware is reporting a status we don't know about */
@@ -4161,11 +4287,11 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
4161 phba->fcoe_eventtag = acqe_grp5->event_tag; 4287 phba->fcoe_eventtag = acqe_grp5->event_tag;
4162 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 4288 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
4163 phba->sli4_hba.link_state.logical_speed = 4289 phba->sli4_hba.link_state.logical_speed =
4164 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); 4290 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
4165 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4291 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4166 "2789 GRP5 Async Event: Updating logical link speed " 4292 "2789 GRP5 Async Event: Updating logical link speed "
4167 "from %dMbps to %dMbps\n", (prev_ll_spd * 10), 4293 "from %dMbps to %dMbps\n", prev_ll_spd,
4168 (phba->sli4_hba.link_state.logical_speed*10)); 4294 phba->sli4_hba.link_state.logical_speed);
4169} 4295}
4170 4296
4171/** 4297/**
@@ -4947,7 +5073,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4947 } 5073 }
4948 5074
4949 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 5075 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4950 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 5076 phba->cfg_fcp_io_channel), GFP_KERNEL);
4951 if (!phba->sli4_hba.msix_entries) { 5077 if (!phba->sli4_hba.msix_entries) {
4952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4953 "2573 Failed allocate memory for msi-x " 5079 "2573 Failed allocate memory for msi-x "
@@ -6559,7 +6685,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6559 i++; 6685 i++;
6560 } 6686 }
6561 if (i < cfg_fcp_io_channel) { 6687 if (i < cfg_fcp_io_channel) {
6562 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6688 lpfc_printf_log(phba,
6689 KERN_ERR, LOG_INIT,
6563 "3188 Reducing IO channels to match number of " 6690 "3188 Reducing IO channels to match number of "
6564 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); 6691 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
6565 cfg_fcp_io_channel = i; 6692 cfg_fcp_io_channel = i;
@@ -6567,8 +6694,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6567 6694
6568 if (cfg_fcp_io_channel > 6695 if (cfg_fcp_io_channel >
6569 phba->sli4_hba.max_cfg_param.max_eq) { 6696 phba->sli4_hba.max_cfg_param.max_eq) {
6570 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; 6697 if (phba->sli4_hba.max_cfg_param.max_eq <
6571 if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) { 6698 LPFC_FCP_IO_CHAN_MIN) {
6572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6573 "2574 Not enough EQs (%d) from the " 6700 "2574 Not enough EQs (%d) from the "
6574 "pci function for supporting FCP " 6701 "pci function for supporting FCP "
@@ -6577,13 +6704,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6577 phba->cfg_fcp_io_channel); 6704 phba->cfg_fcp_io_channel);
6578 goto out_error; 6705 goto out_error;
6579 } 6706 }
6580 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6581 "2575 Not enough EQs (%d) from the pci " 6708 "2575 Reducing IO channels to match number of "
6582 "function for supporting the requested " 6709 "available EQs: from %d to %d\n",
6583 "FCP EQs (%d), the actual FCP EQs can " 6710 cfg_fcp_io_channel,
6584 "be supported: %d\n", 6711 phba->sli4_hba.max_cfg_param.max_eq);
6585 phba->sli4_hba.max_cfg_param.max_eq, 6712 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
6586 phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
6587 } 6713 }
6588 6714
6589 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */ 6715 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
@@ -6592,7 +6718,6 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6592 phba->cfg_fcp_eq_count = cfg_fcp_io_channel; 6718 phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6593 phba->cfg_fcp_wq_count = cfg_fcp_io_channel; 6719 phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6594 phba->cfg_fcp_io_channel = cfg_fcp_io_channel; 6720 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6595 phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
6596 6721
6597 /* Get EQ depth from module parameter, fake the default for now */ 6722 /* Get EQ depth from module parameter, fake the default for now */
6598 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6723 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -8095,11 +8220,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8095 int vectors, rc, index; 8220 int vectors, rc, index;
8096 8221
8097 /* Set up MSI-X multi-message vectors */ 8222 /* Set up MSI-X multi-message vectors */
8098 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 8223 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8099 phba->sli4_hba.msix_entries[index].entry = index; 8224 phba->sli4_hba.msix_entries[index].entry = index;
8100 8225
8101 /* Configure MSI-X capability structure */ 8226 /* Configure MSI-X capability structure */
8102 vectors = phba->sli4_hba.cfg_eqn; 8227 vectors = phba->cfg_fcp_io_channel;
8103enable_msix_vectors: 8228enable_msix_vectors:
8104 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 8229 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
8105 vectors); 8230 vectors);
@@ -8142,8 +8267,14 @@ enable_msix_vectors:
8142 goto cfg_fail_out; 8267 goto cfg_fail_out;
8143 } 8268 }
8144 } 8269 }
8145 phba->sli4_hba.msix_vec_nr = vectors;
8146 8270
8271 if (vectors != phba->cfg_fcp_io_channel) {
8272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8273 "3238 Reducing IO channels to match number of "
8274 "MSI-X vectors, requested %d got %d\n",
8275 phba->cfg_fcp_io_channel, vectors);
8276 phba->cfg_fcp_io_channel = vectors;
8277 }
8147 return rc; 8278 return rc;
8148 8279
8149cfg_fail_out: 8280cfg_fail_out:
@@ -8171,7 +8302,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8171 int index; 8302 int index;
8172 8303
8173 /* Free up MSI-X multi-message vectors */ 8304 /* Free up MSI-X multi-message vectors */
8174 for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++) 8305 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8175 free_irq(phba->sli4_hba.msix_entries[index].vector, 8306 free_irq(phba->sli4_hba.msix_entries[index].vector,
8176 &phba->sli4_hba.fcp_eq_hdl[index]); 8307 &phba->sli4_hba.fcp_eq_hdl[index]);
8177 8308
@@ -9304,23 +9435,28 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9304 9435
9305/** 9436/**
9306 * lpfc_write_firmware - attempt to write a firmware image to the port 9437 * lpfc_write_firmware - attempt to write a firmware image to the port
9307 * @phba: pointer to lpfc hba data structure.
9308 * @fw: pointer to firmware image returned from request_firmware. 9438 * @fw: pointer to firmware image returned from request_firmware.
9439 * @phba: pointer to lpfc hba data structure.
9309 * 9440 *
9310 * returns the number of bytes written if write is successful.
9311 * returns a negative error value if there were errors.
9312 * returns 0 if firmware matches currently active firmware on port.
9313 **/ 9441 **/
9314int 9442static void
9315lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) 9443lpfc_write_firmware(const struct firmware *fw, void *context)
9316{ 9444{
9445 struct lpfc_hba *phba = (struct lpfc_hba *)context;
9317 char fwrev[FW_REV_STR_SIZE]; 9446 char fwrev[FW_REV_STR_SIZE];
9318 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data; 9447 struct lpfc_grp_hdr *image;
9319 struct list_head dma_buffer_list; 9448 struct list_head dma_buffer_list;
9320 int i, rc = 0; 9449 int i, rc = 0;
9321 struct lpfc_dmabuf *dmabuf, *next; 9450 struct lpfc_dmabuf *dmabuf, *next;
9322 uint32_t offset = 0, temp_offset = 0; 9451 uint32_t offset = 0, temp_offset = 0;
9323 9452
9453 /* It can be null, sanity check */
9454 if (!fw) {
9455 rc = -ENXIO;
9456 goto out;
9457 }
9458 image = (struct lpfc_grp_hdr *)fw->data;
9459
9324 INIT_LIST_HEAD(&dma_buffer_list); 9460 INIT_LIST_HEAD(&dma_buffer_list);
9325 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || 9461 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
9326 (bf_get_be32(lpfc_grp_hdr_file_type, image) != 9462 (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
@@ -9333,12 +9469,13 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9333 be32_to_cpu(image->magic_number), 9469 be32_to_cpu(image->magic_number),
9334 bf_get_be32(lpfc_grp_hdr_file_type, image), 9470 bf_get_be32(lpfc_grp_hdr_file_type, image),
9335 bf_get_be32(lpfc_grp_hdr_id, image)); 9471 bf_get_be32(lpfc_grp_hdr_id, image));
9336 return -EINVAL; 9472 rc = -EINVAL;
9473 goto release_out;
9337 } 9474 }
9338 lpfc_decode_firmware_rev(phba, fwrev, 1); 9475 lpfc_decode_firmware_rev(phba, fwrev, 1);
9339 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 9476 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
9340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9477 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9341 "3023 Updating Firmware. Current Version:%s " 9478 "3023 Updating Firmware, Current Version:%s "
9342 "New Version:%s\n", 9479 "New Version:%s\n",
9343 fwrev, image->revision); 9480 fwrev, image->revision);
9344 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 9481 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
@@ -9346,7 +9483,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9346 GFP_KERNEL); 9483 GFP_KERNEL);
9347 if (!dmabuf) { 9484 if (!dmabuf) {
9348 rc = -ENOMEM; 9485 rc = -ENOMEM;
9349 goto out; 9486 goto release_out;
9350 } 9487 }
9351 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 9488 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9352 SLI4_PAGE_SIZE, 9489 SLI4_PAGE_SIZE,
@@ -9355,7 +9492,7 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9355 if (!dmabuf->virt) { 9492 if (!dmabuf->virt) {
9356 kfree(dmabuf); 9493 kfree(dmabuf);
9357 rc = -ENOMEM; 9494 rc = -ENOMEM;
9358 goto out; 9495 goto release_out;
9359 } 9496 }
9360 list_add_tail(&dmabuf->list, &dma_buffer_list); 9497 list_add_tail(&dmabuf->list, &dma_buffer_list);
9361 } 9498 }
@@ -9375,23 +9512,24 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9375 } 9512 }
9376 rc = lpfc_wr_object(phba, &dma_buffer_list, 9513 rc = lpfc_wr_object(phba, &dma_buffer_list,
9377 (fw->size - offset), &offset); 9514 (fw->size - offset), &offset);
9378 if (rc) { 9515 if (rc)
9379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9516 goto release_out;
9380 "3024 Firmware update failed. "
9381 "%d\n", rc);
9382 goto out;
9383 }
9384 } 9517 }
9385 rc = offset; 9518 rc = offset;
9386 } 9519 }
9387out: 9520
9521release_out:
9388 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 9522 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
9389 list_del(&dmabuf->list); 9523 list_del(&dmabuf->list);
9390 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 9524 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
9391 dmabuf->virt, dmabuf->phys); 9525 dmabuf->virt, dmabuf->phys);
9392 kfree(dmabuf); 9526 kfree(dmabuf);
9393 } 9527 }
9394 return rc; 9528 release_firmware(fw);
9529out:
9530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9531 "3024 Firmware update done: %d.", rc);
9532 return;
9395} 9533}
9396 9534
9397/** 9535/**
@@ -9418,12 +9556,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9418 struct lpfc_hba *phba; 9556 struct lpfc_hba *phba;
9419 struct lpfc_vport *vport = NULL; 9557 struct lpfc_vport *vport = NULL;
9420 struct Scsi_Host *shost = NULL; 9558 struct Scsi_Host *shost = NULL;
9421 int error; 9559 int error, ret;
9422 uint32_t cfg_mode, intr_mode; 9560 uint32_t cfg_mode, intr_mode;
9423 int mcnt; 9561 int mcnt;
9424 int adjusted_fcp_io_channel; 9562 int adjusted_fcp_io_channel;
9425 const struct firmware *fw; 9563 uint8_t file_name[ELX_MODEL_NAME_SIZE];
9426 uint8_t file_name[16];
9427 9564
9428 /* Allocate memory for HBA structure */ 9565 /* Allocate memory for HBA structure */
9429 phba = lpfc_hba_alloc(pdev); 9566 phba = lpfc_hba_alloc(pdev);
@@ -9525,9 +9662,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9525 /* Default to single EQ for non-MSI-X */ 9662 /* Default to single EQ for non-MSI-X */
9526 if (phba->intr_type != MSIX) 9663 if (phba->intr_type != MSIX)
9527 adjusted_fcp_io_channel = 1; 9664 adjusted_fcp_io_channel = 1;
9528 else if (phba->sli4_hba.msix_vec_nr <
9529 phba->cfg_fcp_io_channel)
9530 adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
9531 else 9665 else
9532 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 9666 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
9533 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 9667 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
@@ -9572,12 +9706,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9572 /* check for firmware upgrade or downgrade (if_type 2 only) */ 9706 /* check for firmware upgrade or downgrade (if_type 2 only) */
9573 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 9707 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9574 LPFC_SLI_INTF_IF_TYPE_2) { 9708 LPFC_SLI_INTF_IF_TYPE_2) {
9575 snprintf(file_name, 16, "%s.grp", phba->ModelName); 9709 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp",
9576 error = request_firmware(&fw, file_name, &phba->pcidev->dev); 9710 phba->ModelName);
9577 if (!error) { 9711 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
9578 lpfc_write_firmware(phba, fw); 9712 file_name, &phba->pcidev->dev,
9579 release_firmware(fw); 9713 GFP_KERNEL, (void *)phba,
9580 } 9714 lpfc_write_firmware);
9581 } 9715 }
9582 9716
9583 /* Check if there are static vports to be created. */ 9717 /* Check if there are static vports to be created. */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 64013f3097ad..7f45ac9964a9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3829,9 +3829,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3829 cmd->scsi_done(cmd); 3829 cmd->scsi_done(cmd);
3830 3830
3831 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3831 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3832 spin_lock_irq(&phba->hbalock); 3832 spin_lock_irqsave(&phba->hbalock, flags);
3833 lpfc_cmd->pCmd = NULL; 3833 lpfc_cmd->pCmd = NULL;
3834 spin_unlock_irq(&phba->hbalock); 3834 spin_unlock_irqrestore(&phba->hbalock, flags);
3835 3835
3836 /* 3836 /*
3837 * If there is a thread waiting for command completion 3837 * If there is a thread waiting for command completion
@@ -3871,9 +3871,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3871 } 3871 }
3872 } 3872 }
3873 3873
3874 spin_lock_irq(&phba->hbalock); 3874 spin_lock_irqsave(&phba->hbalock, flags);
3875 lpfc_cmd->pCmd = NULL; 3875 lpfc_cmd->pCmd = NULL;
3876 spin_unlock_irq(&phba->hbalock); 3876 spin_unlock_irqrestore(&phba->hbalock, flags);
3877 3877
3878 /* 3878 /*
3879 * If there is a thread waiting for command completion 3879 * If there is a thread waiting for command completion
@@ -4163,7 +4163,7 @@ lpfc_info(struct Scsi_Host *host)
4163{ 4163{
4164 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; 4164 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4165 struct lpfc_hba *phba = vport->phba; 4165 struct lpfc_hba *phba = vport->phba;
4166 int len; 4166 int len, link_speed = 0;
4167 static char lpfcinfobuf[384]; 4167 static char lpfcinfobuf[384];
4168 4168
4169 memset(lpfcinfobuf,0,384); 4169 memset(lpfcinfobuf,0,384);
@@ -4184,12 +4184,18 @@ lpfc_info(struct Scsi_Host *host)
4184 phba->Port); 4184 phba->Port);
4185 } 4185 }
4186 len = strlen(lpfcinfobuf); 4186 len = strlen(lpfcinfobuf);
4187 if (phba->sli4_hba.link_state.logical_speed) { 4187 if (phba->sli_rev <= LPFC_SLI_REV3) {
4188 snprintf(lpfcinfobuf + len, 4188 link_speed = lpfc_sli_port_speed_get(phba);
4189 384-len, 4189 } else {
4190 " Logical Link Speed: %d Mbps", 4190 if (phba->sli4_hba.link_state.logical_speed)
4191 phba->sli4_hba.link_state.logical_speed * 10); 4191 link_speed =
4192 phba->sli4_hba.link_state.logical_speed;
4193 else
4194 link_speed = phba->sli4_hba.link_state.speed;
4192 } 4195 }
4196 if (link_speed != 0)
4197 snprintf(lpfcinfobuf + len, 384-len,
4198 " Logical Link Speed: %d Mbps", link_speed);
4193 } 4199 }
4194 return lpfcinfobuf; 4200 return lpfcinfobuf;
4195} 4201}
@@ -4398,16 +4404,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4398 struct lpfc_scsi_buf *lpfc_cmd; 4404 struct lpfc_scsi_buf *lpfc_cmd;
4399 IOCB_t *cmd, *icmd; 4405 IOCB_t *cmd, *icmd;
4400 int ret = SUCCESS, status = 0; 4406 int ret = SUCCESS, status = 0;
4407 unsigned long flags;
4401 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 4408 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4402 4409
4403 status = fc_block_scsi_eh(cmnd); 4410 status = fc_block_scsi_eh(cmnd);
4404 if (status != 0 && status != SUCCESS) 4411 if (status != 0 && status != SUCCESS)
4405 return status; 4412 return status;
4406 4413
4407 spin_lock_irq(&phba->hbalock); 4414 spin_lock_irqsave(&phba->hbalock, flags);
4408 /* driver queued commands are in process of being flushed */ 4415 /* driver queued commands are in process of being flushed */
4409 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 4416 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4410 spin_unlock_irq(&phba->hbalock); 4417 spin_unlock_irqrestore(&phba->hbalock, flags);
4411 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4418 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4412 "3168 SCSI Layer abort requested I/O has been " 4419 "3168 SCSI Layer abort requested I/O has been "
4413 "flushed by LLD.\n"); 4420 "flushed by LLD.\n");
@@ -4416,7 +4423,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4416 4423
4417 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 4424 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4418 if (!lpfc_cmd || !lpfc_cmd->pCmd) { 4425 if (!lpfc_cmd || !lpfc_cmd->pCmd) {
4419 spin_unlock_irq(&phba->hbalock); 4426 spin_unlock_irqrestore(&phba->hbalock, flags);
4420 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4427 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4421 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 4428 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4422 "x%x ID %d LUN %d\n", 4429 "x%x ID %d LUN %d\n",
@@ -4427,7 +4434,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4427 iocb = &lpfc_cmd->cur_iocbq; 4434 iocb = &lpfc_cmd->cur_iocbq;
4428 /* the command is in process of being cancelled */ 4435 /* the command is in process of being cancelled */
4429 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 4436 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4430 spin_unlock_irq(&phba->hbalock); 4437 spin_unlock_irqrestore(&phba->hbalock, flags);
4431 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4438 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4432 "3169 SCSI Layer abort requested I/O has been " 4439 "3169 SCSI Layer abort requested I/O has been "
4433 "cancelled by LLD.\n"); 4440 "cancelled by LLD.\n");
@@ -4484,7 +4491,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4484 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 4491 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4485 abtsiocb->vport = vport; 4492 abtsiocb->vport = vport;
4486 /* no longer need the lock after this point */ 4493 /* no longer need the lock after this point */
4487 spin_unlock_irq(&phba->hbalock); 4494 spin_unlock_irqrestore(&phba->hbalock, flags);
4488 4495
4489 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == 4496 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4490 IOCB_ERROR) { 4497 IOCB_ERROR) {
@@ -4516,7 +4523,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4516 goto out; 4523 goto out;
4517 4524
4518out_unlock: 4525out_unlock:
4519 spin_unlock_irq(&phba->hbalock); 4526 spin_unlock_irqrestore(&phba->hbalock, flags);
4520out: 4527out:
4521 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4528 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4522 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 4529 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 219bf534ef99..d7f3313ef886 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3964,9 +3964,9 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3964 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 3964 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3965 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3965 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3966 3966
3967 /* Perform FCoE PCI function reset */ 3967 /* Perform FCoE PCI function reset before freeing queue memory */
3968 lpfc_sli4_queue_destroy(phba);
3969 rc = lpfc_pci_function_reset(phba); 3968 rc = lpfc_pci_function_reset(phba);
3969 lpfc_sli4_queue_destroy(phba);
3970 3970
3971 /* Restore PCI cmd register */ 3971 /* Restore PCI cmd register */
3972 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3972 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
@@ -7072,6 +7072,40 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7072} 7072}
7073 7073
7074/** 7074/**
7075 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7076 * @phba: Pointer to HBA context object.
7077 * @mboxq: Pointer to mailbox object.
7078 *
7079 * The function waits for the bootstrap mailbox register ready bit from
7080 * port for twice the regular mailbox command timeout value.
7081 *
7082 * 0 - no timeout on waiting for bootstrap mailbox register ready.
7083 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7084 **/
7085static int
7086lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7087{
7088 uint32_t db_ready;
7089 unsigned long timeout;
7090 struct lpfc_register bmbx_reg;
7091
7092 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7093 * 1000) + jiffies;
7094
7095 do {
7096 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7097 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7098 if (!db_ready)
7099 msleep(2);
7100
7101 if (time_after(jiffies, timeout))
7102 return MBXERR_ERROR;
7103 } while (!db_ready);
7104
7105 return 0;
7106}
7107
7108/**
7075 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 7109 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7076 * @phba: Pointer to HBA context object. 7110 * @phba: Pointer to HBA context object.
7077 * @mboxq: Pointer to mailbox object. 7111 * @mboxq: Pointer to mailbox object.
@@ -7092,15 +7126,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7092{ 7126{
7093 int rc = MBX_SUCCESS; 7127 int rc = MBX_SUCCESS;
7094 unsigned long iflag; 7128 unsigned long iflag;
7095 uint32_t db_ready;
7096 uint32_t mcqe_status; 7129 uint32_t mcqe_status;
7097 uint32_t mbx_cmnd; 7130 uint32_t mbx_cmnd;
7098 unsigned long timeout;
7099 struct lpfc_sli *psli = &phba->sli; 7131 struct lpfc_sli *psli = &phba->sli;
7100 struct lpfc_mqe *mb = &mboxq->u.mqe; 7132 struct lpfc_mqe *mb = &mboxq->u.mqe;
7101 struct lpfc_bmbx_create *mbox_rgn; 7133 struct lpfc_bmbx_create *mbox_rgn;
7102 struct dma_address *dma_address; 7134 struct dma_address *dma_address;
7103 struct lpfc_register bmbx_reg;
7104 7135
7105 /* 7136 /*
7106 * Only one mailbox can be active to the bootstrap mailbox region 7137 * Only one mailbox can be active to the bootstrap mailbox region
@@ -7124,6 +7155,11 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7124 phba->sli.mbox_active = mboxq; 7155 phba->sli.mbox_active = mboxq;
7125 spin_unlock_irqrestore(&phba->hbalock, iflag); 7156 spin_unlock_irqrestore(&phba->hbalock, iflag);
7126 7157
7158 /* wait for bootstrap mbox register for readyness */
7159 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7160 if (rc)
7161 goto exit;
7162
7127 /* 7163 /*
7128 * Initialize the bootstrap memory region to avoid stale data areas 7164 * Initialize the bootstrap memory region to avoid stale data areas
7129 * in the mailbox post. Then copy the caller's mailbox contents to 7165 * in the mailbox post. Then copy the caller's mailbox contents to
@@ -7138,35 +7174,18 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7138 dma_address = &phba->sli4_hba.bmbx.dma_address; 7174 dma_address = &phba->sli4_hba.bmbx.dma_address;
7139 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 7175 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7140 7176
7141 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 7177 /* wait for bootstrap mbox register for hi-address write done */
7142 * 1000) + jiffies; 7178 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7143 do { 7179 if (rc)
7144 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 7180 goto exit;
7145 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7146 if (!db_ready)
7147 msleep(2);
7148
7149 if (time_after(jiffies, timeout)) {
7150 rc = MBXERR_ERROR;
7151 goto exit;
7152 }
7153 } while (!db_ready);
7154 7181
7155 /* Post the low mailbox dma address to the port. */ 7182 /* Post the low mailbox dma address to the port. */
7156 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 7183 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7157 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7158 * 1000) + jiffies;
7159 do {
7160 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7161 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7162 if (!db_ready)
7163 msleep(2);
7164 7184
7165 if (time_after(jiffies, timeout)) { 7185 /* wait for bootstrap mbox register for low address write done */
7166 rc = MBXERR_ERROR; 7186 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7167 goto exit; 7187 if (rc)
7168 } 7188 goto exit;
7169 } while (!db_ready);
7170 7189
7171 /* 7190 /*
7172 * Read the CQ to ensure the mailbox has completed. 7191 * Read the CQ to ensure the mailbox has completed.
@@ -8090,6 +8109,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8090 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 8109 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8091 LPFC_WQE_LENLOC_NONE); 8110 LPFC_WQE_LENLOC_NONE);
8092 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 8111 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
8112 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8113 iocbq->iocb.ulpFCP2Rcvy);
8093 break; 8114 break;
8094 case CMD_GEN_REQUEST64_CR: 8115 case CMD_GEN_REQUEST64_CR:
8095 /* For this command calculate the xmit length of the 8116 /* For this command calculate the xmit length of the
@@ -12099,6 +12120,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12099 struct lpfc_queue *eq; 12120 struct lpfc_queue *eq;
12100 int cnt, rc, length, status = 0; 12121 int cnt, rc, length, status = 0;
12101 uint32_t shdr_status, shdr_add_status; 12122 uint32_t shdr_status, shdr_add_status;
12123 uint32_t result;
12102 int fcp_eqidx; 12124 int fcp_eqidx;
12103 union lpfc_sli4_cfg_shdr *shdr; 12125 union lpfc_sli4_cfg_shdr *shdr;
12104 uint16_t dmult; 12126 uint16_t dmult;
@@ -12117,8 +12139,11 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12117 eq_delay = &mbox->u.mqe.un.eq_delay; 12139 eq_delay = &mbox->u.mqe.un.eq_delay;
12118 12140
12119 /* Calculate delay multiper from maximum interrupt per second */ 12141 /* Calculate delay multiper from maximum interrupt per second */
12120 dmult = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; 12142 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
12121 dmult = LPFC_DMULT_CONST/dmult - 1; 12143 if (result > LPFC_DMULT_CONST)
12144 dmult = 0;
12145 else
12146 dmult = LPFC_DMULT_CONST/result - 1;
12122 12147
12123 cnt = 0; 12148 cnt = 0;
12124 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; 12149 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
@@ -12174,7 +12199,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12174 * fails this function will return -ENXIO. 12199 * fails this function will return -ENXIO.
12175 **/ 12200 **/
12176uint32_t 12201uint32_t
12177lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 12202lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
12178{ 12203{
12179 struct lpfc_mbx_eq_create *eq_create; 12204 struct lpfc_mbx_eq_create *eq_create;
12180 LPFC_MBOXQ_t *mbox; 12205 LPFC_MBOXQ_t *mbox;
@@ -12206,7 +12231,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
12206 LPFC_EQE_SIZE); 12231 LPFC_EQE_SIZE);
12207 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 12232 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
12208 /* Calculate delay multiper from maximum interrupt per second */ 12233 /* Calculate delay multiper from maximum interrupt per second */
12209 dmult = LPFC_DMULT_CONST/imax - 1; 12234 if (imax > LPFC_DMULT_CONST)
12235 dmult = 0;
12236 else
12237 dmult = LPFC_DMULT_CONST/imax - 1;
12210 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 12238 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
12211 dmult); 12239 dmult);
12212 switch (eq->entry_count) { 12240 switch (eq->entry_count) {
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index bd4bc4342ae2..f44a06a4c6e7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -37,7 +37,7 @@
37/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ 37/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
38#define LPFC_FCP_IO_CHAN_DEF 4 38#define LPFC_FCP_IO_CHAN_DEF 4
39#define LPFC_FCP_IO_CHAN_MIN 1 39#define LPFC_FCP_IO_CHAN_MIN 1
40#define LPFC_FCP_IO_CHAN_MAX 8 40#define LPFC_FCP_IO_CHAN_MAX 16
41 41
42/* 42/*
43 * Provide the default FCF Record attributes used by the driver 43 * Provide the default FCF Record attributes used by the driver
@@ -168,7 +168,7 @@ struct lpfc_queue {
168}; 168};
169 169
170struct lpfc_sli4_link { 170struct lpfc_sli4_link {
171 uint8_t speed; 171 uint16_t speed;
172 uint8_t duplex; 172 uint8_t duplex;
173 uint8_t status; 173 uint8_t status;
174 uint8_t type; 174 uint8_t type;
@@ -490,8 +490,6 @@ struct lpfc_sli4_hba {
490 struct lpfc_pc_sli4_params pc_sli4_params; 490 struct lpfc_pc_sli4_params pc_sli4_params;
491 struct msix_entry *msix_entries; 491 struct msix_entry *msix_entries;
492 uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ]; 492 uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
493 uint32_t cfg_eqn;
494 uint32_t msix_vec_nr;
495 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 493 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
496 494
497 /* Pointers to the constructed SLI4 queues */ 495 /* Pointers to the constructed SLI4 queues */
@@ -626,7 +624,7 @@ void lpfc_sli4_hba_reset(struct lpfc_hba *);
626struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, 624struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
627 uint32_t); 625 uint32_t);
628void lpfc_sli4_queue_free(struct lpfc_queue *); 626void lpfc_sli4_queue_free(struct lpfc_queue *);
629uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); 627uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
630uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t); 628uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
631uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, 629uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
632 struct lpfc_queue *, uint32_t, uint32_t); 630 struct lpfc_queue *, uint32_t, uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 04265a1c4e52..0c2149189dda 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.34" 21#define LPFC_DRIVER_VERSION "8.3.35"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index fcb005fa4bd1..16b7a72a70c4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2003-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.06.18-rc1" 36#define MEGASAS_VERSION "06.504.01.00-rc1"
37#define MEGASAS_RELDATE "Jun. 17, 2012" 37#define MEGASAS_RELDATE "Oct. 1, 2012"
38#define MEGASAS_EXT_VERSION "Tue. Jun. 17 17:00:00 PDT 2012" 38#define MEGASAS_EXT_VERSION "Mon. Oct. 1 17:00:00 PDT 2012"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0393ec478cdf..d2c5366aff7f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2003-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.06.18-rc1 21 * Version : v06.504.01.00-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -71,6 +71,10 @@ static int msix_disable;
71module_param(msix_disable, int, S_IRUGO); 71module_param(msix_disable, int, S_IRUGO);
72MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); 72MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
73 73
74static unsigned int msix_vectors;
75module_param(msix_vectors, int, S_IRUGO);
76MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
77
74static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; 78static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
75module_param(throttlequeuedepth, int, S_IRUGO); 79module_param(throttlequeuedepth, int, S_IRUGO);
76MODULE_PARM_DESC(throttlequeuedepth, 80MODULE_PARM_DESC(throttlequeuedepth,
@@ -3520,6 +3524,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
3520 instance->msix_vectors = (readl(&instance->reg_set-> 3524 instance->msix_vectors = (readl(&instance->reg_set->
3521 outbound_scratch_pad_2 3525 outbound_scratch_pad_2
3522 ) & 0x1F) + 1; 3526 ) & 0x1F) + 1;
3527 if (msix_vectors)
3528 instance->msix_vectors =
3529 min(msix_vectors,
3530 instance->msix_vectors);
3523 } else 3531 } else
3524 instance->msix_vectors = 1; 3532 instance->msix_vectors = 1;
3525 /* Don't bother allocating more MSI-X vectors than cpus */ 3533 /* Don't bother allocating more MSI-X vectors than cpus */
@@ -5233,7 +5241,6 @@ megasas_aen_polling(struct work_struct *work)
5233 5241
5234 case MR_EVT_PD_REMOVED: 5242 case MR_EVT_PD_REMOVED:
5235 if (megasas_get_pd_list(instance) == 0) { 5243 if (megasas_get_pd_list(instance) == 0) {
5236 megasas_get_pd_list(instance);
5237 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 5244 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
5238 for (j = 0; 5245 for (j = 0;
5239 j < MEGASAS_MAX_DEV_PER_CHANNEL; 5246 j < MEGASAS_MAX_DEV_PER_CHANNEL;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e3d251a2e26a..a11df82474ef 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2009-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ddf094e7d0ac..74030aff69ad 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2009-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -1184,8 +1184,6 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1184 io_request->CDB.EEDP32.PrimaryReferenceTag = 1184 io_request->CDB.EEDP32.PrimaryReferenceTag =
1185 cpu_to_be32(ref_tag); 1185 cpu_to_be32(ref_tag);
1186 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; 1186 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
1187
1188 io_request->DataLength = num_blocks * 512;
1189 io_request->IoFlags = 32; /* Specify 32-byte cdb */ 1187 io_request->IoFlags = 32; /* Specify 32-byte cdb */
1190 1188
1191 /* Transfer length */ 1189 /* Transfer length */
@@ -1329,7 +1327,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1329 struct megasas_cmd_fusion *cmd) 1327 struct megasas_cmd_fusion *cmd)
1330{ 1328{
1331 u8 fp_possible; 1329 u8 fp_possible;
1332 u32 start_lba_lo, start_lba_hi, device_id; 1330 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
1333 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1331 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1334 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 1332 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1335 struct IO_REQUEST_INFO io_info; 1333 struct IO_REQUEST_INFO io_info;
@@ -1355,7 +1353,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1355 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1353 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1356 */ 1354 */
1357 if (scp->cmd_len == 6) { 1355 if (scp->cmd_len == 6) {
1358 io_request->DataLength = (u32) scp->cmnd[4]; 1356 datalength = (u32) scp->cmnd[4];
1359 start_lba_lo = ((u32) scp->cmnd[1] << 16) | 1357 start_lba_lo = ((u32) scp->cmnd[1] << 16) |
1360 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; 1358 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
1361 1359
@@ -1366,7 +1364,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1366 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1364 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1367 */ 1365 */
1368 else if (scp->cmd_len == 10) { 1366 else if (scp->cmd_len == 10) {
1369 io_request->DataLength = (u32) scp->cmnd[8] | 1367 datalength = (u32) scp->cmnd[8] |
1370 ((u32) scp->cmnd[7] << 8); 1368 ((u32) scp->cmnd[7] << 8);
1371 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1369 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1372 ((u32) scp->cmnd[3] << 16) | 1370 ((u32) scp->cmnd[3] << 16) |
@@ -1377,7 +1375,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1377 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1375 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1378 */ 1376 */
1379 else if (scp->cmd_len == 12) { 1377 else if (scp->cmd_len == 12) {
1380 io_request->DataLength = ((u32) scp->cmnd[6] << 24) | 1378 datalength = ((u32) scp->cmnd[6] << 24) |
1381 ((u32) scp->cmnd[7] << 16) | 1379 ((u32) scp->cmnd[7] << 16) |
1382 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1380 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1383 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1381 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
@@ -1389,7 +1387,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1389 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1387 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1390 */ 1388 */
1391 else if (scp->cmd_len == 16) { 1389 else if (scp->cmd_len == 16) {
1392 io_request->DataLength = ((u32) scp->cmnd[10] << 24) | 1390 datalength = ((u32) scp->cmnd[10] << 24) |
1393 ((u32) scp->cmnd[11] << 16) | 1391 ((u32) scp->cmnd[11] << 16) |
1394 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; 1392 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
1395 start_lba_lo = ((u32) scp->cmnd[6] << 24) | 1393 start_lba_lo = ((u32) scp->cmnd[6] << 24) |
@@ -1403,8 +1401,9 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1403 1401
1404 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); 1402 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1405 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 1403 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1406 io_info.numBlocks = io_request->DataLength; 1404 io_info.numBlocks = datalength;
1407 io_info.ldTgtId = device_id; 1405 io_info.ldTgtId = device_id;
1406 io_request->DataLength = scsi_bufflen(scp);
1408 1407
1409 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1408 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1410 io_info.isRead = 1; 1409 io_info.isRead = 1;
@@ -1431,7 +1430,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1431 if (fp_possible) { 1430 if (fp_possible) {
1432 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, 1431 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1433 local_map_ptr, start_lba_lo); 1432 local_map_ptr, start_lba_lo);
1434 io_request->DataLength = scsi_bufflen(scp);
1435 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1433 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1436 cmd->request_desc->SCSIIO.RequestFlags = 1434 cmd->request_desc->SCSIIO.RequestFlags =
1437 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY 1435 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
@@ -1510,7 +1508,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1510 local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; 1508 local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
1511 1509
1512 /* Check if this is a system PD I/O */ 1510 /* Check if this is a system PD I/O */
1513 if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { 1511 if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
1512 instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
1514 io_request->Function = 0; 1513 io_request->Function = 0;
1515 io_request->DevHandle = 1514 io_request->DevHandle =
1516 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 1515 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
@@ -1525,6 +1524,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1525 cmd->request_desc->SCSIIO.RequestFlags = 1524 cmd->request_desc->SCSIIO.RequestFlags =
1526 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 1525 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1527 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1526 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1527 cmd->request_desc->SCSIIO.DevHandle =
1528 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1528 } else { 1529 } else {
1529 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1530 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1530 io_request->DevHandle = device_id; 1531 io_request->DevHandle = device_id;
@@ -1732,8 +1733,6 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
1732 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1733 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1733 return IRQ_NONE; 1734 return IRQ_NONE;
1734 1735
1735 d_val.word = desc->Words;
1736
1737 num_completed = 0; 1736 num_completed = 0;
1738 1737
1739 while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { 1738 while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
@@ -1855,10 +1854,8 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
1855 } 1854 }
1856 spin_unlock_irqrestore(&instance->hba_lock, flags); 1855 spin_unlock_irqrestore(&instance->hba_lock, flags);
1857 1856
1858 spin_lock_irqsave(&instance->completion_lock, flags);
1859 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) 1857 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
1860 complete_cmd_fusion(instance, MSIxIndex); 1858 complete_cmd_fusion(instance, MSIxIndex);
1861 spin_unlock_irqrestore(&instance->completion_lock, flags);
1862} 1859}
1863 1860
1864/** 1861/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 088c9f91da95..a7c64f051996 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2011 LSI Corporation. 4 * Copyright (c) 2009-2012 LSI Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 783edc7c6b98..c585a925b3cd 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -35,10 +35,12 @@
35#include <linux/io.h> 35#include <linux/io.h>
36#include <scsi/scsi.h> 36#include <scsi/scsi.h>
37#include <scsi/scsi_cmnd.h> 37#include <scsi/scsi_cmnd.h>
38#include <scsi/scsi_device.h>
38#include <scsi/scsi_host.h> 39#include <scsi/scsi_host.h>
39#include <scsi/scsi_transport.h> 40#include <scsi/scsi_transport.h>
40#include <scsi/scsi_eh.h> 41#include <scsi/scsi_eh.h>
41#include <linux/uaccess.h> 42#include <linux/uaccess.h>
43#include <linux/kthread.h>
42 44
43#include "mvumi.h" 45#include "mvumi.h"
44 46
@@ -48,6 +50,7 @@ MODULE_DESCRIPTION("Marvell UMI Driver");
48 50
49static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = { 51static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
50 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) }, 52 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
53 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9580) },
51 { 0 } 54 { 0 }
52}; 55};
53 56
@@ -118,7 +121,7 @@ static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
118static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, 121static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
119 enum resource_type type, unsigned int size) 122 enum resource_type type, unsigned int size)
120{ 123{
121 struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL); 124 struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
122 125
123 if (!res) { 126 if (!res) {
124 dev_err(&mhba->pdev->dev, 127 dev_err(&mhba->pdev->dev,
@@ -128,7 +131,7 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
128 131
129 switch (type) { 132 switch (type) {
130 case RESOURCE_CACHED_MEMORY: 133 case RESOURCE_CACHED_MEMORY:
131 res->virt_addr = kzalloc(size, GFP_KERNEL); 134 res->virt_addr = kzalloc(size, GFP_ATOMIC);
132 if (!res->virt_addr) { 135 if (!res->virt_addr) {
133 dev_err(&mhba->pdev->dev, 136 dev_err(&mhba->pdev->dev,
134 "unable to allocate memory,size = %d.\n", size); 137 "unable to allocate memory,size = %d.\n", size);
@@ -222,11 +225,11 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
222 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); 225 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
223 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); 226 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
224 m_sg->flags = 0; 227 m_sg->flags = 0;
225 m_sg->size = cpu_to_le32(sg_dma_len(&sg[i])); 228 sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
226 if ((i + 1) == *sg_count) 229 if ((i + 1) == *sg_count)
227 m_sg->flags |= SGD_EOT; 230 m_sg->flags |= 1U << mhba->eot_flag;
228 231
229 m_sg++; 232 sgd_inc(mhba, m_sg);
230 } 233 }
231 } else { 234 } else {
232 scmd->SCp.dma_handle = scsi_bufflen(scmd) ? 235 scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
@@ -237,8 +240,8 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
237 busaddr = scmd->SCp.dma_handle; 240 busaddr = scmd->SCp.dma_handle;
238 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); 241 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
239 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); 242 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
240 m_sg->flags = SGD_EOT; 243 m_sg->flags = 1U << mhba->eot_flag;
241 m_sg->size = cpu_to_le32(scsi_bufflen(scmd)); 244 sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
242 *sg_count = 1; 245 *sg_count = 1;
243 } 246 }
244 247
@@ -267,8 +270,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
267 270
268 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr)); 271 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
269 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr)); 272 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
270 m_sg->flags = SGD_EOT; 273 m_sg->flags = 1U << mhba->eot_flag;
271 m_sg->size = cpu_to_le32(size); 274 sgd_setsz(mhba, m_sg, cpu_to_le32(size));
272 275
273 return 0; 276 return 0;
274} 277}
@@ -285,7 +288,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
285 } 288 }
286 INIT_LIST_HEAD(&cmd->queue_pointer); 289 INIT_LIST_HEAD(&cmd->queue_pointer);
287 290
288 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); 291 cmd->frame = pci_alloc_consistent(mhba->pdev,
292 mhba->ib_max_size, &cmd->frame_phys);
289 if (!cmd->frame) { 293 if (!cmd->frame) {
290 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" 294 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
291 " frame,size = %d.\n", mhba->ib_max_size); 295 " frame,size = %d.\n", mhba->ib_max_size);
@@ -297,7 +301,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
297 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { 301 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
298 dev_err(&mhba->pdev->dev, "failed to allocate memory" 302 dev_err(&mhba->pdev->dev, "failed to allocate memory"
299 " for internal frame\n"); 303 " for internal frame\n");
300 kfree(cmd->frame); 304 pci_free_consistent(mhba->pdev, mhba->ib_max_size,
305 cmd->frame, cmd->frame_phys);
301 kfree(cmd); 306 kfree(cmd);
302 return NULL; 307 return NULL;
303 } 308 }
@@ -317,7 +322,7 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
317 if (cmd && cmd->frame) { 322 if (cmd && cmd->frame) {
318 if (cmd->frame->sg_counts) { 323 if (cmd->frame->sg_counts) {
319 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; 324 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
320 size = m_sg->size; 325 sgd_getsz(mhba, m_sg, size);
321 326
322 phy_addr = (dma_addr_t) m_sg->baseaddr_l | 327 phy_addr = (dma_addr_t) m_sg->baseaddr_l |
323 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16); 328 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
@@ -325,7 +330,8 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
325 pci_free_consistent(mhba->pdev, size, cmd->data_buf, 330 pci_free_consistent(mhba->pdev, size, cmd->data_buf,
326 phy_addr); 331 phy_addr);
327 } 332 }
328 kfree(cmd->frame); 333 pci_free_consistent(mhba->pdev, mhba->ib_max_size,
334 cmd->frame, cmd->frame_phys);
329 kfree(cmd); 335 kfree(cmd);
330 } 336 }
331} 337}
@@ -374,7 +380,8 @@ static void mvumi_free_cmds(struct mvumi_hba *mhba)
374 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, 380 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
375 queue_pointer); 381 queue_pointer);
376 list_del(&cmd->queue_pointer); 382 list_del(&cmd->queue_pointer);
377 kfree(cmd->frame); 383 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
384 kfree(cmd->frame);
378 kfree(cmd); 385 kfree(cmd);
379 } 386 }
380} 387}
@@ -396,7 +403,12 @@ static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
396 403
397 INIT_LIST_HEAD(&cmd->queue_pointer); 404 INIT_LIST_HEAD(&cmd->queue_pointer);
398 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); 405 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
399 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); 406 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
407 cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
408 cmd->frame_phys = mhba->ib_frame_phys
409 + i * mhba->ib_max_size;
410 } else
411 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
400 if (!cmd->frame) 412 if (!cmd->frame)
401 goto err_exit; 413 goto err_exit;
402 } 414 }
@@ -409,48 +421,71 @@ err_exit:
409 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, 421 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
410 queue_pointer); 422 queue_pointer);
411 list_del(&cmd->queue_pointer); 423 list_del(&cmd->queue_pointer);
412 kfree(cmd->frame); 424 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
425 kfree(cmd->frame);
413 kfree(cmd); 426 kfree(cmd);
414 } 427 }
415 return -ENOMEM; 428 return -ENOMEM;
416} 429}
417 430
418static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) 431static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
419{ 432{
420 unsigned int ib_rp_reg, cur_ib_entry; 433 unsigned int ib_rp_reg;
434 struct mvumi_hw_regs *regs = mhba->regs;
435
436 ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
421 437
438 if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
439 (mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
440 ((ib_rp_reg & regs->cl_pointer_toggle)
441 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
442 dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
443 return 0;
444 }
422 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { 445 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
423 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); 446 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
424 return -1; 447 return 0;
448 } else {
449 return mhba->max_io - atomic_read(&mhba->fw_outstanding);
425 } 450 }
426 ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER); 451}
427 452
428 if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) == 453static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
429 (mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) && 454{
430 ((ib_rp_reg & CL_POINTER_TOGGLE) != 455 unsigned int count;
431 (mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) { 456 if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
432 dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); 457 return 0;
433 return -1; 458 count = ioread32(mhba->ib_shadow);
434 } 459 if (count == 0xffff)
460 return 0;
461 return count;
462}
463
464static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
465{
466 unsigned int cur_ib_entry;
435 467
436 cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK; 468 cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
437 cur_ib_entry++; 469 cur_ib_entry++;
438 if (cur_ib_entry >= mhba->list_num_io) { 470 if (cur_ib_entry >= mhba->list_num_io) {
439 cur_ib_entry -= mhba->list_num_io; 471 cur_ib_entry -= mhba->list_num_io;
440 mhba->ib_cur_slot ^= CL_POINTER_TOGGLE; 472 mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
473 }
474 mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
475 mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
476 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
477 *ib_entry = mhba->ib_list + cur_ib_entry *
478 sizeof(struct mvumi_dyn_list_entry);
479 } else {
480 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
441 } 481 }
442 mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
443 mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
444 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
445 atomic_inc(&mhba->fw_outstanding); 482 atomic_inc(&mhba->fw_outstanding);
446
447 return 0;
448} 483}
449 484
450static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) 485static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
451{ 486{
452 iowrite32(0xfff, mhba->ib_shadow); 487 iowrite32(0xffff, mhba->ib_shadow);
453 iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER); 488 iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
454} 489}
455 490
456static char mvumi_check_ob_frame(struct mvumi_hba *mhba, 491static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
@@ -480,31 +515,59 @@ static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
480 return 0; 515 return 0;
481} 516}
482 517
483static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) 518static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
519 unsigned int *cur_obf, unsigned int *assign_obf_end)
484{ 520{
485 unsigned int ob_write_reg, ob_write_shadow_reg; 521 unsigned int ob_write, ob_write_shadow;
486 unsigned int cur_obf, assign_obf_end, i; 522 struct mvumi_hw_regs *regs = mhba->regs;
487 struct mvumi_ob_data *ob_data;
488 struct mvumi_rsp_frame *p_outb_frame;
489 523
490 do { 524 do {
491 ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER); 525 ob_write = ioread32(regs->outb_copy_pointer);
492 ob_write_shadow_reg = ioread32(mhba->ob_shadow); 526 ob_write_shadow = ioread32(mhba->ob_shadow);
493 } while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg); 527 } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
494 528
495 cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK; 529 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
496 assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK; 530 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
497 531
498 if ((ob_write_reg & CL_POINTER_TOGGLE) != 532 if ((ob_write & regs->cl_pointer_toggle) !=
499 (mhba->ob_cur_slot & CL_POINTER_TOGGLE)) { 533 (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
500 assign_obf_end += mhba->list_num_io; 534 *assign_obf_end += mhba->list_num_io;
501 } 535 }
536 return 0;
537}
538
539static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
540 unsigned int *cur_obf, unsigned int *assign_obf_end)
541{
542 unsigned int ob_write;
543 struct mvumi_hw_regs *regs = mhba->regs;
544
545 ob_write = ioread32(regs->outb_read_pointer);
546 ob_write = ioread32(regs->outb_copy_pointer);
547 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
548 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
549 if (*assign_obf_end < *cur_obf)
550 *assign_obf_end += mhba->list_num_io;
551 else if (*assign_obf_end == *cur_obf)
552 return -1;
553 return 0;
554}
555
556static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
557{
558 unsigned int cur_obf, assign_obf_end, i;
559 struct mvumi_ob_data *ob_data;
560 struct mvumi_rsp_frame *p_outb_frame;
561 struct mvumi_hw_regs *regs = mhba->regs;
562
563 if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
564 return;
502 565
503 for (i = (assign_obf_end - cur_obf); i != 0; i--) { 566 for (i = (assign_obf_end - cur_obf); i != 0; i--) {
504 cur_obf++; 567 cur_obf++;
505 if (cur_obf >= mhba->list_num_io) { 568 if (cur_obf >= mhba->list_num_io) {
506 cur_obf -= mhba->list_num_io; 569 cur_obf -= mhba->list_num_io;
507 mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; 570 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
508 } 571 }
509 572
510 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; 573 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
@@ -528,7 +591,7 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
528 ob_data = NULL; 591 ob_data = NULL;
529 if (cur_obf == 0) { 592 if (cur_obf == 0) {
530 cur_obf = mhba->list_num_io - 1; 593 cur_obf = mhba->list_num_io - 1;
531 mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; 594 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
532 } else 595 } else
533 cur_obf -= 1; 596 cur_obf -= 1;
534 break; 597 break;
@@ -539,18 +602,20 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
539 602
540 list_add_tail(&ob_data->list, &mhba->free_ob_list); 603 list_add_tail(&ob_data->list, &mhba->free_ob_list);
541 } 604 }
542 mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK; 605 mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
543 mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK); 606 mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
544 iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER); 607 iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
545} 608}
546 609
547static void mvumi_reset(void *regs) 610static void mvumi_reset(struct mvumi_hba *mhba)
548{ 611{
549 iowrite32(0, regs + CPU_ENPOINTA_MASK_REG); 612 struct mvumi_hw_regs *regs = mhba->regs;
550 if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE) 613
614 iowrite32(0, regs->enpointa_mask_reg);
615 if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
551 return; 616 return;
552 617
553 iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG); 618 iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
554} 619}
555 620
556static unsigned char mvumi_start(struct mvumi_hba *mhba); 621static unsigned char mvumi_start(struct mvumi_hba *mhba);
@@ -558,7 +623,7 @@ static unsigned char mvumi_start(struct mvumi_hba *mhba);
558static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) 623static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
559{ 624{
560 mhba->fw_state = FW_STATE_ABORT; 625 mhba->fw_state = FW_STATE_ABORT;
561 mvumi_reset(mhba->mmio); 626 mvumi_reset(mhba);
562 627
563 if (mvumi_start(mhba)) 628 if (mvumi_start(mhba))
564 return FAILED; 629 return FAILED;
@@ -566,6 +631,98 @@ static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
566 return SUCCESS; 631 return SUCCESS;
567} 632}
568 633
634static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
635{
636 struct mvumi_hw_regs *regs = mhba->regs;
637 u32 tmp;
638 unsigned long before;
639 before = jiffies;
640
641 iowrite32(0, regs->enpointa_mask_reg);
642 tmp = ioread32(regs->arm_to_pciea_msg1);
643 while (tmp != HANDSHAKE_READYSTATE) {
644 iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
645 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
646 dev_err(&mhba->pdev->dev,
647 "FW reset failed [0x%x].\n", tmp);
648 return FAILED;
649 }
650
651 msleep(500);
652 rmb();
653 tmp = ioread32(regs->arm_to_pciea_msg1);
654 }
655
656 return SUCCESS;
657}
658
659static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
660{
661 unsigned char i;
662
663 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
664 pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
665 &mhba->pci_base[i]);
666 }
667}
668
669static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
670{
671 unsigned char i;
672
673 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
674 if (mhba->pci_base[i])
675 pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
676 mhba->pci_base[i]);
677 }
678}
679
680static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
681{
682 unsigned int ret = 0;
683 pci_set_master(pdev);
684
685 if (IS_DMA64) {
686 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
687 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
688 } else
689 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
690
691 return ret;
692}
693
694static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
695{
696 mhba->fw_state = FW_STATE_ABORT;
697
698 iowrite32(0, mhba->regs->reset_enable);
699 iowrite32(0xf, mhba->regs->reset_request);
700
701 iowrite32(0x10, mhba->regs->reset_enable);
702 iowrite32(0x10, mhba->regs->reset_request);
703 msleep(100);
704 pci_disable_device(mhba->pdev);
705
706 if (pci_enable_device(mhba->pdev)) {
707 dev_err(&mhba->pdev->dev, "enable device failed\n");
708 return FAILED;
709 }
710 if (mvumi_pci_set_master(mhba->pdev)) {
711 dev_err(&mhba->pdev->dev, "set master failed\n");
712 return FAILED;
713 }
714 mvumi_restore_bar_addr(mhba);
715 if (mvumi_wait_for_fw(mhba) == FAILED)
716 return FAILED;
717
718 return mvumi_wait_for_outstanding(mhba);
719}
720
721static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
722{
723 return mvumi_wait_for_outstanding(mhba);
724}
725
569static int mvumi_host_reset(struct scsi_cmnd *scmd) 726static int mvumi_host_reset(struct scsi_cmnd *scmd)
570{ 727{
571 struct mvumi_hba *mhba; 728 struct mvumi_hba *mhba;
@@ -575,7 +732,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
575 scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n", 732 scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
576 scmd->serial_number, scmd->cmnd[0], scmd->retries); 733 scmd->serial_number, scmd->cmnd[0], scmd->retries);
577 734
578 return mvumi_wait_for_outstanding(mhba); 735 return mhba->instancet->reset_host(mhba);
579} 736}
580 737
581static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, 738static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
@@ -628,7 +785,9 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
628 mvumi_free_cmds(mhba); 785 mvumi_free_cmds(mhba);
629 mvumi_release_mem_resource(mhba); 786 mvumi_release_mem_resource(mhba);
630 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); 787 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
631 kfree(mhba->handshake_page); 788 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
789 mhba->handshake_page, mhba->handshake_page_phys);
790 kfree(mhba->regs);
632 pci_release_regions(mhba->pdev); 791 pci_release_regions(mhba->pdev);
633} 792}
634 793
@@ -665,6 +824,7 @@ get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
665 frame->cdb_length = MAX_COMMAND_SIZE; 824 frame->cdb_length = MAX_COMMAND_SIZE;
666 memset(frame->cdb, 0, MAX_COMMAND_SIZE); 825 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
667 frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC; 826 frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
827 frame->cdb[1] = CDB_CORE_MODULE;
668 frame->cdb[2] = CDB_CORE_SHUTDOWN; 828 frame->cdb[2] = CDB_CORE_SHUTDOWN;
669 829
670 mvumi_issue_blocked_cmd(mhba, cmd); 830 mvumi_issue_blocked_cmd(mhba, cmd);
@@ -695,7 +855,7 @@ mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
695 return ret; 855 return ret;
696} 856}
697 857
698void mvumi_hs_build_page(struct mvumi_hba *mhba, 858static void mvumi_hs_build_page(struct mvumi_hba *mhba,
699 struct mvumi_hs_header *hs_header) 859 struct mvumi_hs_header *hs_header)
700{ 860{
701 struct mvumi_hs_page2 *hs_page2; 861 struct mvumi_hs_page2 *hs_page2;
@@ -710,6 +870,8 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba,
710 hs_header->frame_length = sizeof(*hs_page2) - 4; 870 hs_header->frame_length = sizeof(*hs_page2) - 4;
711 memset(hs_header->frame_content, 0, hs_header->frame_length); 871 memset(hs_header->frame_content, 0, hs_header->frame_length);
712 hs_page2->host_type = 3; /* 3 mean linux*/ 872 hs_page2->host_type = 3; /* 3 mean linux*/
873 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
874 hs_page2->host_cap = 0x08;/* host dynamic source mode */
713 hs_page2->host_ver.ver_major = VER_MAJOR; 875 hs_page2->host_ver.ver_major = VER_MAJOR;
714 hs_page2->host_ver.ver_minor = VER_MINOR; 876 hs_page2->host_ver.ver_minor = VER_MINOR;
715 hs_page2->host_ver.ver_oem = VER_OEM; 877 hs_page2->host_ver.ver_oem = VER_OEM;
@@ -745,8 +907,18 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba,
745 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); 907 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
746 hs_page4->ib_entry_size = mhba->ib_max_size_setting; 908 hs_page4->ib_entry_size = mhba->ib_max_size_setting;
747 hs_page4->ob_entry_size = mhba->ob_max_size_setting; 909 hs_page4->ob_entry_size = mhba->ob_max_size_setting;
748 hs_page4->ob_depth = mhba->list_num_io; 910 if (mhba->hba_capability
749 hs_page4->ib_depth = mhba->list_num_io; 911 & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
912 hs_page4->ob_depth = find_first_bit((unsigned long *)
913 &mhba->list_num_io,
914 BITS_PER_LONG);
915 hs_page4->ib_depth = find_first_bit((unsigned long *)
916 &mhba->list_num_io,
917 BITS_PER_LONG);
918 } else {
919 hs_page4->ob_depth = (u8) mhba->list_num_io;
920 hs_page4->ib_depth = (u8) mhba->list_num_io;
921 }
750 hs_header->checksum = mvumi_calculate_checksum(hs_header, 922 hs_header->checksum = mvumi_calculate_checksum(hs_header,
751 hs_header->frame_length); 923 hs_header->frame_length);
752 break; 924 break;
@@ -774,8 +946,11 @@ static int mvumi_init_data(struct mvumi_hba *mhba)
774 return 0; 946 return 0;
775 947
776 tmp_size = mhba->ib_max_size * mhba->max_io; 948 tmp_size = mhba->ib_max_size * mhba->max_io;
949 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
950 tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
951
777 tmp_size += 128 + mhba->ob_max_size * mhba->max_io; 952 tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
778 tmp_size += 8 + sizeof(u32) + 16; 953 tmp_size += 8 + sizeof(u32)*2 + 16;
779 954
780 res_mgnt = mvumi_alloc_mem_resource(mhba, 955 res_mgnt = mvumi_alloc_mem_resource(mhba,
781 RESOURCE_UNCACHED_MEMORY, tmp_size); 956 RESOURCE_UNCACHED_MEMORY, tmp_size);
@@ -793,24 +968,41 @@ static int mvumi_init_data(struct mvumi_hba *mhba)
793 v += offset; 968 v += offset;
794 mhba->ib_list = v; 969 mhba->ib_list = v;
795 mhba->ib_list_phys = p; 970 mhba->ib_list_phys = p;
971 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
972 v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
973 p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
974 mhba->ib_frame = v;
975 mhba->ib_frame_phys = p;
976 }
796 v += mhba->ib_max_size * mhba->max_io; 977 v += mhba->ib_max_size * mhba->max_io;
797 p += mhba->ib_max_size * mhba->max_io; 978 p += mhba->ib_max_size * mhba->max_io;
979
798 /* ib shadow */ 980 /* ib shadow */
799 offset = round_up(p, 8) - p; 981 offset = round_up(p, 8) - p;
800 p += offset; 982 p += offset;
801 v += offset; 983 v += offset;
802 mhba->ib_shadow = v; 984 mhba->ib_shadow = v;
803 mhba->ib_shadow_phys = p; 985 mhba->ib_shadow_phys = p;
804 p += sizeof(u32); 986 p += sizeof(u32)*2;
805 v += sizeof(u32); 987 v += sizeof(u32)*2;
806 /* ob shadow */ 988 /* ob shadow */
807 offset = round_up(p, 8) - p; 989 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
808 p += offset; 990 offset = round_up(p, 8) - p;
809 v += offset; 991 p += offset;
810 mhba->ob_shadow = v; 992 v += offset;
811 mhba->ob_shadow_phys = p; 993 mhba->ob_shadow = v;
812 p += 8; 994 mhba->ob_shadow_phys = p;
813 v += 8; 995 p += 8;
996 v += 8;
997 } else {
998 offset = round_up(p, 4) - p;
999 p += offset;
1000 v += offset;
1001 mhba->ob_shadow = v;
1002 mhba->ob_shadow_phys = p;
1003 p += 4;
1004 v += 4;
1005 }
814 1006
815 /* ob list */ 1007 /* ob list */
816 offset = round_up(p, 128) - p; 1008 offset = round_up(p, 128) - p;
@@ -902,6 +1094,12 @@ static int mvumi_hs_process_page(struct mvumi_hba *mhba,
902 dev_dbg(&mhba->pdev->dev, "FW version:%d\n", 1094 dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
903 hs_page1->fw_ver.ver_build); 1095 hs_page1->fw_ver.ver_build);
904 1096
1097 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1098 mhba->eot_flag = 22;
1099 else
1100 mhba->eot_flag = 27;
1101 if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1102 mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
905 break; 1103 break;
906 default: 1104 default:
907 dev_err(&mhba->pdev->dev, "handshake: page code error\n"); 1105 dev_err(&mhba->pdev->dev, "handshake: page code error\n");
@@ -923,12 +1121,12 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
923{ 1121{
924 unsigned int hs_state, tmp, hs_fun; 1122 unsigned int hs_state, tmp, hs_fun;
925 struct mvumi_hs_header *hs_header; 1123 struct mvumi_hs_header *hs_header;
926 void *regs = mhba->mmio; 1124 struct mvumi_hw_regs *regs = mhba->regs;
927 1125
928 if (mhba->fw_state == FW_STATE_STARTING) 1126 if (mhba->fw_state == FW_STATE_STARTING)
929 hs_state = HS_S_START; 1127 hs_state = HS_S_START;
930 else { 1128 else {
931 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0); 1129 tmp = ioread32(regs->arm_to_pciea_msg0);
932 hs_state = HS_GET_STATE(tmp); 1130 hs_state = HS_GET_STATE(tmp);
933 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); 1131 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
934 if (HS_GET_STATUS(tmp) != HS_STATUS_OK) { 1132 if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
@@ -943,21 +1141,20 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
943 mhba->fw_state = FW_STATE_HANDSHAKING; 1141 mhba->fw_state = FW_STATE_HANDSHAKING;
944 HS_SET_STATUS(hs_fun, HS_STATUS_OK); 1142 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
945 HS_SET_STATE(hs_fun, HS_S_RESET); 1143 HS_SET_STATE(hs_fun, HS_S_RESET);
946 iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1); 1144 iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
947 iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); 1145 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
948 iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); 1146 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
949 break; 1147 break;
950 1148
951 case HS_S_RESET: 1149 case HS_S_RESET:
952 iowrite32(lower_32_bits(mhba->handshake_page_phys), 1150 iowrite32(lower_32_bits(mhba->handshake_page_phys),
953 regs + CPU_PCIEA_TO_ARM_MSG1); 1151 regs->pciea_to_arm_msg1);
954 iowrite32(upper_32_bits(mhba->handshake_page_phys), 1152 iowrite32(upper_32_bits(mhba->handshake_page_phys),
955 regs + CPU_ARM_TO_PCIEA_MSG1); 1153 regs->arm_to_pciea_msg1);
956 HS_SET_STATUS(hs_fun, HS_STATUS_OK); 1154 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
957 HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR); 1155 HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
958 iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); 1156 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
959 iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); 1157 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
960
961 break; 1158 break;
962 1159
963 case HS_S_PAGE_ADDR: 1160 case HS_S_PAGE_ADDR:
@@ -997,30 +1194,37 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
997 HS_SET_STATE(hs_fun, HS_S_END); 1194 HS_SET_STATE(hs_fun, HS_S_END);
998 1195
999 HS_SET_STATUS(hs_fun, HS_STATUS_OK); 1196 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1000 iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); 1197 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1001 iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); 1198 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1002 break; 1199 break;
1003 1200
1004 case HS_S_END: 1201 case HS_S_END:
1005 /* Set communication list ISR */ 1202 /* Set communication list ISR */
1006 tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG); 1203 tmp = ioread32(regs->enpointa_mask_reg);
1007 tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR; 1204 tmp |= regs->int_comaout | regs->int_comaerr;
1008 iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); 1205 iowrite32(tmp, regs->enpointa_mask_reg);
1009 iowrite32(mhba->list_num_io, mhba->ib_shadow); 1206 iowrite32(mhba->list_num_io, mhba->ib_shadow);
1010 /* Set InBound List Available count shadow */ 1207 /* Set InBound List Available count shadow */
1011 iowrite32(lower_32_bits(mhba->ib_shadow_phys), 1208 iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1012 regs + CLA_INB_AVAL_COUNT_BASEL); 1209 regs->inb_aval_count_basel);
1013 iowrite32(upper_32_bits(mhba->ib_shadow_phys), 1210 iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1014 regs + CLA_INB_AVAL_COUNT_BASEH); 1211 regs->inb_aval_count_baseh);
1015 1212
1016 /* Set OutBound List Available count shadow */ 1213 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1017 iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE, 1214 /* Set OutBound List Available count shadow */
1018 mhba->ob_shadow); 1215 iowrite32((mhba->list_num_io-1) |
1019 iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0); 1216 regs->cl_pointer_toggle,
1020 iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4); 1217 mhba->ob_shadow);
1218 iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1219 regs->outb_copy_basel);
1220 iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1221 regs->outb_copy_baseh);
1222 }
1021 1223
1022 mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; 1224 mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1023 mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; 1225 regs->cl_pointer_toggle;
1226 mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1227 regs->cl_pointer_toggle;
1024 mhba->fw_state = FW_STATE_STARTED; 1228 mhba->fw_state = FW_STATE_STARTED;
1025 1229
1026 break; 1230 break;
@@ -1040,7 +1244,7 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1040 before = jiffies; 1244 before = jiffies;
1041 mvumi_handshake(mhba); 1245 mvumi_handshake(mhba);
1042 do { 1246 do {
1043 isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio); 1247 isr_status = mhba->instancet->read_fw_status_reg(mhba);
1044 1248
1045 if (mhba->fw_state == FW_STATE_STARTED) 1249 if (mhba->fw_state == FW_STATE_STARTED)
1046 return 0; 1250 return 0;
@@ -1062,16 +1266,15 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1062 1266
1063static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) 1267static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1064{ 1268{
1065 void *regs = mhba->mmio;
1066 unsigned int tmp; 1269 unsigned int tmp;
1067 unsigned long before; 1270 unsigned long before;
1068 1271
1069 before = jiffies; 1272 before = jiffies;
1070 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); 1273 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1071 while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) { 1274 while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1072 if (tmp != HANDSHAKE_READYSTATE) 1275 if (tmp != HANDSHAKE_READYSTATE)
1073 iowrite32(DRBL_MU_RESET, 1276 iowrite32(DRBL_MU_RESET,
1074 regs + CPU_PCIEA_TO_ARM_DRBL_REG); 1277 mhba->regs->pciea_to_arm_drbl_reg);
1075 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { 1278 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1076 dev_err(&mhba->pdev->dev, 1279 dev_err(&mhba->pdev->dev,
1077 "invalid signature [0x%x].\n", tmp); 1280 "invalid signature [0x%x].\n", tmp);
@@ -1079,7 +1282,7 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1079 } 1282 }
1080 usleep_range(1000, 2000); 1283 usleep_range(1000, 2000);
1081 rmb(); 1284 rmb();
1082 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); 1285 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1083 } 1286 }
1084 1287
1085 mhba->fw_state = FW_STATE_STARTING; 1288 mhba->fw_state = FW_STATE_STARTING;
@@ -1100,15 +1303,17 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1100 1303
1101static unsigned char mvumi_start(struct mvumi_hba *mhba) 1304static unsigned char mvumi_start(struct mvumi_hba *mhba)
1102{ 1305{
1103 void *regs = mhba->mmio;
1104 unsigned int tmp; 1306 unsigned int tmp;
1307 struct mvumi_hw_regs *regs = mhba->regs;
1308
1105 /* clear Door bell */ 1309 /* clear Door bell */
1106 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); 1310 tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1107 iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG); 1311 iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1108 1312
1109 iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); 1313 iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1110 tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA; 1314 tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1111 iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); 1315 iowrite32(tmp, regs->enpointa_mask_reg);
1316 msleep(100);
1112 if (mvumi_check_handshake(mhba)) 1317 if (mvumi_check_handshake(mhba))
1113 return -1; 1318 return -1;
1114 1319
@@ -1166,6 +1371,7 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1166 cmd->scmd->scsi_done(scmd); 1371 cmd->scmd->scsi_done(scmd);
1167 mvumi_return_cmd(mhba, cmd); 1372 mvumi_return_cmd(mhba, cmd);
1168} 1373}
1374
1169static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, 1375static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1170 struct mvumi_cmd *cmd, 1376 struct mvumi_cmd *cmd,
1171 struct mvumi_rsp_frame *ob_frame) 1377 struct mvumi_rsp_frame *ob_frame)
@@ -1210,6 +1416,304 @@ static void mvumi_show_event(struct mvumi_hba *mhba,
1210 } 1416 }
1211} 1417}
1212 1418
1419static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1420{
1421 struct scsi_device *sdev;
1422 int ret = -1;
1423
1424 if (status == DEVICE_OFFLINE) {
1425 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1426 if (sdev) {
1427 dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1428 sdev->id, 0);
1429 scsi_remove_device(sdev);
1430 scsi_device_put(sdev);
1431 ret = 0;
1432 } else
1433 dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1434 devid);
1435 } else if (status == DEVICE_ONLINE) {
1436 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1437 if (!sdev) {
1438 scsi_add_device(mhba->shost, 0, devid, 0);
1439 dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1440 devid, 0);
1441 ret = 0;
1442 } else {
1443 dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1444 0, devid, 0);
1445 scsi_device_put(sdev);
1446 }
1447 }
1448 return ret;
1449}
1450
1451static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1452 unsigned int id, struct mvumi_cmd *cmd)
1453{
1454 struct mvumi_msg_frame *frame;
1455 u64 wwid = 0;
1456 int cmd_alloc = 0;
1457 int data_buf_len = 64;
1458
1459 if (!cmd) {
1460 cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1461 if (cmd)
1462 cmd_alloc = 1;
1463 else
1464 return 0;
1465 } else {
1466 memset(cmd->data_buf, 0, data_buf_len);
1467 }
1468 cmd->scmd = NULL;
1469 cmd->cmd_status = REQ_STATUS_PENDING;
1470 atomic_set(&cmd->sync_cmd, 0);
1471 frame = cmd->frame;
1472 frame->device_id = (u16) id;
1473 frame->cmd_flag = CMD_FLAG_DATA_IN;
1474 frame->req_function = CL_FUN_SCSI_CMD;
1475 frame->cdb_length = 6;
1476 frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1477 memset(frame->cdb, 0, frame->cdb_length);
1478 frame->cdb[0] = INQUIRY;
1479 frame->cdb[4] = frame->data_transfer_length;
1480
1481 mvumi_issue_blocked_cmd(mhba, cmd);
1482
1483 if (cmd->cmd_status == SAM_STAT_GOOD) {
1484 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1485 wwid = id + 1;
1486 else
1487 memcpy((void *)&wwid,
1488 (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1489 MVUMI_INQUIRY_UUID_LEN);
1490 dev_dbg(&mhba->pdev->dev,
1491 "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1492 } else {
1493 wwid = 0;
1494 }
1495 if (cmd_alloc)
1496 mvumi_delete_internal_cmd(mhba, cmd);
1497
1498 return wwid;
1499}
1500
1501static void mvumi_detach_devices(struct mvumi_hba *mhba)
1502{
1503 struct mvumi_device *mv_dev = NULL , *dev_next;
1504 struct scsi_device *sdev = NULL;
1505
1506 mutex_lock(&mhba->device_lock);
1507
1508 /* detach Hard Disk */
1509 list_for_each_entry_safe(mv_dev, dev_next,
1510 &mhba->shost_dev_list, list) {
1511 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1512 list_del_init(&mv_dev->list);
1513 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1514 mv_dev->id, mv_dev->wwid);
1515 kfree(mv_dev);
1516 }
1517 list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1518 list_del_init(&mv_dev->list);
1519 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1520 mv_dev->id, mv_dev->wwid);
1521 kfree(mv_dev);
1522 }
1523
1524 /* detach virtual device */
1525 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1526 sdev = scsi_device_lookup(mhba->shost, 0,
1527 mhba->max_target_id - 1, 0);
1528
1529 if (sdev) {
1530 scsi_remove_device(sdev);
1531 scsi_device_put(sdev);
1532 }
1533
1534 mutex_unlock(&mhba->device_lock);
1535}
1536
1537static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1538{
1539 struct scsi_device *sdev;
1540
1541 sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1542 if (sdev) {
1543 scsi_rescan_device(&sdev->sdev_gendev);
1544 scsi_device_put(sdev);
1545 }
1546}
1547
1548static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1549{
1550 struct mvumi_device *mv_dev = NULL;
1551
1552 list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1553 if (mv_dev->wwid == wwid) {
1554 if (mv_dev->id != id) {
1555 dev_err(&mhba->pdev->dev,
1556 "%s has same wwid[%llx] ,"
1557 " but different id[%d %d]\n",
1558 __func__, mv_dev->wwid, mv_dev->id, id);
1559 return -1;
1560 } else {
1561 if (mhba->pdev->device ==
1562 PCI_DEVICE_ID_MARVELL_MV9143)
1563 mvumi_rescan_devices(mhba, id);
1564 return 1;
1565 }
1566 }
1567 }
1568 return 0;
1569}
1570
1571static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1572{
1573 struct mvumi_device *mv_dev = NULL, *dev_next;
1574
1575 list_for_each_entry_safe(mv_dev, dev_next,
1576 &mhba->shost_dev_list, list) {
1577 if (mv_dev->id == id) {
1578 dev_dbg(&mhba->pdev->dev,
1579 "detach device(0:%d:0) wwid(%llx) from HOST\n",
1580 mv_dev->id, mv_dev->wwid);
1581 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1582 list_del_init(&mv_dev->list);
1583 kfree(mv_dev);
1584 }
1585 }
1586}
1587
1588static int mvumi_probe_devices(struct mvumi_hba *mhba)
1589{
1590 int id, maxid;
1591 u64 wwid = 0;
1592 struct mvumi_device *mv_dev = NULL;
1593 struct mvumi_cmd *cmd = NULL;
1594 int found = 0;
1595
1596 cmd = mvumi_create_internal_cmd(mhba, 64);
1597 if (!cmd)
1598 return -1;
1599
1600 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1601 maxid = mhba->max_target_id;
1602 else
1603 maxid = mhba->max_target_id - 1;
1604
1605 for (id = 0; id < maxid; id++) {
1606 wwid = mvumi_inquiry(mhba, id, cmd);
1607 if (!wwid) {
1608 /* device no response, remove it */
1609 mvumi_remove_devices(mhba, id);
1610 } else {
1611 /* device response, add it */
1612 found = mvumi_match_devices(mhba, id, wwid);
1613 if (!found) {
1614 mvumi_remove_devices(mhba, id);
1615 mv_dev = kzalloc(sizeof(struct mvumi_device),
1616 GFP_KERNEL);
1617 if (!mv_dev) {
1618 dev_err(&mhba->pdev->dev,
1619 "%s alloc mv_dev failed\n",
1620 __func__);
1621 continue;
1622 }
1623 mv_dev->id = id;
1624 mv_dev->wwid = wwid;
1625 mv_dev->sdev = NULL;
1626 INIT_LIST_HEAD(&mv_dev->list);
1627 list_add_tail(&mv_dev->list,
1628 &mhba->mhba_dev_list);
1629 dev_dbg(&mhba->pdev->dev,
1630 "probe a new device(0:%d:0)"
1631 " wwid(%llx)\n", id, mv_dev->wwid);
1632 } else if (found == -1)
1633 return -1;
1634 else
1635 continue;
1636 }
1637 }
1638
1639 if (cmd)
1640 mvumi_delete_internal_cmd(mhba, cmd);
1641
1642 return 0;
1643}
1644
/*
 * mvumi_rescan_bus - kernel thread that services hotplug rescan requests.
 * @data: the struct mvumi_hba * this thread belongs to
 *
 * The ISR path bumps pnp_count and wakes this thread; each pass re-probes
 * all targets and attaches newly found devices. The set_current_state /
 * atomic_read / schedule() ordering below avoids a lost-wakeup race and
 * must not be reordered.
 *
 * Return: always 0 when the thread is stopped via kthread_stop().
 */
static int mvumi_rescan_bus(void *data)
{
	int ret = 0;
	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
	struct mvumi_device *mv_dev = NULL , *dev_next;

	while (!kthread_should_stop()) {

		/* Mark sleepy BEFORE checking the wake condition so a
		 * wake_up_process() in between is not lost. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&mhba->pnp_count))
			schedule();
		/* Debounce: batch events arriving within one second, then
		 * consume the pending count in one pass. */
		msleep(1000);
		atomic_set(&mhba->pnp_count, 0);
		__set_current_state(TASK_RUNNING);

		/* device_lock serializes against detach/probe paths. */
		mutex_lock(&mhba->device_lock);
		ret = mvumi_probe_devices(mhba);
		if (!ret) {
			/* Attach every freshly probed device; on failure the
			 * entry is dropped, otherwise it moves to the
			 * host-visible list. */
			list_for_each_entry_safe(mv_dev, dev_next,
						&mhba->mhba_dev_list, list) {
				if (mvumi_handle_hotplug(mhba, mv_dev->id,
							DEVICE_ONLINE)) {
					dev_err(&mhba->pdev->dev,
						"%s add device(0:%d:0) failed"
						"wwid(%llx) has exist\n",
						__func__,
						mv_dev->id, mv_dev->wwid);
					list_del_init(&mv_dev->list);
					kfree(mv_dev);
				} else {
					list_move_tail(&mv_dev->list,
						&mhba->shost_dev_list);
				}
			}
		}
		mutex_unlock(&mhba->device_lock);
	}
	return 0;
}
1684
1685static void mvumi_proc_msg(struct mvumi_hba *mhba,
1686 struct mvumi_hotplug_event *param)
1687{
1688 u16 size = param->size;
1689 const unsigned long *ar_bitmap;
1690 const unsigned long *re_bitmap;
1691 int index;
1692
1693 if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1694 index = -1;
1695 ar_bitmap = (const unsigned long *) param->bitmap;
1696 re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1697
1698 mutex_lock(&mhba->sas_discovery_mutex);
1699 do {
1700 index = find_next_zero_bit(ar_bitmap, size, index + 1);
1701 if (index >= size)
1702 break;
1703 mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1704 } while (1);
1705
1706 index = -1;
1707 do {
1708 index = find_next_zero_bit(re_bitmap, size, index + 1);
1709 if (index >= size)
1710 break;
1711 mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1712 } while (1);
1713 mutex_unlock(&mhba->sas_discovery_mutex);
1714 }
1715}
1716
1213static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) 1717static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1214{ 1718{
1215 if (msg == APICDB1_EVENT_GETEVENT) { 1719 if (msg == APICDB1_EVENT_GETEVENT) {
@@ -1227,6 +1731,8 @@ static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1227 param = &er->events[i]; 1731 param = &er->events[i];
1228 mvumi_show_event(mhba, param); 1732 mvumi_show_event(mhba, param);
1229 } 1733 }
1734 } else if (msg == APICDB1_HOST_GETEVENT) {
1735 mvumi_proc_msg(mhba, buffer);
1230 } 1736 }
1231} 1737}
1232 1738
@@ -1271,17 +1777,27 @@ static void mvumi_scan_events(struct work_struct *work)
1271 kfree(mu_ev); 1777 kfree(mu_ev);
1272} 1778}
1273 1779
1274static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg) 1780static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1275{ 1781{
1276 struct mvumi_events_wq *mu_ev; 1782 struct mvumi_events_wq *mu_ev;
1277 1783
1278 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); 1784 while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1279 if (mu_ev) { 1785 if (isr_status & DRBL_BUS_CHANGE) {
1280 INIT_WORK(&mu_ev->work_q, mvumi_scan_events); 1786 atomic_inc(&mhba->pnp_count);
1281 mu_ev->mhba = mhba; 1787 wake_up_process(mhba->dm_thread);
1282 mu_ev->event = msg; 1788 isr_status &= ~(DRBL_BUS_CHANGE);
1283 mu_ev->param = NULL; 1789 continue;
1284 schedule_work(&mu_ev->work_q); 1790 }
1791
1792 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1793 if (mu_ev) {
1794 INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1795 mu_ev->mhba = mhba;
1796 mu_ev->event = APICDB1_EVENT_GETEVENT;
1797 isr_status &= ~(DRBL_EVENT_NOTIFY);
1798 mu_ev->param = NULL;
1799 schedule_work(&mu_ev->work_q);
1800 }
1285 } 1801 }
1286} 1802}
1287 1803
@@ -1322,16 +1838,17 @@ static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1322 return IRQ_NONE; 1838 return IRQ_NONE;
1323 } 1839 }
1324 1840
1325 if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) { 1841 if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1842 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1843 mvumi_launch_events(mhba, mhba->isr_status);
1326 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { 1844 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1327 dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); 1845 dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1328 mvumi_handshake(mhba); 1846 mvumi_handshake(mhba);
1329 } 1847 }
1330 if (mhba->isr_status & DRBL_EVENT_NOTIFY) 1848
1331 mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
1332 } 1849 }
1333 1850
1334 if (mhba->global_isr & INT_MAP_COMAOUT) 1851 if (mhba->global_isr & mhba->regs->int_comaout)
1335 mvumi_receive_ob_list_entry(mhba); 1852 mvumi_receive_ob_list_entry(mhba);
1336 1853
1337 mhba->global_isr = 0; 1854 mhba->global_isr = 0;
@@ -1358,8 +1875,7 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1358 dev_dbg(&mhba->pdev->dev, "no free tag.\n"); 1875 dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1359 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; 1876 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1360 } 1877 }
1361 if (mvumi_get_ib_list_entry(mhba, &ib_entry)) 1878 mvumi_get_ib_list_entry(mhba, &ib_entry);
1362 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1363 1879
1364 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); 1880 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1365 cmd->frame->request_id = mhba->io_seq++; 1881 cmd->frame->request_id = mhba->io_seq++;
@@ -1367,21 +1883,35 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1367 mhba->tag_cmd[cmd->frame->tag] = cmd; 1883 mhba->tag_cmd[cmd->frame->tag] = cmd;
1368 frame_len = sizeof(*ib_frame) - 4 + 1884 frame_len = sizeof(*ib_frame) - 4 +
1369 ib_frame->sg_counts * sizeof(struct mvumi_sgl); 1885 ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1370 memcpy(ib_entry, ib_frame, frame_len); 1886 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1887 struct mvumi_dyn_list_entry *dle;
1888 dle = ib_entry;
1889 dle->src_low_addr =
1890 cpu_to_le32(lower_32_bits(cmd->frame_phys));
1891 dle->src_high_addr =
1892 cpu_to_le32(upper_32_bits(cmd->frame_phys));
1893 dle->if_length = (frame_len >> 2) & 0xFFF;
1894 } else {
1895 memcpy(ib_entry, ib_frame, frame_len);
1896 }
1371 return MV_QUEUE_COMMAND_RESULT_SENT; 1897 return MV_QUEUE_COMMAND_RESULT_SENT;
1372} 1898}
1373 1899
1374static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) 1900static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1375{ 1901{
1376 unsigned short num_of_cl_sent = 0; 1902 unsigned short num_of_cl_sent = 0;
1903 unsigned int count;
1377 enum mvumi_qc_result result; 1904 enum mvumi_qc_result result;
1378 1905
1379 if (cmd) 1906 if (cmd)
1380 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); 1907 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1908 count = mhba->instancet->check_ib_list(mhba);
1909 if (list_empty(&mhba->waiting_req_list) || !count)
1910 return;
1381 1911
1382 while (!list_empty(&mhba->waiting_req_list)) { 1912 do {
1383 cmd = list_first_entry(&mhba->waiting_req_list, 1913 cmd = list_first_entry(&mhba->waiting_req_list,
1384 struct mvumi_cmd, queue_pointer); 1914 struct mvumi_cmd, queue_pointer);
1385 list_del_init(&cmd->queue_pointer); 1915 list_del_init(&cmd->queue_pointer);
1386 result = mvumi_send_command(mhba, cmd); 1916 result = mvumi_send_command(mhba, cmd);
1387 switch (result) { 1917 switch (result) {
@@ -1395,65 +1925,77 @@ static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1395 1925
1396 return; 1926 return;
1397 } 1927 }
1398 } 1928 } while (!list_empty(&mhba->waiting_req_list) && count--);
1929
1399 if (num_of_cl_sent > 0) 1930 if (num_of_cl_sent > 0)
1400 mvumi_send_ib_list_entry(mhba); 1931 mvumi_send_ib_list_entry(mhba);
1401} 1932}
1402 1933
1403/** 1934/**
1404 * mvumi_enable_intr - Enables interrupts 1935 * mvumi_enable_intr - Enables interrupts
1405 * @regs: FW register set 1936 * @mhba: Adapter soft state
1406 */ 1937 */
1407static void mvumi_enable_intr(void *regs) 1938static void mvumi_enable_intr(struct mvumi_hba *mhba)
1408{ 1939{
1409 unsigned int mask; 1940 unsigned int mask;
1941 struct mvumi_hw_regs *regs = mhba->regs;
1410 1942
1411 iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); 1943 iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1412 mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); 1944 mask = ioread32(regs->enpointa_mask_reg);
1413 mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR; 1945 mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1414 iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); 1946 iowrite32(mask, regs->enpointa_mask_reg);
1415} 1947}
1416 1948
1417/** 1949/**
1418 * mvumi_disable_intr -Disables interrupt 1950 * mvumi_disable_intr -Disables interrupt
1419 * @regs: FW register set 1951 * @mhba: Adapter soft state
1420 */ 1952 */
1421static void mvumi_disable_intr(void *regs) 1953static void mvumi_disable_intr(struct mvumi_hba *mhba)
1422{ 1954{
1423 unsigned int mask; 1955 unsigned int mask;
1956 struct mvumi_hw_regs *regs = mhba->regs;
1424 1957
1425 iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG); 1958 iowrite32(0, regs->arm_to_pciea_mask_reg);
1426 mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); 1959 mask = ioread32(regs->enpointa_mask_reg);
1427 mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR); 1960 mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1428 iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); 1961 regs->int_comaerr);
1962 iowrite32(mask, regs->enpointa_mask_reg);
1429} 1963}
1430 1964
1431static int mvumi_clear_intr(void *extend) 1965static int mvumi_clear_intr(void *extend)
1432{ 1966{
1433 struct mvumi_hba *mhba = (struct mvumi_hba *) extend; 1967 struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1434 unsigned int status, isr_status = 0, tmp = 0; 1968 unsigned int status, isr_status = 0, tmp = 0;
1435 void *regs = mhba->mmio; 1969 struct mvumi_hw_regs *regs = mhba->regs;
1436 1970
1437 status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG); 1971 status = ioread32(regs->main_int_cause_reg);
1438 if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF) 1972 if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1439 return 1; 1973 return 1;
1440 if (unlikely(status & INT_MAP_COMAERR)) { 1974 if (unlikely(status & regs->int_comaerr)) {
1441 tmp = ioread32(regs + CLA_ISR_CAUSE); 1975 tmp = ioread32(regs->outb_isr_cause);
1442 if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ)) 1976 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1443 iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ), 1977 if (tmp & regs->clic_out_err) {
1444 regs + CLA_ISR_CAUSE); 1978 iowrite32(tmp & regs->clic_out_err,
1445 status ^= INT_MAP_COMAERR; 1979 regs->outb_isr_cause);
1980 }
1981 } else {
1982 if (tmp & (regs->clic_in_err | regs->clic_out_err))
1983 iowrite32(tmp & (regs->clic_in_err |
1984 regs->clic_out_err),
1985 regs->outb_isr_cause);
1986 }
1987 status ^= mhba->regs->int_comaerr;
1446 /* inbound or outbound parity error, command will timeout */ 1988 /* inbound or outbound parity error, command will timeout */
1447 } 1989 }
1448 if (status & INT_MAP_COMAOUT) { 1990 if (status & regs->int_comaout) {
1449 tmp = ioread32(regs + CLA_ISR_CAUSE); 1991 tmp = ioread32(regs->outb_isr_cause);
1450 if (tmp & CLIC_OUT_IRQ) 1992 if (tmp & regs->clic_irq)
1451 iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE); 1993 iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1452 } 1994 }
1453 if (status & INT_MAP_DL_CPU2PCIEA) { 1995 if (status & regs->int_dl_cpu2pciea) {
1454 isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); 1996 isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1455 if (isr_status) 1997 if (isr_status)
1456 iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); 1998 iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1457 } 1999 }
1458 2000
1459 mhba->global_isr = status; 2001 mhba->global_isr = status;
@@ -1464,24 +2006,38 @@ static int mvumi_clear_intr(void *extend)
1464 2006
1465/** 2007/**
1466 * mvumi_read_fw_status_reg - returns the current FW status value 2008 * mvumi_read_fw_status_reg - returns the current FW status value
1467 * @regs: FW register set 2009 * @mhba: Adapter soft state
1468 */ 2010 */
1469static unsigned int mvumi_read_fw_status_reg(void *regs) 2011static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
1470{ 2012{
1471 unsigned int status; 2013 unsigned int status;
1472 2014
1473 status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); 2015 status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
1474 if (status) 2016 if (status)
1475 iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); 2017 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
1476 return status; 2018 return status;
1477} 2019}
1478 2020
1479static struct mvumi_instance_template mvumi_instance_template = { 2021static struct mvumi_instance_template mvumi_instance_9143 = {
1480 .fire_cmd = mvumi_fire_cmd, 2022 .fire_cmd = mvumi_fire_cmd,
1481 .enable_intr = mvumi_enable_intr, 2023 .enable_intr = mvumi_enable_intr,
1482 .disable_intr = mvumi_disable_intr, 2024 .disable_intr = mvumi_disable_intr,
1483 .clear_intr = mvumi_clear_intr, 2025 .clear_intr = mvumi_clear_intr,
1484 .read_fw_status_reg = mvumi_read_fw_status_reg, 2026 .read_fw_status_reg = mvumi_read_fw_status_reg,
2027 .check_ib_list = mvumi_check_ib_list_9143,
2028 .check_ob_list = mvumi_check_ob_list_9143,
2029 .reset_host = mvumi_reset_host_9143,
2030};
2031
2032static struct mvumi_instance_template mvumi_instance_9580 = {
2033 .fire_cmd = mvumi_fire_cmd,
2034 .enable_intr = mvumi_enable_intr,
2035 .disable_intr = mvumi_disable_intr,
2036 .clear_intr = mvumi_clear_intr,
2037 .read_fw_status_reg = mvumi_read_fw_status_reg,
2038 .check_ib_list = mvumi_check_ib_list_9580,
2039 .check_ob_list = mvumi_check_ob_list_9580,
2040 .reset_host = mvumi_reset_host_9580,
1485}; 2041};
1486 2042
1487static int mvumi_slave_configure(struct scsi_device *sdev) 2043static int mvumi_slave_configure(struct scsi_device *sdev)
@@ -1681,6 +2237,124 @@ static struct scsi_transport_template mvumi_transport_template = {
1681 .eh_timed_out = mvumi_timed_out, 2237 .eh_timed_out = mvumi_timed_out,
1682}; 2238};
1683 2239
2240static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2241{
2242 void *base = NULL;
2243 struct mvumi_hw_regs *regs;
2244
2245 switch (mhba->pdev->device) {
2246 case PCI_DEVICE_ID_MARVELL_MV9143:
2247 mhba->mmio = mhba->base_addr[0];
2248 base = mhba->mmio;
2249 if (!mhba->regs) {
2250 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2251 if (mhba->regs == NULL)
2252 return -ENOMEM;
2253 }
2254 regs = mhba->regs;
2255
2256 /* For Arm */
2257 regs->ctrl_sts_reg = base + 0x20104;
2258 regs->rstoutn_mask_reg = base + 0x20108;
2259 regs->sys_soft_rst_reg = base + 0x2010C;
2260 regs->main_int_cause_reg = base + 0x20200;
2261 regs->enpointa_mask_reg = base + 0x2020C;
2262 regs->rstoutn_en_reg = base + 0xF1400;
2263 /* For Doorbell */
2264 regs->pciea_to_arm_drbl_reg = base + 0x20400;
2265 regs->arm_to_pciea_drbl_reg = base + 0x20408;
2266 regs->arm_to_pciea_mask_reg = base + 0x2040C;
2267 regs->pciea_to_arm_msg0 = base + 0x20430;
2268 regs->pciea_to_arm_msg1 = base + 0x20434;
2269 regs->arm_to_pciea_msg0 = base + 0x20438;
2270 regs->arm_to_pciea_msg1 = base + 0x2043C;
2271
2272 /* For Message Unit */
2273
2274 regs->inb_aval_count_basel = base + 0x508;
2275 regs->inb_aval_count_baseh = base + 0x50C;
2276 regs->inb_write_pointer = base + 0x518;
2277 regs->inb_read_pointer = base + 0x51C;
2278 regs->outb_coal_cfg = base + 0x568;
2279 regs->outb_copy_basel = base + 0x5B0;
2280 regs->outb_copy_baseh = base + 0x5B4;
2281 regs->outb_copy_pointer = base + 0x544;
2282 regs->outb_read_pointer = base + 0x548;
2283 regs->outb_isr_cause = base + 0x560;
2284 regs->outb_coal_cfg = base + 0x568;
2285 /* Bit setting for HW */
2286 regs->int_comaout = 1 << 8;
2287 regs->int_comaerr = 1 << 6;
2288 regs->int_dl_cpu2pciea = 1 << 1;
2289 regs->cl_pointer_toggle = 1 << 12;
2290 regs->clic_irq = 1 << 1;
2291 regs->clic_in_err = 1 << 8;
2292 regs->clic_out_err = 1 << 12;
2293 regs->cl_slot_num_mask = 0xFFF;
2294 regs->int_drbl_int_mask = 0x3FFFFFFF;
2295 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2296 regs->int_comaerr;
2297 break;
2298 case PCI_DEVICE_ID_MARVELL_MV9580:
2299 mhba->mmio = mhba->base_addr[2];
2300 base = mhba->mmio;
2301 if (!mhba->regs) {
2302 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2303 if (mhba->regs == NULL)
2304 return -ENOMEM;
2305 }
2306 regs = mhba->regs;
2307 /* For Arm */
2308 regs->ctrl_sts_reg = base + 0x20104;
2309 regs->rstoutn_mask_reg = base + 0x1010C;
2310 regs->sys_soft_rst_reg = base + 0x10108;
2311 regs->main_int_cause_reg = base + 0x10200;
2312 regs->enpointa_mask_reg = base + 0x1020C;
2313 regs->rstoutn_en_reg = base + 0xF1400;
2314
2315 /* For Doorbell */
2316 regs->pciea_to_arm_drbl_reg = base + 0x10460;
2317 regs->arm_to_pciea_drbl_reg = base + 0x10480;
2318 regs->arm_to_pciea_mask_reg = base + 0x10484;
2319 regs->pciea_to_arm_msg0 = base + 0x10400;
2320 regs->pciea_to_arm_msg1 = base + 0x10404;
2321 regs->arm_to_pciea_msg0 = base + 0x10420;
2322 regs->arm_to_pciea_msg1 = base + 0x10424;
2323
2324 /* For reset*/
2325 regs->reset_request = base + 0x10108;
2326 regs->reset_enable = base + 0x1010c;
2327
2328 /* For Message Unit */
2329 regs->inb_aval_count_basel = base + 0x4008;
2330 regs->inb_aval_count_baseh = base + 0x400C;
2331 regs->inb_write_pointer = base + 0x4018;
2332 regs->inb_read_pointer = base + 0x401C;
2333 regs->outb_copy_basel = base + 0x4058;
2334 regs->outb_copy_baseh = base + 0x405C;
2335 regs->outb_copy_pointer = base + 0x406C;
2336 regs->outb_read_pointer = base + 0x4070;
2337 regs->outb_coal_cfg = base + 0x4080;
2338 regs->outb_isr_cause = base + 0x4088;
2339 /* Bit setting for HW */
2340 regs->int_comaout = 1 << 4;
2341 regs->int_dl_cpu2pciea = 1 << 12;
2342 regs->int_comaerr = 1 << 29;
2343 regs->cl_pointer_toggle = 1 << 14;
2344 regs->cl_slot_num_mask = 0x3FFF;
2345 regs->clic_irq = 1 << 0;
2346 regs->clic_out_err = 1 << 1;
2347 regs->int_drbl_int_mask = 0x3FFFFFFF;
2348 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2349 break;
2350 default:
2351 return -1;
2352 break;
2353 }
2354
2355 return 0;
2356}
2357
1684/** 2358/**
1685 * mvumi_init_fw - Initializes the FW 2359 * mvumi_init_fw - Initializes the FW
1686 * @mhba: Adapter soft state 2360 * @mhba: Adapter soft state
@@ -1699,15 +2373,18 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
1699 if (ret) 2373 if (ret)
1700 goto fail_ioremap; 2374 goto fail_ioremap;
1701 2375
1702 mhba->mmio = mhba->base_addr[0];
1703
1704 switch (mhba->pdev->device) { 2376 switch (mhba->pdev->device) {
1705 case PCI_DEVICE_ID_MARVELL_MV9143: 2377 case PCI_DEVICE_ID_MARVELL_MV9143:
1706 mhba->instancet = &mvumi_instance_template; 2378 mhba->instancet = &mvumi_instance_9143;
1707 mhba->io_seq = 0; 2379 mhba->io_seq = 0;
1708 mhba->max_sge = MVUMI_MAX_SG_ENTRY; 2380 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
1709 mhba->request_id_enabled = 1; 2381 mhba->request_id_enabled = 1;
1710 break; 2382 break;
2383 case PCI_DEVICE_ID_MARVELL_MV9580:
2384 mhba->instancet = &mvumi_instance_9580;
2385 mhba->io_seq = 0;
2386 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2387 break;
1711 default: 2388 default:
1712 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", 2389 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
1713 mhba->pdev->device); 2390 mhba->pdev->device);
@@ -1717,15 +2394,21 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
1717 } 2394 }
1718 dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", 2395 dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
1719 mhba->pdev->device); 2396 mhba->pdev->device);
1720 2397 ret = mvumi_cfg_hw_reg(mhba);
1721 mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL); 2398 if (ret) {
2399 dev_err(&mhba->pdev->dev,
2400 "failed to allocate memory for reg\n");
2401 ret = -ENOMEM;
2402 goto fail_alloc_mem;
2403 }
2404 mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
2405 &mhba->handshake_page_phys);
1722 if (!mhba->handshake_page) { 2406 if (!mhba->handshake_page) {
1723 dev_err(&mhba->pdev->dev, 2407 dev_err(&mhba->pdev->dev,
1724 "failed to allocate memory for handshake\n"); 2408 "failed to allocate memory for handshake\n");
1725 ret = -ENOMEM; 2409 ret = -ENOMEM;
1726 goto fail_alloc_mem; 2410 goto fail_alloc_page;
1727 } 2411 }
1728 mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);
1729 2412
1730 if (mvumi_start(mhba)) { 2413 if (mvumi_start(mhba)) {
1731 ret = -EINVAL; 2414 ret = -EINVAL;
@@ -1739,7 +2422,10 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
1739 2422
1740fail_ready_state: 2423fail_ready_state:
1741 mvumi_release_mem_resource(mhba); 2424 mvumi_release_mem_resource(mhba);
1742 kfree(mhba->handshake_page); 2425 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
2426 mhba->handshake_page, mhba->handshake_page_phys);
2427fail_alloc_page:
2428 kfree(mhba->regs);
1743fail_alloc_mem: 2429fail_alloc_mem:
1744 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); 2430 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
1745fail_ioremap: 2431fail_ioremap:
@@ -1755,6 +2441,7 @@ fail_ioremap:
1755static int mvumi_io_attach(struct mvumi_hba *mhba) 2441static int mvumi_io_attach(struct mvumi_hba *mhba)
1756{ 2442{
1757 struct Scsi_Host *host = mhba->shost; 2443 struct Scsi_Host *host = mhba->shost;
2444 struct scsi_device *sdev = NULL;
1758 int ret; 2445 int ret;
1759 unsigned int max_sg = (mhba->ib_max_size + 4 - 2446 unsigned int max_sg = (mhba->ib_max_size + 4 -
1760 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); 2447 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
@@ -1764,7 +2451,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
1764 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; 2451 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
1765 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; 2452 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
1766 host->max_sectors = mhba->max_transfer_size / 512; 2453 host->max_sectors = mhba->max_transfer_size / 512;
1767 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; 2454 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
1768 host->max_id = mhba->max_target_id; 2455 host->max_id = mhba->max_target_id;
1769 host->max_cmd_len = MAX_COMMAND_SIZE; 2456 host->max_cmd_len = MAX_COMMAND_SIZE;
1770 host->transportt = &mvumi_transport_template; 2457 host->transportt = &mvumi_transport_template;
@@ -1775,9 +2462,43 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
1775 return ret; 2462 return ret;
1776 } 2463 }
1777 mhba->fw_flag |= MVUMI_FW_ATTACH; 2464 mhba->fw_flag |= MVUMI_FW_ATTACH;
1778 scsi_scan_host(host);
1779 2465
2466 mutex_lock(&mhba->sas_discovery_mutex);
2467 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2468 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2469 else
2470 ret = 0;
2471 if (ret) {
2472 dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2473 mutex_unlock(&mhba->sas_discovery_mutex);
2474 goto fail_add_device;
2475 }
2476
2477 mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2478 mhba, "mvumi_scanthread");
2479 if (IS_ERR(mhba->dm_thread)) {
2480 dev_err(&mhba->pdev->dev,
2481 "failed to create device scan thread\n");
2482 mutex_unlock(&mhba->sas_discovery_mutex);
2483 goto fail_create_thread;
2484 }
2485 atomic_set(&mhba->pnp_count, 1);
2486 wake_up_process(mhba->dm_thread);
2487
2488 mutex_unlock(&mhba->sas_discovery_mutex);
1780 return 0; 2489 return 0;
2490
2491fail_create_thread:
2492 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2493 sdev = scsi_device_lookup(mhba->shost, 0,
2494 mhba->max_target_id - 1, 0);
2495 if (sdev) {
2496 scsi_remove_device(sdev);
2497 scsi_device_put(sdev);
2498 }
2499fail_add_device:
2500 scsi_remove_host(mhba->shost);
2501 return ret;
1781} 2502}
1782 2503
1783/** 2504/**
@@ -1828,8 +2549,12 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev,
1828 INIT_LIST_HEAD(&mhba->free_ob_list); 2549 INIT_LIST_HEAD(&mhba->free_ob_list);
1829 INIT_LIST_HEAD(&mhba->res_list); 2550 INIT_LIST_HEAD(&mhba->res_list);
1830 INIT_LIST_HEAD(&mhba->waiting_req_list); 2551 INIT_LIST_HEAD(&mhba->waiting_req_list);
2552 mutex_init(&mhba->device_lock);
2553 INIT_LIST_HEAD(&mhba->mhba_dev_list);
2554 INIT_LIST_HEAD(&mhba->shost_dev_list);
1831 atomic_set(&mhba->fw_outstanding, 0); 2555 atomic_set(&mhba->fw_outstanding, 0);
1832 init_waitqueue_head(&mhba->int_cmd_wait_q); 2556 init_waitqueue_head(&mhba->int_cmd_wait_q);
2557 mutex_init(&mhba->sas_discovery_mutex);
1833 2558
1834 mhba->pdev = pdev; 2559 mhba->pdev = pdev;
1835 mhba->shost = host; 2560 mhba->shost = host;
@@ -1845,19 +2570,22 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev,
1845 dev_err(&pdev->dev, "failed to register IRQ\n"); 2570 dev_err(&pdev->dev, "failed to register IRQ\n");
1846 goto fail_init_irq; 2571 goto fail_init_irq;
1847 } 2572 }
1848 mhba->instancet->enable_intr(mhba->mmio); 2573
2574 mhba->instancet->enable_intr(mhba);
1849 pci_set_drvdata(pdev, mhba); 2575 pci_set_drvdata(pdev, mhba);
1850 2576
1851 ret = mvumi_io_attach(mhba); 2577 ret = mvumi_io_attach(mhba);
1852 if (ret) 2578 if (ret)
1853 goto fail_io_attach; 2579 goto fail_io_attach;
2580
2581 mvumi_backup_bar_addr(mhba);
1854 dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n"); 2582 dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
1855 2583
1856 return 0; 2584 return 0;
1857 2585
1858fail_io_attach: 2586fail_io_attach:
1859 pci_set_drvdata(pdev, NULL); 2587 pci_set_drvdata(pdev, NULL);
1860 mhba->instancet->disable_intr(mhba->mmio); 2588 mhba->instancet->disable_intr(mhba);
1861 free_irq(mhba->pdev->irq, mhba); 2589 free_irq(mhba->pdev->irq, mhba);
1862fail_init_irq: 2590fail_init_irq:
1863 mvumi_release_fw(mhba); 2591 mvumi_release_fw(mhba);
@@ -1877,11 +2605,17 @@ static void mvumi_detach_one(struct pci_dev *pdev)
1877 struct mvumi_hba *mhba; 2605 struct mvumi_hba *mhba;
1878 2606
1879 mhba = pci_get_drvdata(pdev); 2607 mhba = pci_get_drvdata(pdev);
2608 if (mhba->dm_thread) {
2609 kthread_stop(mhba->dm_thread);
2610 mhba->dm_thread = NULL;
2611 }
2612
2613 mvumi_detach_devices(mhba);
1880 host = mhba->shost; 2614 host = mhba->shost;
1881 scsi_remove_host(mhba->shost); 2615 scsi_remove_host(mhba->shost);
1882 mvumi_flush_cache(mhba); 2616 mvumi_flush_cache(mhba);
1883 2617
1884 mhba->instancet->disable_intr(mhba->mmio); 2618 mhba->instancet->disable_intr(mhba);
1885 free_irq(mhba->pdev->irq, mhba); 2619 free_irq(mhba->pdev->irq, mhba);
1886 mvumi_release_fw(mhba); 2620 mvumi_release_fw(mhba);
1887 scsi_host_put(host); 2621 scsi_host_put(host);
@@ -1909,7 +2643,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
1909 mvumi_flush_cache(mhba); 2643 mvumi_flush_cache(mhba);
1910 2644
1911 pci_set_drvdata(pdev, mhba); 2645 pci_set_drvdata(pdev, mhba);
1912 mhba->instancet->disable_intr(mhba->mmio); 2646 mhba->instancet->disable_intr(mhba);
1913 free_irq(mhba->pdev->irq, mhba); 2647 free_irq(mhba->pdev->irq, mhba);
1914 mvumi_unmap_pci_addr(pdev, mhba->base_addr); 2648 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
1915 pci_release_regions(pdev); 2649 pci_release_regions(pdev);
@@ -1956,8 +2690,13 @@ static int mvumi_resume(struct pci_dev *pdev)
1956 if (ret) 2690 if (ret)
1957 goto release_regions; 2691 goto release_regions;
1958 2692
2693 if (mvumi_cfg_hw_reg(mhba)) {
2694 ret = -EINVAL;
2695 goto unmap_pci_addr;
2696 }
2697
1959 mhba->mmio = mhba->base_addr[0]; 2698 mhba->mmio = mhba->base_addr[0];
1960 mvumi_reset(mhba->mmio); 2699 mvumi_reset(mhba);
1961 2700
1962 if (mvumi_start(mhba)) { 2701 if (mvumi_start(mhba)) {
1963 ret = -EINVAL; 2702 ret = -EINVAL;
@@ -1970,7 +2709,7 @@ static int mvumi_resume(struct pci_dev *pdev)
1970 dev_err(&pdev->dev, "failed to register IRQ\n"); 2709 dev_err(&pdev->dev, "failed to register IRQ\n");
1971 goto unmap_pci_addr; 2710 goto unmap_pci_addr;
1972 } 2711 }
1973 mhba->instancet->enable_intr(mhba->mmio); 2712 mhba->instancet->enable_intr(mhba);
1974 2713
1975 return 0; 2714 return 0;
1976 2715
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index 10b9237566f0..e360135fd1bd 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -34,51 +34,87 @@
34#define MV_DRIVER_NAME "mvumi" 34#define MV_DRIVER_NAME "mvumi"
35#define PCI_VENDOR_ID_MARVELL_2 0x1b4b 35#define PCI_VENDOR_ID_MARVELL_2 0x1b4b
36#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143 36#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143
37#define PCI_DEVICE_ID_MARVELL_MV9580 0x9580
37 38
38#define MVUMI_INTERNAL_CMD_WAIT_TIME 45 39#define MVUMI_INTERNAL_CMD_WAIT_TIME 45
40#define MVUMI_INQUIRY_LENGTH 44
41#define MVUMI_INQUIRY_UUID_OFF 36
42#define MVUMI_INQUIRY_UUID_LEN 8
39 43
40#define IS_DMA64 (sizeof(dma_addr_t) == 8) 44#define IS_DMA64 (sizeof(dma_addr_t) == 8)
41 45
42enum mvumi_qc_result { 46enum mvumi_qc_result {
43 MV_QUEUE_COMMAND_RESULT_SENT = 0, 47 MV_QUEUE_COMMAND_RESULT_SENT = 0,
44 MV_QUEUE_COMMAND_RESULT_NO_RESOURCE, 48 MV_QUEUE_COMMAND_RESULT_NO_RESOURCE,
45}; 49};
46 50
47enum { 51struct mvumi_hw_regs {
48 /*******************************************/ 52 /* For CPU */
49 53 void *main_int_cause_reg;
50 /* ARM Mbus Registers Map */ 54 void *enpointa_mask_reg;
51 55 void *enpointb_mask_reg;
52 /*******************************************/ 56 void *rstoutn_en_reg;
53 CPU_MAIN_INT_CAUSE_REG = 0x20200, 57 void *ctrl_sts_reg;
54 CPU_MAIN_IRQ_MASK_REG = 0x20204, 58 void *rstoutn_mask_reg;
55 CPU_MAIN_FIQ_MASK_REG = 0x20208, 59 void *sys_soft_rst_reg;
56 CPU_ENPOINTA_MASK_REG = 0x2020C, 60
57 CPU_ENPOINTB_MASK_REG = 0x20210, 61 /* For Doorbell */
58 62 void *pciea_to_arm_drbl_reg;
59 INT_MAP_COMAERR = 1 << 6, 63 void *arm_to_pciea_drbl_reg;
60 INT_MAP_COMAIN = 1 << 7, 64 void *arm_to_pciea_mask_reg;
61 INT_MAP_COMAOUT = 1 << 8, 65 void *pciea_to_arm_msg0;
62 INT_MAP_COMBERR = 1 << 9, 66 void *pciea_to_arm_msg1;
63 INT_MAP_COMBIN = 1 << 10, 67 void *arm_to_pciea_msg0;
64 INT_MAP_COMBOUT = 1 << 11, 68 void *arm_to_pciea_msg1;
65 69
66 INT_MAP_COMAINT = (INT_MAP_COMAOUT | INT_MAP_COMAERR), 70 /* reset register */
67 INT_MAP_COMBINT = (INT_MAP_COMBOUT | INT_MAP_COMBIN | INT_MAP_COMBERR), 71 void *reset_request;
68 72 void *reset_enable;
69 INT_MAP_DL_PCIEA2CPU = 1 << 0, 73
70 INT_MAP_DL_CPU2PCIEA = 1 << 1, 74 /* For Message Unit */
71 75 void *inb_list_basel;
72 /***************************************/ 76 void *inb_list_baseh;
77 void *inb_aval_count_basel;
78 void *inb_aval_count_baseh;
79 void *inb_write_pointer;
80 void *inb_read_pointer;
81 void *outb_list_basel;
82 void *outb_list_baseh;
83 void *outb_copy_basel;
84 void *outb_copy_baseh;
85 void *outb_copy_pointer;
86 void *outb_read_pointer;
87 void *inb_isr_cause;
88 void *outb_isr_cause;
89 void *outb_coal_cfg;
90 void *outb_coal_timeout;
91
92 /* Bit setting for HW */
93 u32 int_comaout;
94 u32 int_comaerr;
95 u32 int_dl_cpu2pciea;
96 u32 int_mu;
97 u32 int_drbl_int_mask;
98 u32 int_main_int_mask;
99 u32 cl_pointer_toggle;
100 u32 cl_slot_num_mask;
101 u32 clic_irq;
102 u32 clic_in_err;
103 u32 clic_out_err;
104};
73 105
74 /* ARM Doorbell Registers Map */ 106struct mvumi_dyn_list_entry {
107 u32 src_low_addr;
108 u32 src_high_addr;
109 u32 if_length;
110 u32 reserve;
111};
75 112
76 /***************************************/ 113#define SCSI_CMD_MARVELL_SPECIFIC 0xE1
77 CPU_PCIEA_TO_ARM_DRBL_REG = 0x20400, 114#define CDB_CORE_MODULE 0x1
78 CPU_PCIEA_TO_ARM_MASK_REG = 0x20404, 115#define CDB_CORE_SHUTDOWN 0xB
79 CPU_ARM_TO_PCIEA_DRBL_REG = 0x20408,
80 CPU_ARM_TO_PCIEA_MASK_REG = 0x2040C,
81 116
117enum {
82 DRBL_HANDSHAKE = 1 << 0, 118 DRBL_HANDSHAKE = 1 << 0,
83 DRBL_SOFT_RESET = 1 << 1, 119 DRBL_SOFT_RESET = 1 << 1,
84 DRBL_BUS_CHANGE = 1 << 2, 120 DRBL_BUS_CHANGE = 1 << 2,
@@ -86,46 +122,6 @@ enum {
86 DRBL_MU_RESET = 1 << 4, 122 DRBL_MU_RESET = 1 << 4,
87 DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE, 123 DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE,
88 124
89 CPU_PCIEA_TO_ARM_MSG0 = 0x20430,
90 CPU_PCIEA_TO_ARM_MSG1 = 0x20434,
91 CPU_ARM_TO_PCIEA_MSG0 = 0x20438,
92 CPU_ARM_TO_PCIEA_MSG1 = 0x2043C,
93
94 /*******************************************/
95
96 /* ARM Communication List Registers Map */
97
98 /*******************************************/
99 CLA_INB_LIST_BASEL = 0x500,
100 CLA_INB_LIST_BASEH = 0x504,
101 CLA_INB_AVAL_COUNT_BASEL = 0x508,
102 CLA_INB_AVAL_COUNT_BASEH = 0x50C,
103 CLA_INB_DESTI_LIST_BASEL = 0x510,
104 CLA_INB_DESTI_LIST_BASEH = 0x514,
105 CLA_INB_WRITE_POINTER = 0x518,
106 CLA_INB_READ_POINTER = 0x51C,
107
108 CLA_OUTB_LIST_BASEL = 0x530,
109 CLA_OUTB_LIST_BASEH = 0x534,
110 CLA_OUTB_SOURCE_LIST_BASEL = 0x538,
111 CLA_OUTB_SOURCE_LIST_BASEH = 0x53C,
112 CLA_OUTB_COPY_POINTER = 0x544,
113 CLA_OUTB_READ_POINTER = 0x548,
114
115 CLA_ISR_CAUSE = 0x560,
116 CLA_ISR_MASK = 0x564,
117
118 INT_MAP_MU = (INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAINT),
119
120 CL_POINTER_TOGGLE = 1 << 12,
121
122 CLIC_IN_IRQ = 1 << 0,
123 CLIC_OUT_IRQ = 1 << 1,
124 CLIC_IN_ERR_IRQ = 1 << 8,
125 CLIC_OUT_ERR_IRQ = 1 << 12,
126
127 CL_SLOT_NUM_MASK = 0xFFF,
128
129 /* 125 /*
130 * Command flag is the flag for the CDB command itself 126 * Command flag is the flag for the CDB command itself
131 */ 127 */
@@ -137,15 +133,23 @@ enum {
137 CMD_FLAG_DATA_IN = 1 << 3, 133 CMD_FLAG_DATA_IN = 1 << 3,
138 /* 1-host write data */ 134 /* 1-host write data */
139 CMD_FLAG_DATA_OUT = 1 << 4, 135 CMD_FLAG_DATA_OUT = 1 << 4,
140 136 CMD_FLAG_PRDT_IN_HOST = 1 << 5,
141 SCSI_CMD_MARVELL_SPECIFIC = 0xE1,
142 CDB_CORE_SHUTDOWN = 0xB,
143}; 137};
144 138
145#define APICDB0_EVENT 0xF4 139#define APICDB0_EVENT 0xF4
146#define APICDB1_EVENT_GETEVENT 0 140#define APICDB1_EVENT_GETEVENT 0
141#define APICDB1_HOST_GETEVENT 1
147#define MAX_EVENTS_RETURNED 6 142#define MAX_EVENTS_RETURNED 6
148 143
144#define DEVICE_OFFLINE 0
145#define DEVICE_ONLINE 1
146
147struct mvumi_hotplug_event {
148 u16 size;
149 u8 dummy[2];
150 u8 bitmap[0];
151};
152
149struct mvumi_driver_event { 153struct mvumi_driver_event {
150 u32 time_stamp; 154 u32 time_stamp;
151 u32 sequence_no; 155 u32 sequence_no;
@@ -172,8 +176,14 @@ struct mvumi_events_wq {
172 void *param; 176 void *param;
173}; 177};
174 178
179#define HS_CAPABILITY_SUPPORT_COMPACT_SG (1U << 4)
180#define HS_CAPABILITY_SUPPORT_PRD_HOST (1U << 5)
181#define HS_CAPABILITY_SUPPORT_DYN_SRC (1U << 6)
182#define HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF (1U << 14)
183
175#define MVUMI_MAX_SG_ENTRY 32 184#define MVUMI_MAX_SG_ENTRY 32
176#define SGD_EOT (1L << 27) 185#define SGD_EOT (1L << 27)
186#define SGD_EOT_CP (1L << 22)
177 187
178struct mvumi_sgl { 188struct mvumi_sgl {
179 u32 baseaddr_l; 189 u32 baseaddr_l;
@@ -181,6 +191,39 @@ struct mvumi_sgl {
181 u32 flags; 191 u32 flags;
182 u32 size; 192 u32 size;
183}; 193};
194struct mvumi_compact_sgl {
195 u32 baseaddr_l;
196 u32 baseaddr_h;
197 u32 flags;
198};
199
200#define GET_COMPACT_SGD_SIZE(sgd) \
201 ((((struct mvumi_compact_sgl *)(sgd))->flags) & 0x3FFFFFL)
202
203#define SET_COMPACT_SGD_SIZE(sgd, sz) do { \
204 (((struct mvumi_compact_sgl *)(sgd))->flags) &= ~0x3FFFFFL; \
205 (((struct mvumi_compact_sgl *)(sgd))->flags) |= (sz); \
206} while (0)
207#define sgd_getsz(_mhba, sgd, sz) do { \
208 if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
209 (sz) = GET_COMPACT_SGD_SIZE(sgd); \
210 else \
211 (sz) = (sgd)->size; \
212} while (0)
213
214#define sgd_setsz(_mhba, sgd, sz) do { \
215 if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
216 SET_COMPACT_SGD_SIZE(sgd, sz); \
217 else \
218 (sgd)->size = (sz); \
219} while (0)
220
221#define sgd_inc(_mhba, sgd) do { \
222 if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
223 sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 12); \
224 else \
225 sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 16); \
226} while (0)
184 227
185struct mvumi_res { 228struct mvumi_res {
186 struct list_head entry; 229 struct list_head entry;
@@ -197,7 +240,7 @@ enum resource_type {
197}; 240};
198 241
199struct mvumi_sense_data { 242struct mvumi_sense_data {
200 u8 error_eode:7; 243 u8 error_code:7;
201 u8 valid:1; 244 u8 valid:1;
202 u8 segment_number; 245 u8 segment_number;
203 u8 sense_key:4; 246 u8 sense_key:4;
@@ -220,6 +263,7 @@ struct mvumi_sense_data {
220struct mvumi_cmd { 263struct mvumi_cmd {
221 struct list_head queue_pointer; 264 struct list_head queue_pointer;
222 struct mvumi_msg_frame *frame; 265 struct mvumi_msg_frame *frame;
266 dma_addr_t frame_phys;
223 struct scsi_cmnd *scmd; 267 struct scsi_cmnd *scmd;
224 atomic_t sync_cmd; 268 atomic_t sync_cmd;
225 void *data_buf; 269 void *data_buf;
@@ -393,7 +437,8 @@ struct mvumi_hs_page2 {
393 u16 frame_length; 437 u16 frame_length;
394 438
395 u8 host_type; 439 u8 host_type;
396 u8 reserved[3]; 440 u8 host_cap;
441 u8 reserved[2];
397 struct version_info host_ver; 442 struct version_info host_ver;
398 u32 system_io_bus; 443 u32 system_io_bus;
399 u32 slot_number; 444 u32 slot_number;
@@ -435,8 +480,17 @@ struct mvumi_tag {
435 unsigned short size; 480 unsigned short size;
436}; 481};
437 482
483struct mvumi_device {
484 struct list_head list;
485 struct scsi_device *sdev;
486 u64 wwid;
487 u8 dev_type;
488 int id;
489};
490
438struct mvumi_hba { 491struct mvumi_hba {
439 void *base_addr[MAX_BASE_ADDRESS]; 492 void *base_addr[MAX_BASE_ADDRESS];
493 u32 pci_base[MAX_BASE_ADDRESS];
440 void *mmio; 494 void *mmio;
441 struct list_head cmd_pool; 495 struct list_head cmd_pool;
442 struct Scsi_Host *shost; 496 struct Scsi_Host *shost;
@@ -449,6 +503,9 @@ struct mvumi_hba {
449 void *ib_list; 503 void *ib_list;
450 dma_addr_t ib_list_phys; 504 dma_addr_t ib_list_phys;
451 505
506 void *ib_frame;
507 dma_addr_t ib_frame_phys;
508
452 void *ob_list; 509 void *ob_list;
453 dma_addr_t ob_list_phys; 510 dma_addr_t ob_list_phys;
454 511
@@ -477,12 +534,14 @@ struct mvumi_hba {
477 unsigned char hba_total_pages; 534 unsigned char hba_total_pages;
478 unsigned char fw_flag; 535 unsigned char fw_flag;
479 unsigned char request_id_enabled; 536 unsigned char request_id_enabled;
537 unsigned char eot_flag;
480 unsigned short hba_capability; 538 unsigned short hba_capability;
481 unsigned short io_seq; 539 unsigned short io_seq;
482 540
483 unsigned int ib_cur_slot; 541 unsigned int ib_cur_slot;
484 unsigned int ob_cur_slot; 542 unsigned int ob_cur_slot;
485 unsigned int fw_state; 543 unsigned int fw_state;
544 struct mutex sas_discovery_mutex;
486 545
487 struct list_head ob_data_list; 546 struct list_head ob_data_list;
488 struct list_head free_ob_list; 547 struct list_head free_ob_list;
@@ -491,14 +550,24 @@ struct mvumi_hba {
491 550
492 struct mvumi_tag tag_pool; 551 struct mvumi_tag tag_pool;
493 struct mvumi_cmd **tag_cmd; 552 struct mvumi_cmd **tag_cmd;
553 struct mvumi_hw_regs *regs;
554 struct mutex device_lock;
555 struct list_head mhba_dev_list;
556 struct list_head shost_dev_list;
557 struct task_struct *dm_thread;
558 atomic_t pnp_count;
494}; 559};
495 560
496struct mvumi_instance_template { 561struct mvumi_instance_template {
497 void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *); 562 void (*fire_cmd) (struct mvumi_hba *, struct mvumi_cmd *);
498 void (*enable_intr)(void *) ; 563 void (*enable_intr) (struct mvumi_hba *);
499 void (*disable_intr)(void *); 564 void (*disable_intr) (struct mvumi_hba *);
500 int (*clear_intr)(void *); 565 int (*clear_intr) (void *);
501 unsigned int (*read_fw_status_reg)(void *); 566 unsigned int (*read_fw_status_reg) (struct mvumi_hba *);
567 unsigned int (*check_ib_list) (struct mvumi_hba *);
568 int (*check_ob_list) (struct mvumi_hba *, unsigned int *,
569 unsigned int *);
570 int (*reset_host) (struct mvumi_hba *);
502}; 571};
503 572
504extern struct timezone sys_tz; 573extern struct timezone sys_tz;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 799a58bb9859..48fca47384b7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2080,6 +2080,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2080 uint8_t domain; 2080 uint8_t domain;
2081 char connect_type[22]; 2081 char connect_type[22];
2082 struct qla_hw_data *ha = vha->hw; 2082 struct qla_hw_data *ha = vha->hw;
2083 unsigned long flags;
2083 2084
2084 /* Get host addresses. */ 2085 /* Get host addresses. */
2085 rval = qla2x00_get_adapter_id(vha, 2086 rval = qla2x00_get_adapter_id(vha,
@@ -2154,9 +2155,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2154 vha->d_id.b.area = area; 2155 vha->d_id.b.area = area;
2155 vha->d_id.b.al_pa = al_pa; 2156 vha->d_id.b.al_pa = al_pa;
2156 2157
2157 spin_lock(&ha->vport_slock); 2158 spin_lock_irqsave(&ha->vport_slock, flags);
2158 qlt_update_vp_map(vha, SET_AL_PA); 2159 qlt_update_vp_map(vha, SET_AL_PA);
2159 spin_unlock(&ha->vport_slock); 2160 spin_unlock_irqrestore(&ha->vport_slock, flags);
2160 2161
2161 if (!vha->flags.init_done) 2162 if (!vha->flags.init_done)
2162 ql_log(ql_log_info, vha, 0x2010, 2163 ql_log(ql_log_info, vha, 0x2010,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 57fbd5a3d4e2..5cda11c07c68 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2055,7 +2055,7 @@ static void unmap_region(sector_t lba, unsigned int len)
2055 block = lba + alignment; 2055 block = lba + alignment;
2056 rem = do_div(block, granularity); 2056 rem = do_div(block, granularity);
2057 2057
2058 if (rem == 0 && lba + granularity <= end && block < map_size) { 2058 if (rem == 0 && lba + granularity < end && block < map_size) {
2059 clear_bit(block, map_storep); 2059 clear_bit(block, map_storep);
2060 if (scsi_debug_lbprz) 2060 if (scsi_debug_lbprz)
2061 memset(fake_storep + 2061 memset(fake_storep +
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index de2337f255a7..c1b05a83d403 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -789,7 +789,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
789 int cmnd_size, int timeout, unsigned sense_bytes) 789 int cmnd_size, int timeout, unsigned sense_bytes)
790{ 790{
791 struct scsi_device *sdev = scmd->device; 791 struct scsi_device *sdev = scmd->device;
792 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
793 struct Scsi_Host *shost = sdev->host; 792 struct Scsi_Host *shost = sdev->host;
794 DECLARE_COMPLETION_ONSTACK(done); 793 DECLARE_COMPLETION_ONSTACK(done);
795 unsigned long timeleft; 794 unsigned long timeleft;
@@ -845,8 +844,11 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
845 844
846 scsi_eh_restore_cmnd(scmd, &ses); 845 scsi_eh_restore_cmnd(scmd, &ses);
847 846
848 if (sdrv && sdrv->eh_action) 847 if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
849 rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn); 848 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
849 if (sdrv->eh_action)
850 rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
851 }
850 852
851 return rtn; 853 return rtn;
852} 854}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 528d52beaa1c..01440782feb2 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1221,7 +1221,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
1221 /* 1221 /*
1222 * At this point, all outstanding requests in the adapter 1222 * At this point, all outstanding requests in the adapter
1223 * should have been flushed out and return to us 1223 * should have been flushed out and return to us
1224 * There is a potential race here where the host may be in
1225 * the process of responding when we return from here.
1226 * Just wait for all in-transit packets to be accounted for
1227 * before we return from here.
1224 */ 1228 */
1229 storvsc_wait_to_drain(stor_device);
1225 1230
1226 return SUCCESS; 1231 return SUCCESS;
1227} 1232}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 3e79a2f00042..595af1ae4421 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -219,7 +219,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
219 struct scatterlist sg; 219 struct scatterlist sg;
220 unsigned long flags; 220 unsigned long flags;
221 221
222 sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); 222 sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
223 223
224 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); 224 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
225 225
@@ -279,6 +279,31 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
279 } 279 }
280} 280}
281 281
282static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
283 struct virtio_scsi_event *event)
284{
285 struct scsi_device *sdev;
286 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
287 unsigned int target = event->lun[1];
288 unsigned int lun = (event->lun[2] << 8) | event->lun[3];
289 u8 asc = event->reason & 255;
290 u8 ascq = event->reason >> 8;
291
292 sdev = scsi_device_lookup(shost, 0, target, lun);
293 if (!sdev) {
294 pr_err("SCSI device %d 0 %d %d not found\n",
295 shost->host_no, target, lun);
296 return;
297 }
298
299 /* Handle "Parameters changed", "Mode parameters changed", and
300 "Capacity data has changed". */
301 if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
302 scsi_rescan_device(&sdev->sdev_gendev);
303
304 scsi_device_put(sdev);
305}
306
282static void virtscsi_handle_event(struct work_struct *work) 307static void virtscsi_handle_event(struct work_struct *work)
283{ 308{
284 struct virtio_scsi_event_node *event_node = 309 struct virtio_scsi_event_node *event_node =
@@ -297,6 +322,9 @@ static void virtscsi_handle_event(struct work_struct *work)
297 case VIRTIO_SCSI_T_TRANSPORT_RESET: 322 case VIRTIO_SCSI_T_TRANSPORT_RESET:
298 virtscsi_handle_transport_reset(vscsi, event); 323 virtscsi_handle_transport_reset(vscsi, event);
299 break; 324 break;
325 case VIRTIO_SCSI_T_PARAM_CHANGE:
326 virtscsi_handle_param_change(vscsi, event);
327 break;
300 default: 328 default:
301 pr_err("Unsupport virtio scsi event %x\n", event->event); 329 pr_err("Unsupport virtio scsi event %x\n", event->event);
302 } 330 }
@@ -677,7 +705,11 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
677 cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; 705 cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
678 shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); 706 shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
679 shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; 707 shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
680 shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1; 708
709 /* LUNs > 256 are reported with format 1, so they go in the range
710 * 16640-32767.
711 */
712 shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
681 shost->max_id = num_targets; 713 shost->max_id = num_targets;
682 shost->max_channel = 0; 714 shost->max_channel = 0;
683 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; 715 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
@@ -733,7 +765,8 @@ static struct virtio_device_id id_table[] = {
733}; 765};
734 766
735static unsigned int features[] = { 767static unsigned int features[] = {
736 VIRTIO_SCSI_F_HOTPLUG 768 VIRTIO_SCSI_F_HOTPLUG,
769 VIRTIO_SCSI_F_CHANGE,
737}; 770};
738 771
739static struct virtio_driver virtio_scsi_driver = { 772static struct virtio_driver virtio_scsi_driver = {
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
index dc8d305b0e05..d6b4440387b7 100644
--- a/include/linux/virtio_scsi.h
+++ b/include/linux/virtio_scsi.h
@@ -72,6 +72,7 @@ struct virtio_scsi_config {
72/* Feature Bits */ 72/* Feature Bits */
73#define VIRTIO_SCSI_F_INOUT 0 73#define VIRTIO_SCSI_F_INOUT 0
74#define VIRTIO_SCSI_F_HOTPLUG 1 74#define VIRTIO_SCSI_F_HOTPLUG 1
75#define VIRTIO_SCSI_F_CHANGE 2
75 76
76/* Response codes */ 77/* Response codes */
77#define VIRTIO_SCSI_S_OK 0 78#define VIRTIO_SCSI_S_OK 0
@@ -108,6 +109,7 @@ struct virtio_scsi_config {
108#define VIRTIO_SCSI_T_NO_EVENT 0 109#define VIRTIO_SCSI_T_NO_EVENT 0
109#define VIRTIO_SCSI_T_TRANSPORT_RESET 1 110#define VIRTIO_SCSI_T_TRANSPORT_RESET 1
110#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2 111#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2
112#define VIRTIO_SCSI_T_PARAM_CHANGE 3
111 113
112/* Reasons of transport reset event */ 114/* Reasons of transport reset event */
113#define VIRTIO_SCSI_EVT_RESET_HARD 0 115#define VIRTIO_SCSI_EVT_RESET_HARD 0
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h
index 0d7d67e96d43..9c8702942b61 100644
--- a/include/scsi/fc/fc_fcp.h
+++ b/include/scsi/fc/fc_fcp.h
@@ -127,6 +127,9 @@ struct fcp_txrdy {
127 * 127 *
128 * All response frames will always contain the fcp_resp template. Some 128 * All response frames will always contain the fcp_resp template. Some
129 * will also include the fcp_resp_len template. 129 * will also include the fcp_resp_len template.
130 *
131 * From Table 23, the FCP_RSP_INFO can either be 4 bytes or 8 bytes, both
132 * are valid length.
130 */ 133 */
131struct fcp_resp { 134struct fcp_resp {
132 __u8 _fr_resvd[8]; /* reserved */ 135 __u8 _fr_resvd[8]; /* reserved */
@@ -156,6 +159,9 @@ struct fcp_resp_rsp_info {
156 __u8 _fr_resvd2[4]; /* reserved */ 159 __u8 _fr_resvd2[4]; /* reserved */
157}; 160};
158 161
162#define FCP_RESP_RSP_INFO_LEN4 4 /* without reserved field */
163#define FCP_RESP_RSP_INFO_LEN8 8 /* with reserved field */
164
159struct fcp_resp_with_ext { 165struct fcp_resp_with_ext {
160 struct fcp_resp resp; 166 struct fcp_resp resp;
161 struct fcp_resp_ext ext; 167 struct fcp_resp_ext ext;
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 22b07cc99808..8742d853a3b8 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -327,7 +327,6 @@ struct fcoe_percpu_s {
327 * @lport: The associated local port 327 * @lport: The associated local port
328 * @fcoe_pending_queue: The pending Rx queue of skbs 328 * @fcoe_pending_queue: The pending Rx queue of skbs
329 * @fcoe_pending_queue_active: Indicates if the pending queue is active 329 * @fcoe_pending_queue_active: Indicates if the pending queue is active
330 * @priority: Packet priority (DCB)
331 * @max_queue_depth: Max queue depth of pending queue 330 * @max_queue_depth: Max queue depth of pending queue
332 * @min_queue_depth: Min queue depth of pending queue 331 * @min_queue_depth: Min queue depth of pending queue
333 * @timer: The queue timer 332 * @timer: The queue timer
@@ -343,7 +342,6 @@ struct fcoe_port {
343 struct fc_lport *lport; 342 struct fc_lport *lport;
344 struct sk_buff_head fcoe_pending_queue; 343 struct sk_buff_head fcoe_pending_queue;
345 u8 fcoe_pending_queue_active; 344 u8 fcoe_pending_queue_active;
346 u8 priority;
347 u32 max_queue_depth; 345 u32 max_queue_depth;
348 u32 min_queue_depth; 346 u32 min_queue_depth;
349 struct timer_list timer; 347 struct timer_list timer;
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index ac06cc595890..de5f5d8f1f8a 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -132,18 +132,10 @@ struct scsi_cmnd {
132 unsigned char tag; /* SCSI-II queued command tag */ 132 unsigned char tag; /* SCSI-II queued command tag */
133}; 133};
134 134
135/* make sure not to use it with REQ_TYPE_BLOCK_PC commands */
135static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) 136static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
136{ 137{
137 struct scsi_driver **sdp; 138 return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
138
139 if (!cmd->request->rq_disk)
140 return NULL;
141
142 sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
143 if (!sdp)
144 return NULL;
145
146 return *sdp;
147} 139}
148 140
149extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t); 141extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);