aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2011-07-23 14:13:11 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-07-23 14:13:11 -0400
commitd4e06701b89286a306b31e20ec69a904fae374a1 (patch)
treef6adefd65b021ccddb7655109ea8b9ab4e714292
parente4980371059ca4a81ccdcb4381c41af8869ca711 (diff)
parent87045b033a62777337ae4aa62834876da09b5fb5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (77 commits) [SCSI] fix crash in scsi_dispatch_cmd() [SCSI] sr: check_events() ignore GET_EVENT when TUR says otherwise [SCSI] bnx2i: Fixed kernel panic due to illegal usage of sc->request->cpu [SCSI] bfa: Update the driver version to 3.0.2.1 [SCSI] bfa: Driver and BSG enhancements. [SCSI] bfa: Added support to query PHY. [SCSI] bfa: Added HBA diagnostics support. [SCSI] bfa: Added support for flash configuration [SCSI] bfa: Added support to obtain SFP info. [SCSI] bfa: Added support for CEE info and stats query. [SCSI] bfa: Extend BSG interface. [SCSI] bfa: FCS bug fixes. [SCSI] bfa: DMA memory allocation enhancement. [SCSI] bfa: Brocade-1860 Fabric Adapter vHBA support. [SCSI] bfa: Brocade-1860 Fabric Adapter PLL init fixes. [SCSI] bfa: Added Fabric Assigned Address(FAA) support [SCSI] bfa: IOC bug fixes. [SCSI] bfa: Enable ASIC block configuration and query. [SCSI] bnx2i: Updated copyright and bump version [SCSI] bnx2i: Modified to skip CNIC registration if iSCSI is not supported ... Fix up some trivial conflicts in: - drivers/scsi/bnx2fc/{bnx2fc.h,bnx2fc_fcoe.c}: Crazy broadcom version number conflicts - drivers/target/tcm_fc/tfc_cmd.c Just trivial cleanups done on adjacent lines
-rw-r--r--MAINTAINERS9
-rw-r--r--block/blk-core.c3
-rw-r--r--block/blk-exec.c7
-rw-r--r--drivers/firmware/iscsi_ibft.c14
-rw-r--r--drivers/scsi/aha152x.c17
-rw-r--r--drivers/scsi/atari_NCR5380.c6
-rw-r--r--drivers/scsi/atari_scsi.c1
-rw-r--r--drivers/scsi/be2iscsi/be_main.c199
-rw-r--r--drivers/scsi/bfa/Makefile2
-rw-r--r--drivers/scsi/bfa/bfa.h144
-rw-r--r--drivers/scsi/bfa/bfa_core.c878
-rw-r--r--drivers/scsi/bfa/bfa_defs.h583
-rw-r--r--drivers/scsi/bfa/bfa_defs_fcs.h27
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h170
-rw-r--r--drivers/scsi/bfa/bfa_fc.h11
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c49
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h16
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c478
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h89
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c153
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h31
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c10
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c329
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c121
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c94
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c89
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c2992
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h609
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c69
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c516
-rw-r--r--drivers/scsi/bfa/bfa_modules.h27
-rw-r--r--drivers/scsi/bfa/bfa_port.c428
-rw-r--r--drivers/scsi/bfa/bfa_port.h62
-rw-r--r--drivers/scsi/bfa/bfa_svc.c1136
-rw-r--r--drivers/scsi/bfa/bfa_svc.h151
-rw-r--r--drivers/scsi/bfa/bfad.c295
-rw-r--r--drivers/scsi/bfa/bfad_attr.c53
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c2163
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h509
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c14
-rw-r--r--drivers/scsi/bfa/bfad_drv.h26
-rw-r--r--drivers/scsi/bfa/bfad_im.c32
-rw-r--r--drivers/scsi/bfa/bfad_im.h3
-rw-r--r--drivers/scsi/bfa/bfi.h637
-rw-r--r--drivers/scsi/bfa/bfi_cbreg.h305
-rw-r--r--drivers/scsi/bfa/bfi_ctreg.h636
-rw-r--r--drivers/scsi/bfa/bfi_ms.h159
-rw-r--r--drivers/scsi/bfa/bfi_reg.h450
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c31
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c24
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c2
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h2
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h33
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c199
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c153
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c38
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c174
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_main.c21
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c2
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c31
-rw-r--r--drivers/scsi/iscsi_tcp.c61
-rw-r--r--drivers/scsi/libfc/fc_exch.c26
-rw-r--r--drivers/scsi/libfc/fc_lport.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c14
-rw-r--r--drivers/scsi/libiscsi.c14
-rw-r--r--drivers/scsi/libiscsi_tcp.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c3
-rw-r--r--drivers/scsi/mac_scsi.c14
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h12
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h74
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h6
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h4
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c84
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h77
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c12
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c279
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c24
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/ses.c6
-rw-r--r--drivers/scsi/sr.c46
-rw-r--r--drivers/scsi/sr.h7
-rw-r--r--drivers/scsi/sun3_NCR5380.c98
-rw-r--r--drivers/scsi/sun3_scsi.c11
-rw-r--r--drivers/scsi/sun3_scsi_vme.c11
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c26
-rw-r--r--include/linux/iscsi_boot_sysfs.h16
-rw-r--r--include/scsi/iscsi_proto.h18
-rw-r--r--include/scsi/libfc.h8
-rw-r--r--include/scsi/libiscsi.h2
96 files changed, 13311 insertions, 3194 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 75214405d61a..612316c352a2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1553,6 +1553,12 @@ L: linux-wireless@vger.kernel.org
1553S: Supported 1553S: Supported
1554F: drivers/staging/brcm80211/ 1554F: drivers/staging/brcm80211/
1555 1555
1556BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
1557M: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
1558L: linux-scsi@vger.kernel.org
1559S: Supported
1560F: drivers/scsi/bnx2fc/
1561
1556BROCADE BFA FC SCSI DRIVER 1562BROCADE BFA FC SCSI DRIVER
1557M: Jing Huang <huangj@brocade.com> 1563M: Jing Huang <huangj@brocade.com>
1558L: linux-scsi@vger.kernel.org 1564L: linux-scsi@vger.kernel.org
@@ -1775,7 +1781,8 @@ F: include/linux/clk.h
1775 1781
1776CISCO FCOE HBA DRIVER 1782CISCO FCOE HBA DRIVER
1777M: Abhijeet Joglekar <abjoglek@cisco.com> 1783M: Abhijeet Joglekar <abjoglek@cisco.com>
1778M: Joe Eykholt <jeykholt@cisco.com> 1784M: Venkata Siva Vijayendra Bhamidipati <vbhamidi@cisco.com>
1785M: Brian Uchino <buchino@cisco.com>
1779L: linux-scsi@vger.kernel.org 1786L: linux-scsi@vger.kernel.org
1780S: Supported 1787S: Supported
1781F: drivers/scsi/fnic/ 1788F: drivers/scsi/fnic/
diff --git a/block/blk-core.c b/block/blk-core.c
index d2f8f4049abd..1d49e1c7c905 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -839,6 +839,9 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
839{ 839{
840 struct request *rq; 840 struct request *rq;
841 841
842 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
843 return NULL;
844
842 BUG_ON(rw != READ && rw != WRITE); 845 BUG_ON(rw != READ && rw != WRITE);
843 846
844 spin_lock_irq(q->queue_lock); 847 spin_lock_irq(q->queue_lock);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 8a0e7ec056e7..a1ebceb332f9 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -50,6 +50,13 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
50{ 50{
51 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 51 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
52 52
53 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
54 rq->errors = -ENXIO;
55 if (rq->end_io)
56 rq->end_io(rq, rq->errors);
57 return;
58 }
59
53 rq->rq_disk = bd_disk; 60 rq->rq_disk = bd_disk;
54 rq->end_io = done; 61 rq->end_io = done;
55 WARN_ON(irqs_disabled()); 62 WARN_ON(irqs_disabled());
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index ce33f4626957..c811cb107904 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -566,6 +566,11 @@ static mode_t __init ibft_check_initiator_for(void *data, int type)
566 return rc; 566 return rc;
567} 567}
568 568
569static void ibft_kobj_release(void *data)
570{
571 kfree(data);
572}
573
569/* 574/*
570 * Helper function for ibft_register_kobjects. 575 * Helper function for ibft_register_kobjects.
571 */ 576 */
@@ -595,7 +600,8 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
595 boot_kobj = iscsi_boot_create_initiator(boot_kset, hdr->index, 600 boot_kobj = iscsi_boot_create_initiator(boot_kset, hdr->index,
596 ibft_kobj, 601 ibft_kobj,
597 ibft_attr_show_initiator, 602 ibft_attr_show_initiator,
598 ibft_check_initiator_for); 603 ibft_check_initiator_for,
604 ibft_kobj_release);
599 if (!boot_kobj) { 605 if (!boot_kobj) {
600 rc = -ENOMEM; 606 rc = -ENOMEM;
601 goto free_ibft_obj; 607 goto free_ibft_obj;
@@ -610,7 +616,8 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
610 boot_kobj = iscsi_boot_create_ethernet(boot_kset, hdr->index, 616 boot_kobj = iscsi_boot_create_ethernet(boot_kset, hdr->index,
611 ibft_kobj, 617 ibft_kobj,
612 ibft_attr_show_nic, 618 ibft_attr_show_nic,
613 ibft_check_nic_for); 619 ibft_check_nic_for,
620 ibft_kobj_release);
614 if (!boot_kobj) { 621 if (!boot_kobj) {
615 rc = -ENOMEM; 622 rc = -ENOMEM;
616 goto free_ibft_obj; 623 goto free_ibft_obj;
@@ -625,7 +632,8 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
625 boot_kobj = iscsi_boot_create_target(boot_kset, hdr->index, 632 boot_kobj = iscsi_boot_create_target(boot_kset, hdr->index,
626 ibft_kobj, 633 ibft_kobj,
627 ibft_attr_show_target, 634 ibft_attr_show_target,
628 ibft_check_tgt_for); 635 ibft_check_tgt_for,
636 ibft_kobj_release);
629 if (!boot_kobj) { 637 if (!boot_kobj) {
630 rc = -ENOMEM; 638 rc = -ENOMEM;
631 goto free_ibft_obj; 639 goto free_ibft_obj;
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index c5169f01c1cd..f17c92cf808b 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -422,10 +422,19 @@ MODULE_PARM_DESC(aha152x1, "parameters for second controller");
422 422
423#ifdef __ISAPNP__ 423#ifdef __ISAPNP__
424static struct isapnp_device_id id_table[] __devinitdata = { 424static struct isapnp_device_id id_table[] __devinitdata = {
425 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, 425 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1502), 0 },
426 ISAPNP_VENDOR('A','D','P'), ISAPNP_FUNCTION(0x1505), 0 }, 426 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1505), 0 },
427 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, 427 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1510), 0 },
428 ISAPNP_VENDOR('A','D','P'), ISAPNP_FUNCTION(0x1530), 0 }, 428 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1515), 0 },
429 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1520), 0 },
430 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2015), 0 },
431 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1522), 0 },
432 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2215), 0 },
433 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1530), 0 },
434 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3015), 0 },
435 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1532), 0 },
436 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3215), 0 },
437 { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x6360), 0 },
429 { ISAPNP_DEVICE_SINGLE_END, } 438 { ISAPNP_DEVICE_SINGLE_END, }
430}; 439};
431MODULE_DEVICE_TABLE(isapnp, id_table); 440MODULE_DEVICE_TABLE(isapnp, id_table);
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index ea439f93ed81..2db79b469d9e 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -892,6 +892,11 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
892 return 0; 892 return 0;
893} 893}
894 894
895static void NCR5380_exit(struct Scsi_Host *instance)
896{
897 /* Empty, as we didn't schedule any delayed work */
898}
899
895/* 900/*
896 * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd, 901 * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
897 * void (*done)(Scsi_Cmnd *)) 902 * void (*done)(Scsi_Cmnd *))
@@ -914,7 +919,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
914{ 919{
915 SETUP_HOSTDATA(cmd->device->host); 920 SETUP_HOSTDATA(cmd->device->host);
916 Scsi_Cmnd *tmp; 921 Scsi_Cmnd *tmp;
917 int oldto;
918 unsigned long flags; 922 unsigned long flags;
919 923
920#if (NDEBUG & NDEBUG_NO_WRITE) 924#if (NDEBUG & NDEBUG_NO_WRITE)
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 3e8658e2f154..04a154f87e3e 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -730,6 +730,7 @@ int atari_scsi_release(struct Scsi_Host *sh)
730 free_irq(IRQ_TT_MFP_SCSI, sh); 730 free_irq(IRQ_TT_MFP_SCSI, sh);
731 if (atari_dma_buffer) 731 if (atari_dma_buffer)
732 atari_stram_free(atari_dma_buffer); 732 atari_stram_free(atari_dma_buffer);
733 NCR5380_exit(sh);
733 return 1; 734 return 1;
734} 735}
735 736
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 94b9a07845d5..0a9bdfa3d939 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -215,73 +215,62 @@ unlock:
215static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 215static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
216{ 216{
217 struct beiscsi_hba *phba = data; 217 struct beiscsi_hba *phba = data;
218 struct mgmt_session_info *boot_sess = &phba->boot_sess;
219 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
218 char *str = buf; 220 char *str = buf;
219 int rc; 221 int rc;
220 222
221 switch (type) { 223 switch (type) {
222 case ISCSI_BOOT_TGT_NAME: 224 case ISCSI_BOOT_TGT_NAME:
223 rc = sprintf(buf, "%.*s\n", 225 rc = sprintf(buf, "%.*s\n",
224 (int)strlen(phba->boot_sess.target_name), 226 (int)strlen(boot_sess->target_name),
225 (char *)&phba->boot_sess.target_name); 227 (char *)&boot_sess->target_name);
226 break; 228 break;
227 case ISCSI_BOOT_TGT_IP_ADDR: 229 case ISCSI_BOOT_TGT_IP_ADDR:
228 if (phba->boot_sess.conn_list[0].dest_ipaddr.ip_type == 0x1) 230 if (boot_conn->dest_ipaddr.ip_type == 0x1)
229 rc = sprintf(buf, "%pI4\n", 231 rc = sprintf(buf, "%pI4\n",
230 (char *)&phba->boot_sess.conn_list[0]. 232 (char *)&boot_conn->dest_ipaddr.ip_address);
231 dest_ipaddr.ip_address);
232 else 233 else
233 rc = sprintf(str, "%pI6\n", 234 rc = sprintf(str, "%pI6\n",
234 (char *)&phba->boot_sess.conn_list[0]. 235 (char *)&boot_conn->dest_ipaddr.ip_address);
235 dest_ipaddr.ip_address);
236 break; 236 break;
237 case ISCSI_BOOT_TGT_PORT: 237 case ISCSI_BOOT_TGT_PORT:
238 rc = sprintf(str, "%d\n", phba->boot_sess.conn_list[0]. 238 rc = sprintf(str, "%d\n", boot_conn->dest_port);
239 dest_port);
240 break; 239 break;
241 240
242 case ISCSI_BOOT_TGT_CHAP_NAME: 241 case ISCSI_BOOT_TGT_CHAP_NAME:
243 rc = sprintf(str, "%.*s\n", 242 rc = sprintf(str, "%.*s\n",
244 phba->boot_sess.conn_list[0]. 243 boot_conn->negotiated_login_options.auth_data.chap.
245 negotiated_login_options.auth_data.chap. 244 target_chap_name_length,
246 target_chap_name_length, 245 (char *)&boot_conn->negotiated_login_options.
247 (char *)&phba->boot_sess.conn_list[0]. 246 auth_data.chap.target_chap_name);
248 negotiated_login_options.auth_data.chap.
249 target_chap_name);
250 break; 247 break;
251 case ISCSI_BOOT_TGT_CHAP_SECRET: 248 case ISCSI_BOOT_TGT_CHAP_SECRET:
252 rc = sprintf(str, "%.*s\n", 249 rc = sprintf(str, "%.*s\n",
253 phba->boot_sess.conn_list[0]. 250 boot_conn->negotiated_login_options.auth_data.chap.
254 negotiated_login_options.auth_data.chap. 251 target_secret_length,
255 target_secret_length, 252 (char *)&boot_conn->negotiated_login_options.
256 (char *)&phba->boot_sess.conn_list[0]. 253 auth_data.chap.target_secret);
257 negotiated_login_options.auth_data.chap.
258 target_secret);
259
260 break; 254 break;
261 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 255 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
262 rc = sprintf(str, "%.*s\n", 256 rc = sprintf(str, "%.*s\n",
263 phba->boot_sess.conn_list[0]. 257 boot_conn->negotiated_login_options.auth_data.chap.
264 negotiated_login_options.auth_data.chap. 258 intr_chap_name_length,
265 intr_chap_name_length, 259 (char *)&boot_conn->negotiated_login_options.
266 (char *)&phba->boot_sess.conn_list[0]. 260 auth_data.chap.intr_chap_name);
267 negotiated_login_options.auth_data.chap.
268 intr_chap_name);
269
270 break; 261 break;
271 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 262 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
272 rc = sprintf(str, "%.*s\n", 263 rc = sprintf(str, "%.*s\n",
273 phba->boot_sess.conn_list[0]. 264 boot_conn->negotiated_login_options.auth_data.chap.
274 negotiated_login_options.auth_data.chap. 265 intr_secret_length,
275 intr_secret_length, 266 (char *)&boot_conn->negotiated_login_options.
276 (char *)&phba->boot_sess.conn_list[0]. 267 auth_data.chap.intr_secret);
277 negotiated_login_options.auth_data.chap.
278 intr_secret);
279 break; 268 break;
280 case ISCSI_BOOT_TGT_FLAGS: 269 case ISCSI_BOOT_TGT_FLAGS:
281 rc = sprintf(str, "2\n"); 270 rc = sprintf(str, "2\n");
282 break; 271 break;
283 case ISCSI_BOOT_TGT_NIC_ASSOC: 272 case ISCSI_BOOT_TGT_NIC_ASSOC:
284 rc = sprintf(str, "0\n"); 273 rc = sprintf(str, "0\n");
285 break; 274 break;
286 default: 275 default:
287 rc = -ENOSYS; 276 rc = -ENOSYS;
@@ -315,10 +304,10 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
315 304
316 switch (type) { 305 switch (type) {
317 case ISCSI_BOOT_ETH_FLAGS: 306 case ISCSI_BOOT_ETH_FLAGS:
318 rc = sprintf(str, "2\n"); 307 rc = sprintf(str, "2\n");
319 break; 308 break;
320 case ISCSI_BOOT_ETH_INDEX: 309 case ISCSI_BOOT_ETH_INDEX:
321 rc = sprintf(str, "0\n"); 310 rc = sprintf(str, "0\n");
322 break; 311 break;
323 case ISCSI_BOOT_ETH_MAC: 312 case ISCSI_BOOT_ETH_MAC:
324 rc = beiscsi_get_macaddr(buf, phba); 313 rc = beiscsi_get_macaddr(buf, phba);
@@ -391,40 +380,6 @@ static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
391 return rc; 380 return rc;
392} 381}
393 382
394static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
395{
396 struct iscsi_boot_kobj *boot_kobj;
397
398 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
399 if (!phba->boot_kset)
400 return -ENOMEM;
401
402 /* get boot info using mgmt cmd */
403 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
404 beiscsi_show_boot_tgt_info,
405 beiscsi_tgt_get_attr_visibility);
406 if (!boot_kobj)
407 goto free_kset;
408
409 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
410 beiscsi_show_boot_ini_info,
411 beiscsi_ini_get_attr_visibility);
412 if (!boot_kobj)
413 goto free_kset;
414
415 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
416 beiscsi_show_boot_eth_info,
417 beiscsi_eth_get_attr_visibility);
418 if (!boot_kobj)
419 goto free_kset;
420 return 0;
421
422free_kset:
423 if (phba->boot_kset)
424 iscsi_boot_destroy_kset(phba->boot_kset);
425 return -ENOMEM;
426}
427
428/*------------------- PCI Driver operations and data ----------------- */ 383/*------------------- PCI Driver operations and data ----------------- */
429static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { 384static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
430 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 385 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -483,14 +438,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
483 if (iscsi_host_add(shost, &phba->pcidev->dev)) 438 if (iscsi_host_add(shost, &phba->pcidev->dev))
484 goto free_devices; 439 goto free_devices;
485 440
486 if (beiscsi_setup_boot_info(phba))
487 /*
488 * log error but continue, because we may not be using
489 * iscsi boot.
490 */
491 shost_printk(KERN_ERR, phba->shost, "Could not set up "
492 "iSCSI boot info.");
493
494 return phba; 441 return phba;
495 442
496free_devices: 443free_devices:
@@ -3511,6 +3458,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3511 unsigned int tag, wrb_num; 3458 unsigned int tag, wrb_num;
3512 unsigned short status, extd_status; 3459 unsigned short status, extd_status;
3513 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; 3460 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
3461 int ret = -ENOMEM;
3514 3462
3515 tag = beiscsi_get_boot_target(phba); 3463 tag = beiscsi_get_boot_target(phba);
3516 if (!tag) { 3464 if (!tag) {
@@ -3535,8 +3483,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3535 boot_resp = embedded_payload(wrb); 3483 boot_resp = embedded_payload(wrb);
3536 3484
3537 if (boot_resp->boot_session_handle < 0) { 3485 if (boot_resp->boot_session_handle < 0) {
3538 printk(KERN_ERR "No Boot Session for this pci_func," 3486 shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
3539 "session Hndl = %d\n", boot_resp->boot_session_handle);
3540 return -ENXIO; 3487 return -ENXIO;
3541 } 3488 }
3542 3489
@@ -3574,14 +3521,70 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3574 wrb = queue_get_wrb(mccq, wrb_num); 3521 wrb = queue_get_wrb(mccq, wrb_num);
3575 free_mcc_tag(&phba->ctrl, tag); 3522 free_mcc_tag(&phba->ctrl, tag);
3576 session_resp = nonemb_cmd.va ; 3523 session_resp = nonemb_cmd.va ;
3524
3577 memcpy(&phba->boot_sess, &session_resp->session_info, 3525 memcpy(&phba->boot_sess, &session_resp->session_info,
3578 sizeof(struct mgmt_session_info)); 3526 sizeof(struct mgmt_session_info));
3579 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 3527 ret = 0;
3580 nonemb_cmd.va, nonemb_cmd.dma); 3528
3581 return 0;
3582boot_freemem: 3529boot_freemem:
3583 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 3530 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3584 nonemb_cmd.va, nonemb_cmd.dma); 3531 nonemb_cmd.va, nonemb_cmd.dma);
3532 return ret;
3533}
3534
3535static void beiscsi_boot_release(void *data)
3536{
3537 struct beiscsi_hba *phba = data;
3538
3539 scsi_host_put(phba->shost);
3540}
3541
3542static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
3543{
3544 struct iscsi_boot_kobj *boot_kobj;
3545
3546 /* get boot info using mgmt cmd */
3547 if (beiscsi_get_boot_info(phba))
3548 /* Try to see if we can carry on without this */
3549 return 0;
3550
3551 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
3552 if (!phba->boot_kset)
3553 return -ENOMEM;
3554
3555 /* get a ref because the show function will ref the phba */
3556 if (!scsi_host_get(phba->shost))
3557 goto free_kset;
3558 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
3559 beiscsi_show_boot_tgt_info,
3560 beiscsi_tgt_get_attr_visibility,
3561 beiscsi_boot_release);
3562 if (!boot_kobj)
3563 goto put_shost;
3564
3565 if (!scsi_host_get(phba->shost))
3566 goto free_kset;
3567 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
3568 beiscsi_show_boot_ini_info,
3569 beiscsi_ini_get_attr_visibility,
3570 beiscsi_boot_release);
3571 if (!boot_kobj)
3572 goto put_shost;
3573
3574 if (!scsi_host_get(phba->shost))
3575 goto free_kset;
3576 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
3577 beiscsi_show_boot_eth_info,
3578 beiscsi_eth_get_attr_visibility,
3579 beiscsi_boot_release);
3580 if (!boot_kobj)
3581 goto put_shost;
3582 return 0;
3583
3584put_shost:
3585 scsi_host_put(phba->shost);
3586free_kset:
3587 iscsi_boot_destroy_kset(phba->boot_kset);
3585 return -ENOMEM; 3588 return -ENOMEM;
3586} 3589}
3587 3590
@@ -3963,11 +3966,10 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3963 } 3966 }
3964 memcpy(&io_task->cmd_bhs->iscsi_data_pdu. 3967 memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3965 dw[offsetof(struct amap_pdu_data_out, lun) / 32], 3968 dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3966 io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun)); 3969 &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3967 3970
3968 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 3971 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3969 cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr. 3972 cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun));
3970 lun[0]));
3971 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 3973 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3972 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 3974 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3973 io_task->pwrb_handle->wrb_index); 3975 io_task->pwrb_handle->wrb_index);
@@ -4150,8 +4152,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
4150 phba->ctrl.mbox_mem_alloced.size, 4152 phba->ctrl.mbox_mem_alloced.size,
4151 phba->ctrl.mbox_mem_alloced.va, 4153 phba->ctrl.mbox_mem_alloced.va,
4152 phba->ctrl.mbox_mem_alloced.dma); 4154 phba->ctrl.mbox_mem_alloced.dma);
4153 if (phba->boot_kset) 4155 iscsi_boot_destroy_kset(phba->boot_kset);
4154 iscsi_boot_destroy_kset(phba->boot_kset);
4155 iscsi_host_remove(phba->shost); 4156 iscsi_host_remove(phba->shost);
4156 pci_dev_put(phba->pcidev); 4157 pci_dev_put(phba->pcidev);
4157 iscsi_host_free(phba->shost); 4158 iscsi_host_free(phba->shost);
@@ -4310,11 +4311,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4310 goto free_blkenbld; 4311 goto free_blkenbld;
4311 } 4312 }
4312 hwi_enable_intr(phba); 4313 hwi_enable_intr(phba);
4313 ret = beiscsi_get_boot_info(phba); 4314
4314 if (ret < 0) { 4315 if (beiscsi_setup_boot_info(phba))
4315 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4316 /*
4316 "No Boot Devices !!!!!\n"); 4317 * log error but continue, because we may not be using
4317 } 4318 * iscsi boot.
4319 */
4320 shost_printk(KERN_ERR, phba->shost, "Could not set up "
4321 "iSCSI boot info.");
4322
4318 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n"); 4323 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
4319 return 0; 4324 return 0;
4320 4325
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index 4ce6f4942327..475cf925d5e8 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_SCSI_BFA_FC) := bfa.o 1obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
2 2
3bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o 3bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o bfad_bsg.o
4bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o 4bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
5bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o 5bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
6bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o 6bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 7be6b5a8114b..3b0af1102bf4 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -27,7 +27,6 @@
27struct bfa_s; 27struct bfa_s;
28 28
29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); 29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
30typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
31 30
32/* 31/*
33 * Interrupt message handlers 32 * Interrupt message handlers
@@ -54,7 +53,8 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
54 ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \ 53 ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
55 + bfa_reqq_pi((__bfa), (__reqq))))) 54 + bfa_reqq_pi((__bfa), (__reqq)))))
56 55
57#define bfa_reqq_produce(__bfa, __reqq) do { \ 56#define bfa_reqq_produce(__bfa, __reqq, __mh) do { \
57 (__mh).mtag.h2i.qid = (__bfa)->iocfc.hw_qid[__reqq];\
58 (__bfa)->iocfc.req_cq_pi[__reqq]++; \ 58 (__bfa)->iocfc.req_cq_pi[__reqq]++; \
59 (__bfa)->iocfc.req_cq_pi[__reqq] &= \ 59 (__bfa)->iocfc.req_cq_pi[__reqq] &= \
60 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ 60 ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
@@ -76,16 +76,6 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
76} while (0) 76} while (0)
77 77
78/* 78/*
79 * Queue element to wait for room in request queue. FIFO order is
80 * maintained when fullfilling requests.
81 */
82struct bfa_reqq_wait_s {
83 struct list_head qe;
84 void (*qresume) (void *cbarg);
85 void *cbarg;
86};
87
88/*
89 * Circular queue usage assignments 79 * Circular queue usage assignments
90 */ 80 */
91enum { 81enum {
@@ -128,18 +118,6 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
128 118
129#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe) 119#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
130 120
131
132/*
133 * Generic BFA callback element.
134 */
135struct bfa_cb_qe_s {
136 struct list_head qe;
137 bfa_cb_cbfn_t cbfn;
138 bfa_boolean_t once;
139 u32 rsvd;
140 void *cbarg;
141};
142
143#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ 121#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
144 (__hcb_qe)->cbfn = (__cbfn); \ 122 (__hcb_qe)->cbfn = (__cbfn); \
145 (__hcb_qe)->cbarg = (__cbarg); \ 123 (__hcb_qe)->cbarg = (__cbarg); \
@@ -172,44 +150,14 @@ struct bfa_pciid_s {
172 150
173extern char bfa_version[]; 151extern char bfa_version[];
174 152
175/*
176 * BFA memory resources
177 */
178enum bfa_mem_type {
179 BFA_MEM_TYPE_KVA = 1, /* Kernel Virtual Memory *(non-dma-able) */
180 BFA_MEM_TYPE_DMA = 2, /* DMA-able memory */
181 BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
182};
183
184struct bfa_mem_elem_s {
185 enum bfa_mem_type mem_type; /* see enum bfa_mem_type */
186 u32 mem_len; /* Total Length in Bytes */
187 u8 *kva; /* kernel virtual address */
188 u64 dma; /* dma address if DMA memory */
189 u8 *kva_curp; /* kva allocation cursor */
190 u64 dma_curp; /* dma allocation cursor */
191};
192
193struct bfa_meminfo_s {
194 struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
195};
196#define bfa_meminfo_kva(_m) \
197 ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
198#define bfa_meminfo_dma_virt(_m) \
199 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
200#define bfa_meminfo_dma_phys(_m) \
201 ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
202
203struct bfa_iocfc_regs_s { 153struct bfa_iocfc_regs_s {
204 void __iomem *intr_status; 154 void __iomem *intr_status;
205 void __iomem *intr_mask; 155 void __iomem *intr_mask;
206 void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS]; 156 void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS];
207 void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS]; 157 void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS];
208 void __iomem *cpe_q_depth[BFI_IOC_MAX_CQS];
209 void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS]; 158 void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS];
210 void __iomem *rme_q_ci[BFI_IOC_MAX_CQS]; 159 void __iomem *rme_q_ci[BFI_IOC_MAX_CQS];
211 void __iomem *rme_q_pi[BFI_IOC_MAX_CQS]; 160 void __iomem *rme_q_pi[BFI_IOC_MAX_CQS];
212 void __iomem *rme_q_depth[BFI_IOC_MAX_CQS];
213 void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS]; 161 void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS];
214}; 162};
215 163
@@ -231,25 +179,55 @@ struct bfa_hwif_s {
231 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); 179 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
232 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); 180 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
233 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); 181 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
234 void (*hw_msix_install)(struct bfa_s *bfa); 182 void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
183 void (*hw_msix_queue_install)(struct bfa_s *bfa);
235 void (*hw_msix_uninstall)(struct bfa_s *bfa); 184 void (*hw_msix_uninstall)(struct bfa_s *bfa);
236 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix); 185 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
237 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap, 186 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
238 u32 *nvecs, u32 *maxvec); 187 u32 *nvecs, u32 *maxvec);
239 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start, 188 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
240 u32 *end); 189 u32 *end);
190 int cpe_vec_q0;
191 int rme_vec_q0;
241}; 192};
242typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status); 193typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
243 194
195struct bfa_faa_cbfn_s {
196 bfa_cb_iocfc_t faa_cbfn;
197 void *faa_cbarg;
198};
199
200#define BFA_FAA_ENABLED 1
201#define BFA_FAA_DISABLED 2
202
203/*
204 * FAA attributes
205 */
206struct bfa_faa_attr_s {
207 wwn_t faa;
208 u8 faa_state;
209 u8 pwwn_source;
210 u8 rsvd[6];
211};
212
213struct bfa_faa_args_s {
214 struct bfa_faa_attr_s *faa_attr;
215 struct bfa_faa_cbfn_s faa_cb;
216 u8 faa_state;
217 bfa_boolean_t busy;
218};
219
244struct bfa_iocfc_s { 220struct bfa_iocfc_s {
245 struct bfa_s *bfa; 221 struct bfa_s *bfa;
246 struct bfa_iocfc_cfg_s cfg; 222 struct bfa_iocfc_cfg_s cfg;
247 int action; 223 int action;
248 u32 req_cq_pi[BFI_IOC_MAX_CQS]; 224 u32 req_cq_pi[BFI_IOC_MAX_CQS];
249 u32 rsp_cq_ci[BFI_IOC_MAX_CQS]; 225 u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
226 u8 hw_qid[BFI_IOC_MAX_CQS];
250 struct bfa_cb_qe_s init_hcb_qe; 227 struct bfa_cb_qe_s init_hcb_qe;
251 struct bfa_cb_qe_s stop_hcb_qe; 228 struct bfa_cb_qe_s stop_hcb_qe;
252 struct bfa_cb_qe_s dis_hcb_qe; 229 struct bfa_cb_qe_s dis_hcb_qe;
230 struct bfa_cb_qe_s en_hcb_qe;
253 struct bfa_cb_qe_s stats_hcb_qe; 231 struct bfa_cb_qe_s stats_hcb_qe;
254 bfa_boolean_t cfgdone; 232 bfa_boolean_t cfgdone;
255 233
@@ -257,7 +235,6 @@ struct bfa_iocfc_s {
257 struct bfi_iocfc_cfg_s *cfginfo; 235 struct bfi_iocfc_cfg_s *cfginfo;
258 struct bfa_dma_s cfgrsp_dma; 236 struct bfa_dma_s cfgrsp_dma;
259 struct bfi_iocfc_cfgrsp_s *cfgrsp; 237 struct bfi_iocfc_cfgrsp_s *cfgrsp;
260 struct bfi_iocfc_cfg_reply_s *cfg_reply;
261 struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS]; 238 struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
262 struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS]; 239 struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
263 struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS]; 240 struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
@@ -267,18 +244,42 @@ struct bfa_iocfc_s {
267 bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */ 244 bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
268 void *updateq_cbarg; /* bios callback arg */ 245 void *updateq_cbarg; /* bios callback arg */
269 u32 intr_mask; 246 u32 intr_mask;
247 struct bfa_faa_args_s faa_args;
248 struct bfa_mem_dma_s ioc_dma;
249 struct bfa_mem_dma_s iocfc_dma;
250 struct bfa_mem_dma_s reqq_dma[BFI_IOC_MAX_CQS];
251 struct bfa_mem_dma_s rspq_dma[BFI_IOC_MAX_CQS];
252 struct bfa_mem_kva_s kva_seg;
270}; 253};
271 254
272#define bfa_lpuid(__bfa) \ 255#define BFA_MEM_IOC_DMA(_bfa) (&((_bfa)->iocfc.ioc_dma))
273 bfa_ioc_portid(&(__bfa)->ioc) 256#define BFA_MEM_IOCFC_DMA(_bfa) (&((_bfa)->iocfc.iocfc_dma))
257#define BFA_MEM_REQQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.reqq_dma[(_qno)]))
258#define BFA_MEM_RSPQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.rspq_dma[(_qno)]))
259#define BFA_MEM_IOCFC_KVA(_bfa) (&((_bfa)->iocfc.kva_seg))
260
261#define bfa_fn_lpu(__bfa) \
262 bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc))
274#define bfa_msix_init(__bfa, __nvecs) \ 263#define bfa_msix_init(__bfa, __nvecs) \
275 ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)) 264 ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
276#define bfa_msix_install(__bfa) \ 265#define bfa_msix_ctrl_install(__bfa) \
277 ((__bfa)->iocfc.hwif.hw_msix_install(__bfa)) 266 ((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa))
267#define bfa_msix_queue_install(__bfa) \
268 ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
278#define bfa_msix_uninstall(__bfa) \ 269#define bfa_msix_uninstall(__bfa) \
279 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) 270 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
280#define bfa_isr_mode_set(__bfa, __msix) \ 271#define bfa_isr_rspq_ack(__bfa, __queue) do { \
281 ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)) 272 if ((__bfa)->iocfc.hwif.hw_rspq_ack) \
273 (__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue); \
274} while (0)
275#define bfa_isr_reqq_ack(__bfa, __queue) do { \
276 if ((__bfa)->iocfc.hwif.hw_reqq_ack) \
277 (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \
278} while (0)
279#define bfa_isr_mode_set(__bfa, __msix) do { \
280 if ((__bfa)->iocfc.hwif.hw_isr_mode_set) \
281 (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix); \
282} while (0)
282#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ 283#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
283 ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \ 284 ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
284 __nvecs, __maxvec)) 285 __nvecs, __maxvec))
@@ -290,17 +291,17 @@ struct bfa_iocfc_s {
290/* 291/*
291 * FC specific IOC functions. 292 * FC specific IOC functions.
292 */ 293 */
293void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 294void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg,
294 u32 *dm_len); 295 struct bfa_meminfo_s *meminfo,
296 struct bfa_s *bfa);
295void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, 297void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
296 struct bfa_iocfc_cfg_s *cfg, 298 struct bfa_iocfc_cfg_s *cfg,
297 struct bfa_meminfo_s *meminfo,
298 struct bfa_pcidev_s *pcidev); 299 struct bfa_pcidev_s *pcidev);
299void bfa_iocfc_init(struct bfa_s *bfa); 300void bfa_iocfc_init(struct bfa_s *bfa);
300void bfa_iocfc_start(struct bfa_s *bfa); 301void bfa_iocfc_start(struct bfa_s *bfa);
301void bfa_iocfc_stop(struct bfa_s *bfa); 302void bfa_iocfc_stop(struct bfa_s *bfa);
302void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg); 303void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
303void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa); 304void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa);
304bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa); 305bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
305void bfa_iocfc_reset_queues(struct bfa_s *bfa); 306void bfa_iocfc_reset_queues(struct bfa_s *bfa);
306 307
@@ -310,10 +311,10 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
310void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); 311void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
311 312
312void bfa_hwcb_reginit(struct bfa_s *bfa); 313void bfa_hwcb_reginit(struct bfa_s *bfa);
313void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
314void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); 314void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
315void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); 315void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
316void bfa_hwcb_msix_install(struct bfa_s *bfa); 316void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
317void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
317void bfa_hwcb_msix_uninstall(struct bfa_s *bfa); 318void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
318void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); 319void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
319void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, 320void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -321,10 +322,12 @@ void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
321void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, 322void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
322 u32 *end); 323 u32 *end);
323void bfa_hwct_reginit(struct bfa_s *bfa); 324void bfa_hwct_reginit(struct bfa_s *bfa);
325void bfa_hwct2_reginit(struct bfa_s *bfa);
324void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); 326void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
325void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); 327void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
326void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); 328void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
327void bfa_hwct_msix_install(struct bfa_s *bfa); 329void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
330void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
328void bfa_hwct_msix_uninstall(struct bfa_s *bfa); 331void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
329void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); 332void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
330void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, 333void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -377,7 +380,8 @@ void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
377void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); 380void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
378void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); 381void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
379void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, 382void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
380 struct bfa_meminfo_s *meminfo); 383 struct bfa_meminfo_s *meminfo,
384 struct bfa_s *bfa);
381void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 385void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
382 struct bfa_meminfo_s *meminfo, 386 struct bfa_meminfo_s *meminfo,
383 struct bfa_pcidev_s *pcidev); 387 struct bfa_pcidev_s *pcidev);
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 91838c51fb76..c38e589105a5 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -17,7 +17,7 @@
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfa_modules.h" 19#include "bfa_modules.h"
20#include "bfi_ctreg.h" 20#include "bfi_reg.h"
21 21
22BFA_TRC_FILE(HAL, CORE); 22BFA_TRC_FILE(HAL, CORE);
23 23
@@ -25,13 +25,14 @@ BFA_TRC_FILE(HAL, CORE);
25 * BFA module list terminated by NULL 25 * BFA module list terminated by NULL
26 */ 26 */
27static struct bfa_module_s *hal_mods[] = { 27static struct bfa_module_s *hal_mods[] = {
28 &hal_mod_fcdiag,
28 &hal_mod_sgpg, 29 &hal_mod_sgpg,
29 &hal_mod_fcport, 30 &hal_mod_fcport,
30 &hal_mod_fcxp, 31 &hal_mod_fcxp,
31 &hal_mod_lps, 32 &hal_mod_lps,
32 &hal_mod_uf, 33 &hal_mod_uf,
33 &hal_mod_rport, 34 &hal_mod_rport,
34 &hal_mod_fcpim, 35 &hal_mod_fcp,
35 NULL 36 NULL
36}; 37};
37 38
@@ -41,7 +42,7 @@ static struct bfa_module_s *hal_mods[] = {
41static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { 42static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
42 bfa_isr_unhandled, /* NONE */ 43 bfa_isr_unhandled, /* NONE */
43 bfa_isr_unhandled, /* BFI_MC_IOC */ 44 bfa_isr_unhandled, /* BFI_MC_IOC */
44 bfa_isr_unhandled, /* BFI_MC_DIAG */ 45 bfa_fcdiag_intr, /* BFI_MC_DIAG */
45 bfa_isr_unhandled, /* BFI_MC_FLASH */ 46 bfa_isr_unhandled, /* BFI_MC_FLASH */
46 bfa_isr_unhandled, /* BFI_MC_CEE */ 47 bfa_isr_unhandled, /* BFI_MC_CEE */
47 bfa_fcport_isr, /* BFI_MC_FCPORT */ 48 bfa_fcport_isr, /* BFI_MC_FCPORT */
@@ -51,7 +52,7 @@ static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
51 bfa_fcxp_isr, /* BFI_MC_FCXP */ 52 bfa_fcxp_isr, /* BFI_MC_FCXP */
52 bfa_lps_isr, /* BFI_MC_LPS */ 53 bfa_lps_isr, /* BFI_MC_LPS */
53 bfa_rport_isr, /* BFI_MC_RPORT */ 54 bfa_rport_isr, /* BFI_MC_RPORT */
54 bfa_itnim_isr, /* BFI_MC_ITNIM */ 55 bfa_itn_isr, /* BFI_MC_ITN */
55 bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ 56 bfa_isr_unhandled, /* BFI_MC_IOIM_READ */
56 bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ 57 bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */
57 bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ 58 bfa_isr_unhandled, /* BFI_MC_IOIM_IO */
@@ -89,23 +90,78 @@ static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
89 90
90 91
91static void 92static void
92bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) 93bfa_com_port_attach(struct bfa_s *bfa)
93{ 94{
94 struct bfa_port_s *port = &bfa->modules.port; 95 struct bfa_port_s *port = &bfa->modules.port;
95 u32 dm_len; 96 struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
96 u8 *dm_kva;
97 u64 dm_pa;
98 97
99 dm_len = bfa_port_meminfo();
100 dm_kva = bfa_meminfo_dma_virt(mi);
101 dm_pa = bfa_meminfo_dma_phys(mi);
102
103 memset(port, 0, sizeof(struct bfa_port_s));
104 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); 98 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
105 bfa_port_mem_claim(port, dm_kva, dm_pa); 99 bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
100}
101
102/*
103 * ablk module attach
104 */
105static void
106bfa_com_ablk_attach(struct bfa_s *bfa)
107{
108 struct bfa_ablk_s *ablk = &bfa->modules.ablk;
109 struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
110
111 bfa_ablk_attach(ablk, &bfa->ioc);
112 bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
113}
114
115static void
116bfa_com_cee_attach(struct bfa_s *bfa)
117{
118 struct bfa_cee_s *cee = &bfa->modules.cee;
119 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
120
121 cee->trcmod = bfa->trcmod;
122 bfa_cee_attach(cee, &bfa->ioc, bfa);
123 bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
124}
125
126static void
127bfa_com_sfp_attach(struct bfa_s *bfa)
128{
129 struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
130 struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
131
132 bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
133 bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
134}
135
136static void
137bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
138{
139 struct bfa_flash_s *flash = BFA_FLASH(bfa);
140 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
141
142 bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
143 bfa_flash_memclaim(flash, flash_dma->kva_curp,
144 flash_dma->dma_curp, mincfg);
145}
146
147static void
148bfa_com_diag_attach(struct bfa_s *bfa)
149{
150 struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa);
151 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
152
153 bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
154 bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
155}
156
157static void
158bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
159{
160 struct bfa_phy_s *phy = BFA_PHY(bfa);
161 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
106 162
107 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; 163 bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
108 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; 164 bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
109} 165}
110 166
111/* 167/*
@@ -122,6 +178,7 @@ enum {
122 BFA_IOCFC_ACT_INIT = 1, 178 BFA_IOCFC_ACT_INIT = 1,
123 BFA_IOCFC_ACT_STOP = 2, 179 BFA_IOCFC_ACT_STOP = 2,
124 BFA_IOCFC_ACT_DISABLE = 3, 180 BFA_IOCFC_ACT_DISABLE = 3,
181 BFA_IOCFC_ACT_ENABLE = 4,
125}; 182};
126 183
127#define DEF_CFG_NUM_FABRICS 1 184#define DEF_CFG_NUM_FABRICS 1
@@ -173,10 +230,92 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
173 } 230 }
174} 231}
175 232
233static inline void
234bfa_isr_rspq(struct bfa_s *bfa, int qid)
235{
236 struct bfi_msg_s *m;
237 u32 pi, ci;
238 struct list_head *waitq;
239
240 bfa_isr_rspq_ack(bfa, qid);
241
242 ci = bfa_rspq_ci(bfa, qid);
243 pi = bfa_rspq_pi(bfa, qid);
244
245 while (ci != pi) {
246 m = bfa_rspq_elem(bfa, qid, ci);
247 WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
248
249 bfa_isrs[m->mhdr.msg_class] (bfa, m);
250 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
251 }
252
253 /*
254 * update CI
255 */
256 bfa_rspq_ci(bfa, qid) = pi;
257 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
258 mmiowb();
259
260 /*
261 * Resume any pending requests in the corresponding reqq.
262 */
263 waitq = bfa_reqq(bfa, qid);
264 if (!list_empty(waitq))
265 bfa_reqq_resume(bfa, qid);
266}
267
268static inline void
269bfa_isr_reqq(struct bfa_s *bfa, int qid)
270{
271 struct list_head *waitq;
272
273 bfa_isr_reqq_ack(bfa, qid);
274
275 /*
276 * Resume any pending requests in the corresponding reqq.
277 */
278 waitq = bfa_reqq(bfa, qid);
279 if (!list_empty(waitq))
280 bfa_reqq_resume(bfa, qid);
281}
282
176void 283void
177bfa_msix_all(struct bfa_s *bfa, int vec) 284bfa_msix_all(struct bfa_s *bfa, int vec)
178{ 285{
179 bfa_intx(bfa); 286 u32 intr, qintr;
287 int queue;
288
289 intr = readl(bfa->iocfc.bfa_regs.intr_status);
290 if (!intr)
291 return;
292
293 /*
294 * RME completion queue interrupt
295 */
296 qintr = intr & __HFN_INT_RME_MASK;
297 if (qintr && bfa->queue_process) {
298 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
299 bfa_isr_rspq(bfa, queue);
300 }
301
302 intr &= ~qintr;
303 if (!intr)
304 return;
305
306 /*
307 * CPE completion queue interrupt
308 */
309 qintr = intr & __HFN_INT_CPE_MASK;
310 if (qintr && bfa->queue_process) {
311 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
312 bfa_isr_reqq(bfa, queue);
313 }
314 intr &= ~qintr;
315 if (!intr)
316 return;
317
318 bfa_msix_lpu_err(bfa, intr);
180} 319}
181 320
182bfa_boolean_t 321bfa_boolean_t
@@ -189,16 +328,19 @@ bfa_intx(struct bfa_s *bfa)
189 if (!intr) 328 if (!intr)
190 return BFA_FALSE; 329 return BFA_FALSE;
191 330
331 qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
332 if (qintr)
333 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
334
192 /* 335 /*
193 * RME completion queue interrupt 336 * RME completion queue interrupt
194 */ 337 */
195 qintr = intr & __HFN_INT_RME_MASK; 338 qintr = intr & __HFN_INT_RME_MASK;
196 writel(qintr, bfa->iocfc.bfa_regs.intr_status); 339 if (qintr && bfa->queue_process) {
197 340 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
198 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { 341 bfa_isr_rspq(bfa, queue);
199 if (intr & (__HFN_INT_RME_Q0 << queue))
200 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
201 } 342 }
343
202 intr &= ~qintr; 344 intr &= ~qintr;
203 if (!intr) 345 if (!intr)
204 return BFA_TRUE; 346 return BFA_TRUE;
@@ -207,11 +349,9 @@ bfa_intx(struct bfa_s *bfa)
207 * CPE completion queue interrupt 349 * CPE completion queue interrupt
208 */ 350 */
209 qintr = intr & __HFN_INT_CPE_MASK; 351 qintr = intr & __HFN_INT_CPE_MASK;
210 writel(qintr, bfa->iocfc.bfa_regs.intr_status); 352 if (qintr && bfa->queue_process) {
211 353 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
212 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { 354 bfa_isr_reqq(bfa, queue);
213 if (intr & (__HFN_INT_CPE_Q0 << queue))
214 bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
215 } 355 }
216 intr &= ~qintr; 356 intr &= ~qintr;
217 if (!intr) 357 if (!intr)
@@ -225,32 +365,25 @@ bfa_intx(struct bfa_s *bfa)
225void 365void
226bfa_isr_enable(struct bfa_s *bfa) 366bfa_isr_enable(struct bfa_s *bfa)
227{ 367{
228 u32 intr_unmask; 368 u32 umsk;
229 int pci_func = bfa_ioc_pcifn(&bfa->ioc); 369 int pci_func = bfa_ioc_pcifn(&bfa->ioc);
230 370
231 bfa_trc(bfa, pci_func); 371 bfa_trc(bfa, pci_func);
232 372
233 bfa_msix_install(bfa); 373 bfa_msix_ctrl_install(bfa);
234 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | 374
235 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | 375 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
236 __HFN_INT_LL_HALT); 376 umsk = __HFN_INT_ERR_MASK_CT2;
237 377 umsk |= pci_func == 0 ?
238 if (pci_func == 0) 378 __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
239 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | 379 } else {
240 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | 380 umsk = __HFN_INT_ERR_MASK;
241 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | 381 umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
242 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | 382 }
243 __HFN_INT_MBOX_LPU0); 383
244 else 384 writel(umsk, bfa->iocfc.bfa_regs.intr_status);
245 intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | 385 writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
246 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | 386 bfa->iocfc.intr_mask = ~umsk;
247 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
248 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
249 __HFN_INT_MBOX_LPU1);
250
251 writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
252 writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
253 bfa->iocfc.intr_mask = ~intr_unmask;
254 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); 387 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
255} 388}
256 389
@@ -263,20 +396,9 @@ bfa_isr_disable(struct bfa_s *bfa)
263} 396}
264 397
265void 398void
266bfa_msix_reqq(struct bfa_s *bfa, int qid) 399bfa_msix_reqq(struct bfa_s *bfa, int vec)
267{ 400{
268 struct list_head *waitq; 401 bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
269
270 qid &= (BFI_IOC_MAX_CQS - 1);
271
272 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
273
274 /*
275 * Resume any pending requests in the corresponding reqq.
276 */
277 waitq = bfa_reqq(bfa, qid);
278 if (!list_empty(waitq))
279 bfa_reqq_resume(bfa, qid);
280} 402}
281 403
282void 404void
@@ -290,57 +412,37 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
290} 412}
291 413
292void 414void
293bfa_msix_rspq(struct bfa_s *bfa, int qid) 415bfa_msix_rspq(struct bfa_s *bfa, int vec)
294{ 416{
295 struct bfi_msg_s *m; 417 bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
296 u32 pi, ci;
297 struct list_head *waitq;
298
299 qid &= (BFI_IOC_MAX_CQS - 1);
300
301 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
302
303 ci = bfa_rspq_ci(bfa, qid);
304 pi = bfa_rspq_pi(bfa, qid);
305
306 if (bfa->rme_process) {
307 while (ci != pi) {
308 m = bfa_rspq_elem(bfa, qid, ci);
309 bfa_isrs[m->mhdr.msg_class] (bfa, m);
310 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
311 }
312 }
313
314 /*
315 * update CI
316 */
317 bfa_rspq_ci(bfa, qid) = pi;
318 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
319 mmiowb();
320
321 /*
322 * Resume any pending requests in the corresponding reqq.
323 */
324 waitq = bfa_reqq(bfa, qid);
325 if (!list_empty(waitq))
326 bfa_reqq_resume(bfa, qid);
327} 418}
328 419
329void 420void
330bfa_msix_lpu_err(struct bfa_s *bfa, int vec) 421bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
331{ 422{
332 u32 intr, curr_value; 423 u32 intr, curr_value;
424 bfa_boolean_t lpu_isr, halt_isr, pss_isr;
333 425
334 intr = readl(bfa->iocfc.bfa_regs.intr_status); 426 intr = readl(bfa->iocfc.bfa_regs.intr_status);
335 427
336 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) 428 if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
337 bfa_ioc_mbox_isr(&bfa->ioc); 429 halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
430 pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
431 lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
432 __HFN_INT_MBOX_LPU1_CT2);
433 intr &= __HFN_INT_ERR_MASK_CT2;
434 } else {
435 halt_isr = intr & __HFN_INT_LL_HALT;
436 pss_isr = intr & __HFN_INT_ERR_PSS;
437 lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
438 intr &= __HFN_INT_ERR_MASK;
439 }
338 440
339 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | 441 if (lpu_isr)
340 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); 442 bfa_ioc_mbox_isr(&bfa->ioc);
341 443
342 if (intr) { 444 if (intr) {
343 if (intr & __HFN_INT_LL_HALT) { 445 if (halt_isr) {
344 /* 446 /*
345 * If LL_HALT bit is set then FW Init Halt LL Port 447 * If LL_HALT bit is set then FW Init Halt LL Port
346 * Register needs to be cleared as well so Interrupt 448 * Register needs to be cleared as well so Interrupt
@@ -351,7 +453,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
351 writel(curr_value, bfa->ioc.ioc_regs.ll_halt); 453 writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
352 } 454 }
353 455
354 if (intr & __HFN_INT_ERR_PSS) { 456 if (pss_isr) {
355 /* 457 /*
356 * ERR_PSS bit needs to be cleared as well in case 458 * ERR_PSS bit needs to be cleared as well in case
357 * interrups are shared so driver's interrupt handler is 459 * interrups are shared so driver's interrupt handler is
@@ -359,7 +461,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
359 */ 461 */
360 curr_value = readl( 462 curr_value = readl(
361 bfa->ioc.ioc_regs.pss_err_status_reg); 463 bfa->ioc.ioc_regs.pss_err_status_reg);
362 curr_value &= __PSS_ERR_STATUS_SET;
363 writel(curr_value, 464 writel(curr_value,
364 bfa->ioc.ioc_regs.pss_err_status_reg); 465 bfa->ioc.ioc_regs.pss_err_status_reg);
365 } 466 }
@@ -377,41 +478,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
377 * BFA IOC private functions 478 * BFA IOC private functions
378 */ 479 */
379 480
380static void
381bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
382{
383 int i, per_reqq_sz, per_rspq_sz;
384
385 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
386 BFA_DMA_ALIGN_SZ);
387 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
388 BFA_DMA_ALIGN_SZ);
389
390 /*
391 * Calculate CQ size
392 */
393 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
394 *dm_len = *dm_len + per_reqq_sz;
395 *dm_len = *dm_len + per_rspq_sz;
396 }
397
398 /*
399 * Calculate Shadow CI/PI size
400 */
401 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
402 *dm_len += (2 * BFA_CACHELINE_SZ);
403}
404
405static void
406bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
407{
408 *dm_len +=
409 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
410 *dm_len +=
411 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
412 BFA_CACHELINE_SZ);
413}
414
415/* 481/*
416 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ 482 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
417 */ 483 */
@@ -433,8 +499,13 @@ bfa_iocfc_send_cfg(void *bfa_arg)
433 /* 499 /*
434 * initialize IOC configuration info 500 * initialize IOC configuration info
435 */ 501 */
502 cfg_info->single_msix_vec = 0;
503 if (bfa->msix.nvecs == 1)
504 cfg_info->single_msix_vec = 1;
436 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 505 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
437 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 506 cfg_info->num_cqs = cfg->fwcfg.num_cqs;
507 cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
508 cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
438 509
439 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); 510 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
440 /* 511 /*
@@ -469,7 +540,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
469 * dma map IOC configuration itself 540 * dma map IOC configuration itself
470 */ 541 */
471 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, 542 bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
472 bfa_lpuid(bfa)); 543 bfa_fn_lpu(bfa));
473 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); 544 bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
474 545
475 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, 546 bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
@@ -491,26 +562,40 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
491 /* 562 /*
492 * Initialize chip specific handlers. 563 * Initialize chip specific handlers.
493 */ 564 */
494 if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { 565 if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
495 iocfc->hwif.hw_reginit = bfa_hwct_reginit; 566 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
496 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; 567 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
497 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; 568 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
498 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; 569 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
499 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; 570 iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
571 iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
500 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; 572 iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
501 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; 573 iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
502 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; 574 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
503 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range; 575 iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
576 iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
577 iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
504 } else { 578 } else {
505 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 579 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
506 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; 580 iocfc->hwif.hw_reqq_ack = NULL;
507 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; 581 iocfc->hwif.hw_rspq_ack = NULL;
508 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; 582 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
509 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; 583 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
584 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
510 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; 585 iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
511 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; 586 iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
512 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; 587 iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
513 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range; 588 iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
589 iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
590 bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
591 iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
592 bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
593 }
594
595 if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
596 iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
597 iocfc->hwif.hw_isr_mode_set = NULL;
598 iocfc->hwif.hw_rspq_ack = NULL;
514 } 599 }
515 600
516 iocfc->hwif.hw_reginit(bfa); 601 iocfc->hwif.hw_reginit(bfa);
@@ -518,48 +603,42 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
518} 603}
519 604
520static void 605static void
521bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, 606bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
522 struct bfa_meminfo_s *meminfo)
523{ 607{
524 u8 *dm_kva; 608 u8 *dm_kva = NULL;
525 u64 dm_pa; 609 u64 dm_pa = 0;
526 int i, per_reqq_sz, per_rspq_sz; 610 int i, per_reqq_sz, per_rspq_sz, dbgsz;
527 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 611 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
528 int dbgsz; 612 struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
529 613 struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
530 dm_kva = bfa_meminfo_dma_virt(meminfo); 614 struct bfa_mem_dma_s *reqq_dma, *rspq_dma;
531 dm_pa = bfa_meminfo_dma_phys(meminfo);
532 615
533 /* 616 /* First allocate dma memory for IOC */
534 * First allocate dma memory for IOC. 617 bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
535 */ 618 bfa_mem_dma_phys(ioc_dma));
536 bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
537 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
538 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
539 619
540 /* 620 /* Claim DMA-able memory for the request/response queues */
541 * Claim DMA-able memory for the request/response queues and for shadow
542 * ci/pi registers
543 */
544 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), 621 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
545 BFA_DMA_ALIGN_SZ); 622 BFA_DMA_ALIGN_SZ);
546 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), 623 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
547 BFA_DMA_ALIGN_SZ); 624 BFA_DMA_ALIGN_SZ);
548 625
549 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 626 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
550 iocfc->req_cq_ba[i].kva = dm_kva; 627 reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
551 iocfc->req_cq_ba[i].pa = dm_pa; 628 iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
552 memset(dm_kva, 0, per_reqq_sz); 629 iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
553 dm_kva += per_reqq_sz; 630 memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);
554 dm_pa += per_reqq_sz; 631
555 632 rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
556 iocfc->rsp_cq_ba[i].kva = dm_kva; 633 iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
557 iocfc->rsp_cq_ba[i].pa = dm_pa; 634 iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
558 memset(dm_kva, 0, per_rspq_sz); 635 memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
559 dm_kva += per_rspq_sz;
560 dm_pa += per_rspq_sz;
561 } 636 }
562 637
638 /* Claim IOCFC dma memory - for shadow CI/PI */
639 dm_kva = bfa_mem_dma_virt(iocfc_dma);
640 dm_pa = bfa_mem_dma_phys(iocfc_dma);
641
563 for (i = 0; i < cfg->fwcfg.num_cqs; i++) { 642 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
564 iocfc->req_cq_shadow_ci[i].kva = dm_kva; 643 iocfc->req_cq_shadow_ci[i].kva = dm_kva;
565 iocfc->req_cq_shadow_ci[i].pa = dm_pa; 644 iocfc->req_cq_shadow_ci[i].pa = dm_pa;
@@ -572,36 +651,27 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
572 dm_pa += BFA_CACHELINE_SZ; 651 dm_pa += BFA_CACHELINE_SZ;
573 } 652 }
574 653
575 /* 654 /* Claim IOCFC dma memory - for the config info page */
576 * Claim DMA-able memory for the config info page
577 */
578 bfa->iocfc.cfg_info.kva = dm_kva; 655 bfa->iocfc.cfg_info.kva = dm_kva;
579 bfa->iocfc.cfg_info.pa = dm_pa; 656 bfa->iocfc.cfg_info.pa = dm_pa;
580 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva; 657 bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
581 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); 658 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
582 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); 659 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
583 660
584 /* 661 /* Claim IOCFC dma memory - for the config response */
585 * Claim DMA-able memory for the config response
586 */
587 bfa->iocfc.cfgrsp_dma.kva = dm_kva; 662 bfa->iocfc.cfgrsp_dma.kva = dm_kva;
588 bfa->iocfc.cfgrsp_dma.pa = dm_pa; 663 bfa->iocfc.cfgrsp_dma.pa = dm_pa;
589 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; 664 bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
590 665 dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
591 dm_kva += 666 BFA_CACHELINE_SZ);
592 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
593 BFA_CACHELINE_SZ);
594 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), 667 dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
595 BFA_CACHELINE_SZ); 668 BFA_CACHELINE_SZ);
596
597
598 bfa_meminfo_dma_virt(meminfo) = dm_kva;
599 bfa_meminfo_dma_phys(meminfo) = dm_pa;
600 669
670 /* Claim IOCFC kva memory */
601 dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 671 dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
602 if (dbgsz > 0) { 672 if (dbgsz > 0) {
603 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); 673 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
604 bfa_meminfo_kva(meminfo) += dbgsz; 674 bfa_mem_kva_curp(iocfc) += dbgsz;
605 } 675 }
606} 676}
607 677
@@ -613,7 +683,9 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
613{ 683{
614 int i; 684 int i;
615 685
616 bfa->rme_process = BFA_TRUE; 686 bfa->queue_process = BFA_TRUE;
687 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
688 bfa_isr_rspq_ack(bfa, i);
617 689
618 for (i = 0; hal_mods[i]; i++) 690 for (i = 0; hal_mods[i]; i++)
619 hal_mods[i]->start(bfa); 691 hal_mods[i]->start(bfa);
@@ -660,6 +732,16 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
660} 732}
661 733
662static void 734static void
735bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
736{
737 struct bfa_s *bfa = bfa_arg;
738 struct bfad_s *bfad = bfa->bfad;
739
740 if (compl)
741 complete(&bfad->enable_comp);
742}
743
744static void
663bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) 745bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
664{ 746{
665 struct bfa_s *bfa = bfa_arg; 747 struct bfa_s *bfa = bfa_arg;
@@ -669,6 +751,37 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
669 complete(&bfad->disable_comp); 751 complete(&bfad->disable_comp);
670} 752}
671 753
754/**
755 * configure queue registers from firmware response
756 */
757static void
758bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
759{
760 int i;
761 struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
762 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
763
764 for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
765 bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
766 r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
767 r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
768 r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
769 r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
770 r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
771 r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
772 }
773}
774
775static void
776bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
777{
778 bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
779 bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
780 bfa_rport_res_recfg(bfa, fwcfg->num_rports);
781 bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
782 bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
783}
784
672/* 785/*
673 * Update BFA configuration from firmware configuration. 786 * Update BFA configuration from firmware configuration.
674 */ 787 */
@@ -681,6 +794,7 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
681 794
682 fwcfg->num_cqs = fwcfg->num_cqs; 795 fwcfg->num_cqs = fwcfg->num_cqs;
683 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs); 796 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
797 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
684 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs); 798 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
685 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs); 799 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
686 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); 800 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
@@ -689,14 +803,33 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
689 iocfc->cfgdone = BFA_TRUE; 803 iocfc->cfgdone = BFA_TRUE;
690 804
691 /* 805 /*
806 * configure queue register offsets as learnt from firmware
807 */
808 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
809
810 /*
811 * Re-configure resources as learnt from Firmware
812 */
813 bfa_iocfc_res_recfg(bfa, fwcfg);
814
815 /*
816 * Install MSIX queue handlers
817 */
818 bfa_msix_queue_install(bfa);
819
820 /*
692 * Configuration is complete - initialize/start submodules 821 * Configuration is complete - initialize/start submodules
693 */ 822 */
694 bfa_fcport_init(bfa); 823 bfa_fcport_init(bfa);
695 824
696 if (iocfc->action == BFA_IOCFC_ACT_INIT) 825 if (iocfc->action == BFA_IOCFC_ACT_INIT)
697 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); 826 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
698 else 827 else {
828 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
829 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
830 bfa_iocfc_enable_cb, bfa);
699 bfa_iocfc_start_submod(bfa); 831 bfa_iocfc_start_submod(bfa);
832 }
700} 833}
701void 834void
702bfa_iocfc_reset_queues(struct bfa_s *bfa) 835bfa_iocfc_reset_queues(struct bfa_s *bfa)
@@ -711,6 +844,181 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
711 } 844 }
712} 845}
713 846
847/* Fabric Assigned Address specific functions */
848
849/*
850 * Check whether IOC is ready before sending command down
851 */
852static bfa_status_t
853bfa_faa_validate_request(struct bfa_s *bfa)
854{
855 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
856 u32 card_type = bfa->ioc.attr->card_type;
857
858 if (bfa_ioc_is_operational(&bfa->ioc)) {
859 if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
860 return BFA_STATUS_FEATURE_NOT_SUPPORTED;
861 } else {
862 if (!bfa_ioc_is_acq_addr(&bfa->ioc))
863 return BFA_STATUS_IOC_NON_OP;
864 }
865
866 return BFA_STATUS_OK;
867}
868
869bfa_status_t
870bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
871{
872 struct bfi_faa_en_dis_s faa_enable_req;
873 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
874 bfa_status_t status;
875
876 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
877 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
878
879 status = bfa_faa_validate_request(bfa);
880 if (status != BFA_STATUS_OK)
881 return status;
882
883 if (iocfc->faa_args.busy == BFA_TRUE)
884 return BFA_STATUS_DEVBUSY;
885
886 if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
887 return BFA_STATUS_FAA_ENABLED;
888
889 if (bfa_fcport_is_trunk_enabled(bfa))
890 return BFA_STATUS_ERROR_TRUNK_ENABLED;
891
892 bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
893 iocfc->faa_args.busy = BFA_TRUE;
894
895 memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
896 bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
897 BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
898
899 bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
900 sizeof(struct bfi_faa_en_dis_s));
901
902 return BFA_STATUS_OK;
903}
904
905bfa_status_t
906bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
907 void *cbarg)
908{
909 struct bfi_faa_en_dis_s faa_disable_req;
910 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
911 bfa_status_t status;
912
913 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
914 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
915
916 status = bfa_faa_validate_request(bfa);
917 if (status != BFA_STATUS_OK)
918 return status;
919
920 if (iocfc->faa_args.busy == BFA_TRUE)
921 return BFA_STATUS_DEVBUSY;
922
923 if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
924 return BFA_STATUS_FAA_DISABLED;
925
926 bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
927 iocfc->faa_args.busy = BFA_TRUE;
928
929 memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
930 bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
931 BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
932
933 bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
934 sizeof(struct bfi_faa_en_dis_s));
935
936 return BFA_STATUS_OK;
937}
938
939bfa_status_t
940bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
941 bfa_cb_iocfc_t cbfn, void *cbarg)
942{
943 struct bfi_faa_query_s faa_attr_req;
944 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
945 bfa_status_t status;
946
947 iocfc->faa_args.faa_attr = attr;
948 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
949 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
950
951 status = bfa_faa_validate_request(bfa);
952 if (status != BFA_STATUS_OK)
953 return status;
954
955 if (iocfc->faa_args.busy == BFA_TRUE)
956 return BFA_STATUS_DEVBUSY;
957
958 iocfc->faa_args.busy = BFA_TRUE;
959 memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
960 bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
961 BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
962
963 bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
964 sizeof(struct bfi_faa_query_s));
965
966 return BFA_STATUS_OK;
967}
968
969/*
970 * FAA enable response
971 */
972static void
973bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
974 struct bfi_faa_en_dis_rsp_s *rsp)
975{
976 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
977 bfa_status_t status = rsp->status;
978
979 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
980
981 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
982 iocfc->faa_args.busy = BFA_FALSE;
983}
984
985/*
986 * FAA disable response
987 */
988static void
989bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
990 struct bfi_faa_en_dis_rsp_s *rsp)
991{
992 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
993 bfa_status_t status = rsp->status;
994
995 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
996
997 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
998 iocfc->faa_args.busy = BFA_FALSE;
999}
1000
1001/*
1002 * FAA query response
1003 */
1004static void
1005bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
1006 bfi_faa_query_rsp_t *rsp)
1007{
1008 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
1009
1010 if (iocfc->faa_args.faa_attr) {
1011 iocfc->faa_args.faa_attr->faa = rsp->faa;
1012 iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
1013 iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
1014 }
1015
1016 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
1017
1018 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
1019 iocfc->faa_args.busy = BFA_FALSE;
1020}
1021
714/* 1022/*
715 * IOC enable request is complete 1023 * IOC enable request is complete
716 */ 1024 */
@@ -719,11 +1027,20 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
719{ 1027{
720 struct bfa_s *bfa = bfa_arg; 1028 struct bfa_s *bfa = bfa_arg;
721 1029
1030 if (status == BFA_STATUS_FAA_ACQ_ADDR) {
1031 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1032 bfa_iocfc_init_cb, bfa);
1033 return;
1034 }
1035
722 if (status != BFA_STATUS_OK) { 1036 if (status != BFA_STATUS_OK) {
723 bfa_isr_disable(bfa); 1037 bfa_isr_disable(bfa);
724 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) 1038 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
725 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, 1039 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
726 bfa_iocfc_init_cb, bfa); 1040 bfa_iocfc_init_cb, bfa);
1041 else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
1042 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
1043 bfa_iocfc_enable_cb, bfa);
727 return; 1044 return;
728 } 1045 }
729 1046
@@ -759,7 +1076,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
759{ 1076{
760 struct bfa_s *bfa = bfa_arg; 1077 struct bfa_s *bfa = bfa_arg;
761 1078
762 bfa->rme_process = BFA_FALSE; 1079 bfa->queue_process = BFA_FALSE;
763 1080
764 bfa_isr_disable(bfa); 1081 bfa_isr_disable(bfa);
765 bfa_iocfc_disable_submod(bfa); 1082 bfa_iocfc_disable_submod(bfa);
@@ -786,15 +1103,47 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
786 * Query IOC memory requirement information. 1103 * Query IOC memory requirement information.
787 */ 1104 */
788void 1105void
789bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 1106bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
790 u32 *dm_len) 1107 struct bfa_s *bfa)
791{ 1108{
792 /* dma memory for IOC */ 1109 int q, per_reqq_sz, per_rspq_sz;
793 *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); 1110 struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
1111 struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
1112 struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
1113 u32 dm_len = 0;
1114
1115 /* dma memory setup for IOC */
1116 bfa_mem_dma_setup(meminfo, ioc_dma,
1117 BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));
1118
1119 /* dma memory setup for REQ/RSP queues */
1120 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
1121 BFA_DMA_ALIGN_SZ);
1122 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
1123 BFA_DMA_ALIGN_SZ);
1124
1125 for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
1126 bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
1127 per_reqq_sz);
1128 bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
1129 per_rspq_sz);
1130 }
1131
1132 /* IOCFC dma memory - calculate Shadow CI/PI size */
1133 for (q = 0; q < cfg->fwcfg.num_cqs; q++)
1134 dm_len += (2 * BFA_CACHELINE_SZ);
1135
1136 /* IOCFC dma memory - calculate config info / rsp size */
1137 dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
1138 dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
1139 BFA_CACHELINE_SZ);
794 1140
795 bfa_iocfc_fw_cfg_sz(cfg, dm_len); 1141 /* dma memory setup for IOCFC */
796 bfa_iocfc_cqs_sz(cfg, dm_len); 1142 bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
797 *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 1143
1144 /* kva memory setup for IOCFC */
1145 bfa_mem_kva_setup(meminfo, iocfc_kva,
1146 ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
798} 1147}
799 1148
800/* 1149/*
@@ -802,7 +1151,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
802 */ 1151 */
803void 1152void
804bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 1153bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
805 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 1154 struct bfa_pcidev_s *pcidev)
806{ 1155{
807 int i; 1156 int i;
808 struct bfa_ioc_s *ioc = &bfa->ioc; 1157 struct bfa_ioc_s *ioc = &bfa->ioc;
@@ -815,17 +1164,11 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
815 ioc->trcmod = bfa->trcmod; 1164 ioc->trcmod = bfa->trcmod;
816 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); 1165 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
817 1166
818 /* 1167 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
819 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
820 */
821 if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
822 bfa_ioc_set_fcmode(&bfa->ioc);
823
824 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
825 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); 1168 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
826 1169
827 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); 1170 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
828 bfa_iocfc_mem_claim(bfa, cfg, meminfo); 1171 bfa_iocfc_mem_claim(bfa, cfg);
829 INIT_LIST_HEAD(&bfa->timer_mod.timer_q); 1172 INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
830 1173
831 INIT_LIST_HEAD(&bfa->comp_q); 1174 INIT_LIST_HEAD(&bfa->comp_q);
@@ -863,7 +1206,7 @@ bfa_iocfc_stop(struct bfa_s *bfa)
863{ 1206{
864 bfa->iocfc.action = BFA_IOCFC_ACT_STOP; 1207 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
865 1208
866 bfa->rme_process = BFA_FALSE; 1209 bfa->queue_process = BFA_FALSE;
867 bfa_ioc_disable(&bfa->ioc); 1210 bfa_ioc_disable(&bfa->ioc);
868} 1211}
869 1212
@@ -879,12 +1222,22 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
879 1222
880 switch (msg->mh.msg_id) { 1223 switch (msg->mh.msg_id) {
881 case BFI_IOCFC_I2H_CFG_REPLY: 1224 case BFI_IOCFC_I2H_CFG_REPLY:
882 iocfc->cfg_reply = &msg->cfg_reply;
883 bfa_iocfc_cfgrsp(bfa); 1225 bfa_iocfc_cfgrsp(bfa);
884 break; 1226 break;
885 case BFI_IOCFC_I2H_UPDATEQ_RSP: 1227 case BFI_IOCFC_I2H_UPDATEQ_RSP:
886 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); 1228 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
887 break; 1229 break;
1230 case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
1231 bfa_faa_enable_reply(iocfc,
1232 (struct bfi_faa_en_dis_rsp_s *)msg);
1233 break;
1234 case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
1235 bfa_faa_disable_reply(iocfc,
1236 (struct bfi_faa_en_dis_rsp_s *)msg);
1237 break;
1238 case BFI_IOCFC_I2H_FAA_QUERY_RSP:
1239 bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
1240 break;
888 default: 1241 default:
889 WARN_ON(1); 1242 WARN_ON(1);
890 } 1243 }
@@ -926,7 +1279,7 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
926 return BFA_STATUS_DEVBUSY; 1279 return BFA_STATUS_DEVBUSY;
927 1280
928 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, 1281 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
929 bfa_lpuid(bfa)); 1282 bfa_fn_lpu(bfa));
930 m->coalesce = iocfc->cfginfo->intr_attr.coalesce; 1283 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
931 m->delay = iocfc->cfginfo->intr_attr.delay; 1284 m->delay = iocfc->cfginfo->intr_attr.delay;
932 m->latency = iocfc->cfginfo->intr_attr.latency; 1285 m->latency = iocfc->cfginfo->intr_attr.latency;
@@ -934,17 +1287,17 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
934 bfa_trc(bfa, attr->delay); 1287 bfa_trc(bfa, attr->delay);
935 bfa_trc(bfa, attr->latency); 1288 bfa_trc(bfa, attr->latency);
936 1289
937 bfa_reqq_produce(bfa, BFA_REQQ_IOC); 1290 bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
938 return BFA_STATUS_OK; 1291 return BFA_STATUS_OK;
939} 1292}
940 1293
941void 1294void
942bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa) 1295bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
943{ 1296{
944 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1297 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
945 1298
946 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); 1299 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
947 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); 1300 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
948} 1301}
949/* 1302/*
950 * Enable IOC after it is disabled. 1303 * Enable IOC after it is disabled.
@@ -954,6 +1307,7 @@ bfa_iocfc_enable(struct bfa_s *bfa)
954{ 1307{
955 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, 1308 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
956 "IOC Enable"); 1309 "IOC Enable");
1310 bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
957 bfa_ioc_enable(&bfa->ioc); 1311 bfa_ioc_enable(&bfa->ioc);
958} 1312}
959 1313
@@ -964,7 +1318,7 @@ bfa_iocfc_disable(struct bfa_s *bfa)
964 "IOC Disable"); 1318 "IOC Disable");
965 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; 1319 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
966 1320
967 bfa->rme_process = BFA_FALSE; 1321 bfa->queue_process = BFA_FALSE;
968 bfa_ioc_disable(&bfa->ioc); 1322 bfa_ioc_disable(&bfa->ioc);
969} 1323}
970 1324
@@ -1033,33 +1387,49 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
1033 * starting address for each block and provide the same 1387 * starting address for each block and provide the same
1034 * structure as input parameter to bfa_attach() call. 1388 * structure as input parameter to bfa_attach() call.
1035 * 1389 *
1390 * @param[in] bfa - pointer to the bfa structure, used while fetching the
1391 * dma, kva memory information of the bfa sub-modules.
1392 *
1036 * @return void 1393 * @return void
1037 * 1394 *
1038 * Special Considerations: @note 1395 * Special Considerations: @note
1039 */ 1396 */
1040void 1397void
1041bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) 1398bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1399 struct bfa_s *bfa)
1042{ 1400{
1043 int i; 1401 int i;
1044 u32 km_len = 0, dm_len = 0; 1402 struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
1403 struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
1404 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
1405 struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
1406 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
1407 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
1408 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
1045 1409
1046 WARN_ON((cfg == NULL) || (meminfo == NULL)); 1410 WARN_ON((cfg == NULL) || (meminfo == NULL));
1047 1411
1048 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); 1412 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
1049 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
1050 BFA_MEM_TYPE_KVA;
1051 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
1052 BFA_MEM_TYPE_DMA;
1053 1413
1054 bfa_iocfc_meminfo(cfg, &km_len, &dm_len); 1414 /* Initialize the DMA & KVA meminfo queues */
1055 1415 INIT_LIST_HEAD(&meminfo->dma_info.qe);
1056 for (i = 0; hal_mods[i]; i++) 1416 INIT_LIST_HEAD(&meminfo->kva_info.qe);
1057 hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
1058 1417
1059 dm_len += bfa_port_meminfo(); 1418 bfa_iocfc_meminfo(cfg, meminfo, bfa);
1060 1419
1061 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; 1420 for (i = 0; hal_mods[i]; i++)
1062 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 1421 hal_mods[i]->meminfo(cfg, meminfo, bfa);
1422
1423 /* dma info setup */
1424 bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
1425 bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
1426 bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
1427 bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
1428 bfa_mem_dma_setup(meminfo, flash_dma,
1429 bfa_flash_meminfo(cfg->drvcfg.min_cfg));
1430 bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
1431 bfa_mem_dma_setup(meminfo, phy_dma,
1432 bfa_phy_meminfo(cfg->drvcfg.min_cfg));
1063} 1433}
1064 1434
1065/* 1435/*
@@ -1092,28 +1462,46 @@ void
1092bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 1462bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1093 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 1463 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1094{ 1464{
1095 int i; 1465 int i;
1096 struct bfa_mem_elem_s *melem; 1466 struct bfa_mem_dma_s *dma_info, *dma_elem;
1467 struct bfa_mem_kva_s *kva_info, *kva_elem;
1468 struct list_head *dm_qe, *km_qe;
1097 1469
1098 bfa->fcs = BFA_FALSE; 1470 bfa->fcs = BFA_FALSE;
1099 1471
1100 WARN_ON((cfg == NULL) || (meminfo == NULL)); 1472 WARN_ON((cfg == NULL) || (meminfo == NULL));
1101 1473
1102 /* 1474 /* Initialize memory pointers for iterative allocation */
1103 * initialize all memory pointers for iterative allocation 1475 dma_info = &meminfo->dma_info;
1104 */ 1476 dma_info->kva_curp = dma_info->kva;
1105 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { 1477 dma_info->dma_curp = dma_info->dma;
1106 melem = meminfo->meminfo + i; 1478
1107 melem->kva_curp = melem->kva; 1479 kva_info = &meminfo->kva_info;
1108 melem->dma_curp = melem->dma; 1480 kva_info->kva_curp = kva_info->kva;
1481
1482 list_for_each(dm_qe, &dma_info->qe) {
1483 dma_elem = (struct bfa_mem_dma_s *) dm_qe;
1484 dma_elem->kva_curp = dma_elem->kva;
1485 dma_elem->dma_curp = dma_elem->dma;
1486 }
1487
1488 list_for_each(km_qe, &kva_info->qe) {
1489 kva_elem = (struct bfa_mem_kva_s *) km_qe;
1490 kva_elem->kva_curp = kva_elem->kva;
1109 } 1491 }
1110 1492
1111 bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev); 1493 bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
1112 1494
1113 for (i = 0; hal_mods[i]; i++) 1495 for (i = 0; hal_mods[i]; i++)
1114 hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); 1496 hal_mods[i]->attach(bfa, bfad, cfg, pcidev);
1115 1497
1116 bfa_com_port_attach(bfa, meminfo); 1498 bfa_com_port_attach(bfa);
1499 bfa_com_ablk_attach(bfa);
1500 bfa_com_cee_attach(bfa);
1501 bfa_com_sfp_attach(bfa);
1502 bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
1503 bfa_com_diag_attach(bfa);
1504 bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
1117} 1505}
1118 1506
1119/* 1507/*
@@ -1215,6 +1603,7 @@ bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
1215 cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS; 1603 cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
1216 cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS; 1604 cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
1217 cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS; 1605 cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
1606 cfg->fwcfg.num_fwtio_reqs = 0;
1218 1607
1219 cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS; 1608 cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
1220 cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS; 1609 cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
@@ -1236,6 +1625,7 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
1236 cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; 1625 cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
1237 cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; 1626 cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
1238 cfg->fwcfg.num_rports = BFA_RPORT_MIN; 1627 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
1628 cfg->fwcfg.num_fwtio_reqs = 0;
1239 1629
1240 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; 1630 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
1241 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; 1631 cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index d85f93aea465..ed8d31b0188b 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -40,7 +40,12 @@ enum {
40 BFA_MFG_TYPE_ASTRA = 807, /* Astra mezz card */ 40 BFA_MFG_TYPE_ASTRA = 807, /* Astra mezz card */
41 BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */ 41 BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */
42 BFA_MFG_TYPE_LIGHTNING = 1741, /* Lightning mezz card */ 42 BFA_MFG_TYPE_LIGHTNING = 1741, /* Lightning mezz card */
43 BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */ 43 BFA_MFG_TYPE_PROWLER_F = 1560, /* Prowler FC only cards */
44 BFA_MFG_TYPE_PROWLER_N = 1410, /* Prowler NIC only cards */
45 BFA_MFG_TYPE_PROWLER_C = 1710, /* Prowler CNA only cards */
46 BFA_MFG_TYPE_PROWLER_D = 1860, /* Prowler Dual cards */
47 BFA_MFG_TYPE_CHINOOK = 1867, /* Chinook cards */
48 BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */
44}; 49};
45 50
46#pragma pack(1) 51#pragma pack(1)
@@ -53,7 +58,8 @@ enum {
53 (type) == BFA_MFG_TYPE_WANCHESE || \ 58 (type) == BFA_MFG_TYPE_WANCHESE || \
54 (type) == BFA_MFG_TYPE_ASTRA || \ 59 (type) == BFA_MFG_TYPE_ASTRA || \
55 (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ 60 (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
56 (type) == BFA_MFG_TYPE_LIGHTNING)) 61 (type) == BFA_MFG_TYPE_LIGHTNING || \
62 (type) == BFA_MFG_TYPE_CHINOOK))
57 63
58/* 64/*
59 * Check if the card having old wwn/mac handling 65 * Check if the card having old wwn/mac handling
@@ -124,30 +130,53 @@ enum bfa_status {
124 BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists, 130 BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists,
125 * contact support */ 131 * contact support */
126 BFA_STATUS_EPROTOCOL = 6, /* Protocol error */ 132 BFA_STATUS_EPROTOCOL = 6, /* Protocol error */
133 BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */
134 BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */
135 BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted */
127 BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */ 136 BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */
137 BFA_STATUS_HDMA_FAILED = 16, /* Host dma failed contact support */
138 BFA_STATUS_FLASH_BAD_LEN = 17, /* Flash bad length */
128 BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */ 139 BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */
129 BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */ 140 BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */
130 BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */ 141 BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */
131 BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported limit */ 142 BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported limit */
132 BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed setting */ 143 BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed setting */
133 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */ 144 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
145 BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
134 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */ 146 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
147 BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
135 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */ 148 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
149 BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */
136 BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */ 150 BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */
137 BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists 151 BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists
138 * contact support */ 152 * contact support */
139 BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */ 153 BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */
154 BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled */
155 BFA_STATUS_IOC_NON_OP = 61, /* IOC is not operational */
156 BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version mismatch */
140 BFA_STATUS_DIAG_BUSY = 71, /* diag busy */ 157 BFA_STATUS_DIAG_BUSY = 71, /* diag busy */
158 BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */
141 BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ 159 BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */
142 BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ 160 BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */
161 BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */
162 BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact support */
163 BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
143 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ 164 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
144 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot 165 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
145 * configuration */ 166 * configuration */
167 BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
146 BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on 168 BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
147 * this adapter */ 169 * this adapter */
148 BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on 170 BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on
149 * the adapter */ 171 * the adapter */
150 BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */ 172 BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
173 BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
174 BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
175 BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */
176 BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */
177 BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */
178 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
179 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
151 BFA_STATUS_MAX_VAL /* Unknown error code */ 180 BFA_STATUS_MAX_VAL /* Unknown error code */
152}; 181};
153#define bfa_status_t enum bfa_status 182#define bfa_status_t enum bfa_status
@@ -265,6 +294,8 @@ enum bfa_ioc_state {
265 BFA_IOC_DISABLED = 10, /* IOC is disabled */ 294 BFA_IOC_DISABLED = 10, /* IOC is disabled */
266 BFA_IOC_FWMISMATCH = 11, /* IOC f/w different from drivers */ 295 BFA_IOC_FWMISMATCH = 11, /* IOC f/w different from drivers */
267 BFA_IOC_ENABLING = 12, /* IOC is being enabled */ 296 BFA_IOC_ENABLING = 12, /* IOC is being enabled */
297 BFA_IOC_HWFAIL = 13, /* PCI mapping doesn't exist */
298 BFA_IOC_ACQ_ADDR = 14, /* Acquiring addr from fabric */
268}; 299};
269 300
270/* 301/*
@@ -294,6 +325,7 @@ struct bfa_ioc_drv_stats_s {
294 u32 enable_reqs; 325 u32 enable_reqs;
295 u32 disable_replies; 326 u32 disable_replies;
296 u32 enable_replies; 327 u32 enable_replies;
328 u32 rsvd;
297}; 329};
298 330
299/* 331/*
@@ -320,7 +352,10 @@ struct bfa_ioc_attr_s {
320 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ 352 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
321 struct bfa_ioc_pci_attr_s pci_attr; 353 struct bfa_ioc_pci_attr_s pci_attr;
322 u8 port_id; /* port number */ 354 u8 port_id; /* port number */
323 u8 rsvd[7]; /* 64bit align */ 355 u8 port_mode; /* bfa_mode_s */
356 u8 cap_bm; /* capability */
357 u8 port_mode_cfg; /* bfa_mode_s */
358 u8 rsvd[4]; /* 64bit align */
324}; 359};
325 360
326/* 361/*
@@ -337,6 +372,21 @@ struct bfa_ioc_attr_s {
337#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 372#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
338#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 373#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
339#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 374#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
375/*
376 * Initial capability definition
377 */
378#define BFA_MFG_IC_FC 0x01
379#define BFA_MFG_IC_ETH 0x02
380
381/*
382 * Adapter capability mask definition
383 */
384#define BFA_CM_HBA 0x01
385#define BFA_CM_CNA 0x02
386#define BFA_CM_NIC 0x04
387#define BFA_CM_FC16G 0x08
388#define BFA_CM_SRIOV 0x10
389#define BFA_CM_MEZZ 0x20
340 390
341#pragma pack(1) 391#pragma pack(1)
342 392
@@ -344,31 +394,39 @@ struct bfa_ioc_attr_s {
344 * All numerical fields are in big-endian format. 394 * All numerical fields are in big-endian format.
345 */ 395 */
346struct bfa_mfg_block_s { 396struct bfa_mfg_block_s {
347 u8 version; /* manufacturing block version */ 397 u8 version; /*!< manufacturing block version */
348 u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */ 398 u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
349 u16 mfgsize; /* mfg block size */ 399 u16 mfgsize; /*!< mfg block size */
350 u16 u16_chksum; /* old u16 checksum */ 400 u16 u16_chksum; /*!< old u16 checksum */
351 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; 401 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
352 char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; 402 char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
353 u8 mfg_day; /* manufacturing day */ 403 u8 mfg_day; /*!< manufacturing day */
354 u8 mfg_month; /* manufacturing month */ 404 u8 mfg_month; /*!< manufacturing month */
355 u16 mfg_year; /* manufacturing year */ 405 u16 mfg_year; /*!< manufacturing year */
356 wwn_t mfg_wwn; /* wwn base for this adapter */ 406 wwn_t mfg_wwn; /*!< wwn base for this adapter */
357 u8 num_wwn; /* number of wwns assigned */ 407 u8 num_wwn; /*!< number of wwns assigned */
358 u8 mfg_speeds; /* speeds allowed for this adapter */ 408 u8 mfg_speeds; /*!< speeds allowed for this adapter */
359 u8 rsv[2]; 409 u8 rsv[2];
360 char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; 410 char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
361 char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; 411 char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
362 char 412 char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
363 supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; 413 char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
364 char 414 mac_t mfg_mac; /*!< base mac address */
365 supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; 415 u8 num_mac; /*!< number of mac addresses */
366 mac_t mfg_mac; /* mac address */ 416 u8 rsv2;
367 u8 num_mac; /* number of mac addresses */ 417 u32 card_type; /*!< card type */
368 u8 rsv2; 418 char cap_nic; /*!< capability nic */
369 u32 mfg_type; /* card type */ 419 char cap_cna; /*!< capability cna */
370 u8 rsv3[108]; 420 char cap_hba; /*!< capability hba */
371 u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */ 421 char cap_fc16g; /*!< capability fc 16g */
422 char cap_sriov; /*!< capability sriov */
423 char cap_mezz; /*!< capability mezz */
424 u8 rsv3;
425 u8 mfg_nports; /*!< number of ports */
426 char media[8]; /*!< xfi/xaui */
427 char initial_mode[8]; /*!< initial mode: hba/cna/nic */
428 u8 rsv4[84];
429 u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
372}; 430};
373 431
374#pragma pack() 432#pragma pack()
@@ -386,17 +444,27 @@ enum {
386 BFA_PCI_DEVICE_ID_FC_8G1P = 0x17, 444 BFA_PCI_DEVICE_ID_FC_8G1P = 0x17,
387 BFA_PCI_DEVICE_ID_CT = 0x14, 445 BFA_PCI_DEVICE_ID_CT = 0x14,
388 BFA_PCI_DEVICE_ID_CT_FC = 0x21, 446 BFA_PCI_DEVICE_ID_CT_FC = 0x21,
447 BFA_PCI_DEVICE_ID_CT2 = 0x22,
389}; 448};
390 449
391#define bfa_asic_id_ct(devid) \ 450#define bfa_asic_id_cb(__d) \
392 ((devid) == BFA_PCI_DEVICE_ID_CT || \ 451 ((__d) == BFA_PCI_DEVICE_ID_FC_8G2P || \
393 (devid) == BFA_PCI_DEVICE_ID_CT_FC) 452 (__d) == BFA_PCI_DEVICE_ID_FC_8G1P)
453#define bfa_asic_id_ct(__d) \
454 ((__d) == BFA_PCI_DEVICE_ID_CT || \
455 (__d) == BFA_PCI_DEVICE_ID_CT_FC)
456#define bfa_asic_id_ct2(__d) ((__d) == BFA_PCI_DEVICE_ID_CT2)
457#define bfa_asic_id_ctc(__d) \
458 (bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d))
394 459
395/* 460/*
396 * PCI sub-system device and vendor ID information 461 * PCI sub-system device and vendor ID information
397 */ 462 */
398enum { 463enum {
399 BFA_PCI_FCOE_SSDEVICE_ID = 0x14, 464 BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
465 BFA_PCI_CT2_SSID_FCoE = 0x22,
466 BFA_PCI_CT2_SSID_ETH = 0x23,
467 BFA_PCI_CT2_SSID_FC = 0x24,
400}; 468};
401 469
402/* 470/*
@@ -416,9 +484,7 @@ enum bfa_port_speed {
416 BFA_PORT_SPEED_8GBPS = 8, 484 BFA_PORT_SPEED_8GBPS = 8,
417 BFA_PORT_SPEED_10GBPS = 10, 485 BFA_PORT_SPEED_10GBPS = 10,
418 BFA_PORT_SPEED_16GBPS = 16, 486 BFA_PORT_SPEED_16GBPS = 16,
419 BFA_PORT_SPEED_AUTO = 487 BFA_PORT_SPEED_AUTO = 0xf,
420 (BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS |
421 BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS),
422}; 488};
423#define bfa_port_speed_t enum bfa_port_speed 489#define bfa_port_speed_t enum bfa_port_speed
424 490
@@ -463,4 +529,453 @@ struct bfa_boot_pbc_s {
463 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX]; 529 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
464}; 530};
465 531
532/*
533 * ASIC block configuration related structures
534 */
535#define BFA_ABLK_MAX_PORTS 2
536#define BFA_ABLK_MAX_PFS 16
537#define BFA_ABLK_MAX 2
538
539#pragma pack(1)
540enum bfa_mode_s {
541 BFA_MODE_HBA = 1,
542 BFA_MODE_CNA = 2,
543 BFA_MODE_NIC = 3
544};
545
546struct bfa_adapter_cfg_mode_s {
547 u16 max_pf;
548 u16 max_vf;
549 enum bfa_mode_s mode;
550};
551
552struct bfa_ablk_cfg_pf_s {
553 u16 pers;
554 u8 port_id;
555 u8 optrom;
556 u8 valid;
557 u8 sriov;
558 u8 max_vfs;
559 u8 rsvd[1];
560 u16 num_qpairs;
561 u16 num_vectors;
562 u32 bw;
563};
564
565struct bfa_ablk_cfg_port_s {
566 u8 mode;
567 u8 type;
568 u8 max_pfs;
569 u8 rsvd[5];
570};
571
572struct bfa_ablk_cfg_inst_s {
573 u8 nports;
574 u8 max_pfs;
575 u8 rsvd[6];
576 struct bfa_ablk_cfg_pf_s pf_cfg[BFA_ABLK_MAX_PFS];
577 struct bfa_ablk_cfg_port_s port_cfg[BFA_ABLK_MAX_PORTS];
578};
579
580struct bfa_ablk_cfg_s {
581 struct bfa_ablk_cfg_inst_s inst[BFA_ABLK_MAX];
582};
583
584
585/*
586 * SFP module specific
587 */
588#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */
589
590enum bfa_defs_sfp_media_e {
591 BFA_SFP_MEDIA_UNKNOWN = 0x00,
592 BFA_SFP_MEDIA_CU = 0x01,
593 BFA_SFP_MEDIA_LW = 0x02,
594 BFA_SFP_MEDIA_SW = 0x03,
595 BFA_SFP_MEDIA_EL = 0x04,
596 BFA_SFP_MEDIA_UNSUPPORT = 0x05,
597};
598
599/*
600 * values for xmtr_tech above
601 */
602enum {
603 SFP_XMTR_TECH_CU = (1 << 0), /* copper FC-BaseT */
604 SFP_XMTR_TECH_CP = (1 << 1), /* copper passive */
605 SFP_XMTR_TECH_CA = (1 << 2), /* copper active */
606 SFP_XMTR_TECH_LL = (1 << 3), /* longwave laser */
607 SFP_XMTR_TECH_SL = (1 << 4), /* shortwave laser w/ OFC */
608 SFP_XMTR_TECH_SN = (1 << 5), /* shortwave laser w/o OFC */
609 SFP_XMTR_TECH_EL_INTRA = (1 << 6), /* elec intra-enclosure */
610 SFP_XMTR_TECH_EL_INTER = (1 << 7), /* elec inter-enclosure */
611 SFP_XMTR_TECH_LC = (1 << 8), /* longwave laser */
612 SFP_XMTR_TECH_SA = (1 << 9)
613};
614
615/*
616 * Serial ID: Data Fields -- Address A0h
617 * Basic ID field total 64 bytes
618 */
619struct sfp_srlid_base_s {
620 u8 id; /* 00: Identifier */
621 u8 extid; /* 01: Extended Identifier */
622 u8 connector; /* 02: Connector */
623 u8 xcvr[8]; /* 03-10: Transceiver */
624 u8 encoding; /* 11: Encoding */
625 u8 br_norm; /* 12: BR, Nominal */
626 u8 rate_id; /* 13: Rate Identifier */
627 u8 len_km; /* 14: Length single mode km */
628 u8 len_100m; /* 15: Length single mode 100m */
629 u8 len_om2; /* 16: Length om2 fiber 10m */
630 u8 len_om1; /* 17: Length om1 fiber 10m */
631 u8 len_cu; /* 18: Length copper 1m */
632 u8 len_om3; /* 19: Length om3 fiber 10m */
633 u8 vendor_name[16];/* 20-35 */
634 u8 unalloc1;
635 u8 vendor_oui[3]; /* 37-39 */
636 u8 vendor_pn[16]; /* 40-55 */
637 u8 vendor_rev[4]; /* 56-59 */
638 u8 wavelen[2]; /* 60-61 */
639 u8 unalloc2;
640 u8 cc_base; /* 63: check code for base id field */
641};
642
643/*
644 * Serial ID: Data Fields -- Address A0h
645 * Extended id field total 32 bytes
646 */
647struct sfp_srlid_ext_s {
648 u8 options[2];
649 u8 br_max;
650 u8 br_min;
651 u8 vendor_sn[16];
652 u8 date_code[8];
653 u8 diag_mon_type; /* 92: Diagnostic Monitoring type */
654 u8 en_options;
655 u8 sff_8472;
656 u8 cc_ext;
657};
658
659/*
660 * Diagnostic: Data Fields -- Address A2h
661 * Diagnostic and control/status base field total 96 bytes
662 */
663struct sfp_diag_base_s {
664 /*
665 * Alarm and warning Thresholds 40 bytes
666 */
667 u8 temp_high_alarm[2]; /* 00-01 */
668 u8 temp_low_alarm[2]; /* 02-03 */
669 u8 temp_high_warning[2]; /* 04-05 */
670 u8 temp_low_warning[2]; /* 06-07 */
671
672 u8 volt_high_alarm[2]; /* 08-09 */
673 u8 volt_low_alarm[2]; /* 10-11 */
674 u8 volt_high_warning[2]; /* 12-13 */
675 u8 volt_low_warning[2]; /* 14-15 */
676
677 u8 bias_high_alarm[2]; /* 16-17 */
678 u8 bias_low_alarm[2]; /* 18-19 */
679 u8 bias_high_warning[2]; /* 20-21 */
680 u8 bias_low_warning[2]; /* 22-23 */
681
682 u8 tx_pwr_high_alarm[2]; /* 24-25 */
683 u8 tx_pwr_low_alarm[2]; /* 26-27 */
684 u8 tx_pwr_high_warning[2]; /* 28-29 */
685 u8 tx_pwr_low_warning[2]; /* 30-31 */
686
687 u8 rx_pwr_high_alarm[2]; /* 32-33 */
688 u8 rx_pwr_low_alarm[2]; /* 34-35 */
689 u8 rx_pwr_high_warning[2]; /* 36-37 */
690 u8 rx_pwr_low_warning[2]; /* 38-39 */
691
692 u8 unallocate_1[16];
693
694 /*
695 * ext_cal_const[36]
696 */
697 u8 rx_pwr[20];
698 u8 tx_i[4];
699 u8 tx_pwr[4];
700 u8 temp[4];
701 u8 volt[4];
702 u8 unallocate_2[3];
703 u8 cc_dmi;
704};
705
706/*
707 * Diagnostic: Data Fields -- Address A2h
708 * Diagnostic and control/status extended field total 24 bytes
709 */
710struct sfp_diag_ext_s {
711 u8 diag[SFP_DIAGMON_SIZE];
712 u8 unalloc1[4];
713 u8 status_ctl;
714 u8 rsvd;
715 u8 alarm_flags[2];
716 u8 unalloc2[2];
717 u8 warning_flags[2];
718 u8 ext_status_ctl[2];
719};
720
721struct sfp_mem_s {
722 struct sfp_srlid_base_s srlid_base;
723 struct sfp_srlid_ext_s srlid_ext;
724 struct sfp_diag_base_s diag_base;
725 struct sfp_diag_ext_s diag_ext;
726};
727
728/*
729 * transceiver codes (SFF-8472 Rev 10.2 Table 3.5)
730 */
731union sfp_xcvr_e10g_code_u {
732 u8 b;
733 struct {
734#ifdef __BIGENDIAN
735 u8 e10g_unall:1; /* 10G Ethernet compliance */
736 u8 e10g_lrm:1;
737 u8 e10g_lr:1;
738 u8 e10g_sr:1;
739 u8 ib_sx:1; /* Infiniband compliance */
740 u8 ib_lx:1;
741 u8 ib_cu_a:1;
742 u8 ib_cu_p:1;
743#else
744 u8 ib_cu_p:1;
745 u8 ib_cu_a:1;
746 u8 ib_lx:1;
747 u8 ib_sx:1; /* Infiniband compliance */
748 u8 e10g_sr:1;
749 u8 e10g_lr:1;
750 u8 e10g_lrm:1;
751 u8 e10g_unall:1; /* 10G Ethernet compliance */
752#endif
753 } r;
754};
755
756union sfp_xcvr_so1_code_u {
757 u8 b;
758 struct {
759 u8 escon:2; /* ESCON compliance code */
760 u8 oc192_reach:1; /* SONET compliance code */
761 u8 so_reach:2;
762 u8 oc48_reach:3;
763 } r;
764};
765
766union sfp_xcvr_so2_code_u {
767 u8 b;
768 struct {
769 u8 reserved:1;
770 u8 oc12_reach:3; /* OC12 reach */
771 u8 reserved1:1;
772 u8 oc3_reach:3; /* OC3 reach */
773 } r;
774};
775
776union sfp_xcvr_eth_code_u {
777 u8 b;
778 struct {
779 u8 base_px:1;
780 u8 base_bx10:1;
781 u8 e100base_fx:1;
782 u8 e100base_lx:1;
783 u8 e1000base_t:1;
784 u8 e1000base_cx:1;
785 u8 e1000base_lx:1;
786 u8 e1000base_sx:1;
787 } r;
788};
789
790struct sfp_xcvr_fc1_code_s {
791 u8 link_len:5; /* FC link length */
792 u8 xmtr_tech2:3;
793 u8 xmtr_tech1:7; /* FC transmitter technology */
794 u8 reserved1:1;
795};
796
797union sfp_xcvr_fc2_code_u {
798 u8 b;
799 struct {
800 u8 tw_media:1; /* twin axial pair (tw) */
801 u8 tp_media:1; /* shielded twisted pair (sp) */
802 u8 mi_media:1; /* miniature coax (mi) */
803 u8 tv_media:1; /* video coax (tv) */
804 u8 m6_media:1; /* multimode, 62.5m (m6) */
805 u8 m5_media:1; /* multimode, 50m (m5) */
806 u8 reserved:1;
807 u8 sm_media:1; /* single mode (sm) */
808 } r;
809};
810
811union sfp_xcvr_fc3_code_u {
812 u8 b;
813 struct {
814#ifdef __BIGENDIAN
815 u8 rsv4:1;
816 u8 mb800:1; /* 800 Mbytes/sec */
817 u8 mb1600:1; /* 1600 Mbytes/sec */
818 u8 mb400:1; /* 400 Mbytes/sec */
819 u8 rsv2:1;
820 u8 mb200:1; /* 200 Mbytes/sec */
821 u8 rsv1:1;
822 u8 mb100:1; /* 100 Mbytes/sec */
823#else
824 u8 mb100:1; /* 100 Mbytes/sec */
825 u8 rsv1:1;
826 u8 mb200:1; /* 200 Mbytes/sec */
827 u8 rsv2:1;
828 u8 mb400:1; /* 400 Mbytes/sec */
829 u8 mb1600:1; /* 1600 Mbytes/sec */
830 u8 mb800:1; /* 800 Mbytes/sec */
831 u8 rsv4:1;
832#endif
833 } r;
834};
835
836struct sfp_xcvr_s {
837 union sfp_xcvr_e10g_code_u e10g;
838 union sfp_xcvr_so1_code_u so1;
839 union sfp_xcvr_so2_code_u so2;
840 union sfp_xcvr_eth_code_u eth;
841 struct sfp_xcvr_fc1_code_s fc1;
842 union sfp_xcvr_fc2_code_u fc2;
843 union sfp_xcvr_fc3_code_u fc3;
844};
845
846/*
847 * Flash module specific
848 */
849#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
850#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
851
852enum bfa_flash_part_type {
853 BFA_FLASH_PART_OPTROM = 1, /* option rom partition */
854 BFA_FLASH_PART_FWIMG = 2, /* firmware image partition */
855 BFA_FLASH_PART_FWCFG = 3, /* firmware tuneable config */
856 BFA_FLASH_PART_DRV = 4, /* IOC driver config */
857 BFA_FLASH_PART_BOOT = 5, /* boot config */
858 BFA_FLASH_PART_ASIC = 6, /* asic bootstrap configuration */
859 BFA_FLASH_PART_MFG = 7, /* manufacturing block partition */
860 BFA_FLASH_PART_OPTROM2 = 8, /* 2nd option rom partition */
861 BFA_FLASH_PART_VPD = 9, /* vpd data of OEM info */
862 BFA_FLASH_PART_PBC = 10, /* pre-boot config */
863 BFA_FLASH_PART_BOOTOVL = 11, /* boot overlay partition */
864 BFA_FLASH_PART_LOG = 12, /* firmware log partition */
865 BFA_FLASH_PART_PXECFG = 13, /* pxe boot config partition */
866 BFA_FLASH_PART_PXEOVL = 14, /* pxe boot overlay partition */
867 BFA_FLASH_PART_PORTCFG = 15, /* port cfg partition */
868 BFA_FLASH_PART_ASICBK = 16, /* asic backup partition */
869};
870
871/*
872 * flash partition attributes
873 */
874struct bfa_flash_part_attr_s {
875 u32 part_type; /* partition type */
876 u32 part_instance; /* partition instance */
877 u32 part_off; /* partition offset */
878 u32 part_size; /* partition size */
879 u32 part_len; /* partition content length */
880 u32 part_status; /* partition status */
881 char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
882};
883
884/*
885 * flash attributes
886 */
887struct bfa_flash_attr_s {
888 u32 status; /* flash overall status */
889 u32 npart; /* num of partitions */
890 struct bfa_flash_part_attr_s part[BFA_FLASH_PART_MAX];
891};
892
893/*
894 * DIAG module specific
895 */
896#define LB_PATTERN_DEFAULT 0xB5B5B5B5
897#define QTEST_CNT_DEFAULT 10
898#define QTEST_PAT_DEFAULT LB_PATTERN_DEFAULT
899
900struct bfa_diag_memtest_s {
901 u8 algo;
902 u8 rsvd[7];
903};
904
905struct bfa_diag_memtest_result {
906 u32 status;
907 u32 addr;
908 u32 exp; /* expect value read from reg */
909 u32 act; /* actually value read */
910 u32 err_status; /* error status reg */
911 u32 err_status1; /* extra error info reg */
912 u32 err_addr; /* error address reg */
913 u8 algo;
914 u8 rsv[3];
915};
916
917struct bfa_diag_loopback_result_s {
918 u32 numtxmfrm; /* no. of transmit frame */
919 u32 numosffrm; /* no. of outstanding frame */
920 u32 numrcvfrm; /* no. of received good frame */
921 u32 badfrminf; /* mis-match info */
922 u32 badfrmnum; /* mis-match fram number */
923 u8 status; /* loopback test result */
924 u8 rsvd[3];
925};
926
927struct bfa_diag_ledtest_s {
928 u32 cmd; /* bfa_led_op_t */
929 u32 color; /* bfa_led_color_t */
930 u16 freq; /* no. of blinks every 10 secs */
931 u8 led; /* bitmap of LEDs to be tested */
932 u8 rsvd[5];
933};
934
935struct bfa_diag_loopback_s {
936 u32 loopcnt;
937 u32 pattern;
938 u8 lb_mode; /* bfa_port_opmode_t */
939 u8 speed; /* bfa_port_speed_t */
940 u8 rsvd[2];
941};
942
943/*
944 * PHY module specific
945 */
946enum bfa_phy_status_e {
947 BFA_PHY_STATUS_GOOD = 0, /* phy is good */
948 BFA_PHY_STATUS_NOT_PRESENT = 1, /* phy does not exist */
949 BFA_PHY_STATUS_BAD = 2, /* phy is bad */
950};
951
952/*
953 * phy attributes for phy query
954 */
955struct bfa_phy_attr_s {
956 u32 status; /* phy present/absent status */
957 u32 length; /* firmware length */
958 u32 fw_ver; /* firmware version */
959 u32 an_status; /* AN status */
960 u32 pma_pmd_status; /* PMA/PMD link status */
961 u32 pma_pmd_signal; /* PMA/PMD signal detect */
962 u32 pcs_status; /* PCS link status */
963};
964
965/*
966 * phy stats
967 */
968struct bfa_phy_stats_s {
969 u32 status; /* phy stats status */
970 u32 link_breaks; /* Num of link breaks after linkup */
971 u32 pma_pmd_fault; /* NPMA/PMD fault */
972 u32 pcs_fault; /* PCS fault */
973 u32 speed_neg; /* Num of speed negotiation */
974 u32 tx_eq_training; /* Num of TX EQ training */
975 u32 tx_eq_timeout; /* Num of TX EQ timeout */
976 u32 crc_error; /* Num of CRC errors */
977};
978
979#pragma pack()
980
466#endif /* __BFA_DEFS_H__ */ 981#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
index 191d34a58b9c..3bbc583f65cf 100644
--- a/drivers/scsi/bfa/bfa_defs_fcs.h
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -90,12 +90,14 @@ enum bfa_lport_role {
90 * FCS port configuration. 90 * FCS port configuration.
91 */ 91 */
92struct bfa_lport_cfg_s { 92struct bfa_lport_cfg_s {
93 wwn_t pwwn; /* port wwn */ 93 wwn_t pwwn; /* port wwn */
94 wwn_t nwwn; /* node wwn */ 94 wwn_t nwwn; /* node wwn */
95 struct bfa_lport_symname_s sym_name; /* vm port symbolic name */ 95 struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
96 bfa_boolean_t preboot_vp; /* vport created from PBC */ 96 enum bfa_lport_role roles; /* FCS port roles */
97 enum bfa_lport_role roles; /* FCS port roles */ 97 u32 rsvd;
98 u8 tag[16]; /* opaque tag from application */ 98 bfa_boolean_t preboot_vp; /* vport created from PBC */
99 u8 tag[16]; /* opaque tag from application */
100 u8 padding[4];
99}; 101};
100 102
101/* 103/*
@@ -249,12 +251,13 @@ enum bfa_vport_state {
249 BFA_FCS_VPORT_FDISC_SEND = 2, 251 BFA_FCS_VPORT_FDISC_SEND = 2,
250 BFA_FCS_VPORT_FDISC = 3, 252 BFA_FCS_VPORT_FDISC = 3,
251 BFA_FCS_VPORT_FDISC_RETRY = 4, 253 BFA_FCS_VPORT_FDISC_RETRY = 4,
252 BFA_FCS_VPORT_ONLINE = 5, 254 BFA_FCS_VPORT_FDISC_RSP_WAIT = 5,
253 BFA_FCS_VPORT_DELETING = 6, 255 BFA_FCS_VPORT_ONLINE = 6,
254 BFA_FCS_VPORT_CLEANUP = 6, 256 BFA_FCS_VPORT_DELETING = 7,
255 BFA_FCS_VPORT_LOGO_SEND = 7, 257 BFA_FCS_VPORT_CLEANUP = 8,
256 BFA_FCS_VPORT_LOGO = 8, 258 BFA_FCS_VPORT_LOGO_SEND = 9,
257 BFA_FCS_VPORT_ERROR = 9, 259 BFA_FCS_VPORT_LOGO = 10,
260 BFA_FCS_VPORT_ERROR = 11,
258 BFA_FCS_VPORT_MAX_STATE, 261 BFA_FCS_VPORT_MAX_STATE,
259}; 262};
260 263
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 207f598877c7..0b97525803fb 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -47,13 +47,12 @@ struct bfa_iocfc_fwcfg_s {
47 u16 num_rports; /* number of remote ports */ 47 u16 num_rports; /* number of remote ports */
48 u16 num_ioim_reqs; /* number of IO reqs */ 48 u16 num_ioim_reqs; /* number of IO reqs */
49 u16 num_tskim_reqs; /* task management requests */ 49 u16 num_tskim_reqs; /* task management requests */
50 u16 num_iotm_reqs; /* number of TM IO reqs */ 50 u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
51 u16 num_tsktm_reqs; /* TM task management requests*/
52 u16 num_fcxp_reqs; /* unassisted FC exchanges */ 51 u16 num_fcxp_reqs; /* unassisted FC exchanges */
53 u16 num_uf_bufs; /* unsolicited recv buffers */ 52 u16 num_uf_bufs; /* unsolicited recv buffers */
54 u8 num_cqs; 53 u8 num_cqs;
55 u8 fw_tick_res; /* FW clock resolution in ms */ 54 u8 fw_tick_res; /* FW clock resolution in ms */
56 u8 rsvd[4]; 55 u8 rsvd[2];
57}; 56};
58#pragma pack() 57#pragma pack()
59 58
@@ -66,8 +65,12 @@ struct bfa_iocfc_drvcfg_s {
66 u16 ioc_recover; /* IOC recovery mode */ 65 u16 ioc_recover; /* IOC recovery mode */
67 u16 min_cfg; /* minimum configuration */ 66 u16 min_cfg; /* minimum configuration */
68 u16 path_tov; /* device path timeout */ 67 u16 path_tov; /* device path timeout */
68 u16 num_tio_reqs; /*!< number of TM IO reqs */
69 u8 port_mode;
70 u8 rsvd_a;
69 bfa_boolean_t delay_comp; /* delay completion of 71 bfa_boolean_t delay_comp; /* delay completion of
70 failed inflight IOs */ 72 failed inflight IOs */
73 u16 num_ttsk_reqs; /* TM task management requests */
71 u32 rsvd; 74 u32 rsvd;
72}; 75};
73 76
@@ -82,7 +85,7 @@ struct bfa_iocfc_cfg_s {
82/* 85/*
83 * IOC firmware IO stats 86 * IOC firmware IO stats
84 */ 87 */
85struct bfa_fw_io_stats_s { 88struct bfa_fw_ioim_stats_s {
86 u32 host_abort; /* IO aborted by host driver*/ 89 u32 host_abort; /* IO aborted by host driver*/
87 u32 host_cleanup; /* IO clean up by host driver */ 90 u32 host_cleanup; /* IO clean up by host driver */
88 91
@@ -152,6 +155,54 @@ struct bfa_fw_io_stats_s {
152 */ 155 */
153}; 156};
154 157
158struct bfa_fw_tio_stats_s {
159 u32 tio_conf_proc; /* TIO CONF processed */
160 u32 tio_conf_drop; /* TIO CONF dropped */
161 u32 tio_cleanup_req; /* TIO cleanup requested */
162 u32 tio_cleanup_comp; /* TIO cleanup completed */
163 u32 tio_abort_rsp; /* TIO abort response */
164 u32 tio_abort_rsp_comp; /* TIO abort rsp completed */
165 u32 tio_abts_req; /* TIO ABTS requested */
166 u32 tio_abts_ack; /* TIO ABTS ack-ed */
167 u32 tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
168 u32 tio_abts_tmo; /* TIO ABTS timeout */
169 u32 tio_snsdata_dma; /* TIO sense data DMA */
170 u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */
171 u32 tio_rxwchan_avail; /* TIO RX wait channel available */
172 u32 tio_hit_bls; /* TIO IOH BLS event */
173 u32 tio_uf_recv; /* TIO received UF */
174 u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
175 u32 tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */
176
177 u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */
178 u32 ds_rxwchan_avail; /* DS RX wait channel available */
179 u32 ds_unaligned_rd; /* DS unaligned read */
180 u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
181 u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
182 u32 ds_flush_req; /* DS flush requested */
183 u32 ds_flush_comp; /* DS flush completed */
184 u32 ds_xfrdy_exp; /* DS XFER_RDY expired */
185 u32 ds_seq_cnt_err; /* DS seq cnt error */
186 u32 ds_seq_len_err; /* DS seq len error */
187 u32 ds_data_oor; /* DS data out of order */
188 u32 ds_hit_bls; /* DS hit BLS */
189 u32 ds_edtov_timer_exp; /* DS edtov expired */
190 u32 ds_cpu_owned; /* DS cpu owned */
191 u32 ds_hit_class2; /* DS hit class2 */
192 u32 ds_length_err; /* DS length error */
193 u32 ds_ro_ooo_err; /* DS relative offset out-of-order error */
194 u32 ds_rectov_timer_exp; /* DS rectov expired */
195 u32 ds_unexp_fr_err; /* DS unexp frame error */
196};
197
198/*
199 * IOC firmware IO stats
200 */
201struct bfa_fw_io_stats_s {
202 struct bfa_fw_ioim_stats_s ioim_stats;
203 struct bfa_fw_tio_stats_s tio_stats;
204};
205
155/* 206/*
156 * IOC port firmware stats 207 * IOC port firmware stats
157 */ 208 */
@@ -205,6 +256,7 @@ struct bfa_fw_port_lksm_stats_s {
205 u32 nos_tx; /* No. of times NOS tx started */ 256 u32 nos_tx; /* No. of times NOS tx started */
206 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ 257 u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
207 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */ 258 u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
259 u32 bbsc_lr; /* LKSM LR tx for credit recovery */
208}; 260};
209 261
210struct bfa_fw_port_snsm_stats_s { 262struct bfa_fw_port_snsm_stats_s {
@@ -266,8 +318,8 @@ struct bfa_fw_fcoe_stats_s {
266 * IOC firmware FCoE port stats 318 * IOC firmware FCoE port stats
267 */ 319 */
268struct bfa_fw_fcoe_port_stats_s { 320struct bfa_fw_fcoe_port_stats_s {
269 struct bfa_fw_fcoe_stats_s fcoe_stats; 321 struct bfa_fw_fcoe_stats_s fcoe_stats;
270 struct bfa_fw_fip_stats_s fip_stats; 322 struct bfa_fw_fip_stats_s fip_stats;
271}; 323};
272 324
273/* 325/*
@@ -636,6 +688,7 @@ enum bfa_port_states {
636 BFA_PORT_ST_FWMISMATCH = 12, 688 BFA_PORT_ST_FWMISMATCH = 12,
637 BFA_PORT_ST_PREBOOT_DISABLED = 13, 689 BFA_PORT_ST_PREBOOT_DISABLED = 13,
638 BFA_PORT_ST_TOGGLING_QWAIT = 14, 690 BFA_PORT_ST_TOGGLING_QWAIT = 14,
691 BFA_PORT_ST_ACQ_ADDR = 15,
639 BFA_PORT_ST_MAX_STATE, 692 BFA_PORT_ST_MAX_STATE,
640}; 693};
641 694
@@ -748,6 +801,10 @@ struct bfa_port_cfg_s {
748 u8 tx_bbcredit; /* transmit buffer credits */ 801 u8 tx_bbcredit; /* transmit buffer credits */
749 u8 ratelimit; /* ratelimit enabled or not */ 802 u8 ratelimit; /* ratelimit enabled or not */
750 u8 trl_def_speed; /* ratelimit default speed */ 803 u8 trl_def_speed; /* ratelimit default speed */
804 u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
805 u8 bb_scn_state; /* Config state of BB_SCN */
806 u8 faa_state; /* FAA enabled/disabled */
807 u8 rsvd[1];
751 u16 path_tov; /* device path timeout */ 808 u16 path_tov; /* device path timeout */
752 u16 q_depth; /* SCSI Queue depth */ 809 u16 q_depth; /* SCSI Queue depth */
753}; 810};
@@ -783,7 +840,7 @@ struct bfa_port_attr_s {
783 enum bfa_port_topology topology; /* current topology */ 840 enum bfa_port_topology topology; /* current topology */
784 bfa_boolean_t beacon; /* current beacon status */ 841 bfa_boolean_t beacon; /* current beacon status */
785 bfa_boolean_t link_e2e_beacon; /* link beacon is on */ 842 bfa_boolean_t link_e2e_beacon; /* link beacon is on */
786 bfa_boolean_t plog_enabled; /* portlog is enabled */ 843 bfa_boolean_t bbsc_op_status; /* fc credit recovery oper state */
787 844
788 /* 845 /*
789 * Dynamic field - info from FCS 846 * Dynamic field - info from FCS
@@ -792,12 +849,10 @@ struct bfa_port_attr_s {
792 enum bfa_port_type port_type; /* current topology */ 849 enum bfa_port_type port_type; /* current topology */
793 u32 loopback; /* external loopback */ 850 u32 loopback; /* external loopback */
794 u32 authfail; /* auth fail state */ 851 u32 authfail; /* auth fail state */
795 bfa_boolean_t io_profile; /* get it from fcpim mod */
796 u8 pad[4]; /* for 64-bit alignement */
797 852
798 /* FCoE specific */ 853 /* FCoE specific */
799 u16 fcoe_vlan; 854 u16 fcoe_vlan;
800 u8 rsvd1[6]; 855 u8 rsvd1[2];
801}; 856};
802 857
803/* 858/*
@@ -988,6 +1043,19 @@ struct bfa_itnim_ioprofile_s {
988}; 1043};
989 1044
990/* 1045/*
1046 * vHBA port attribute values.
1047 */
1048struct bfa_vhba_attr_s {
1049 wwn_t nwwn; /* node wwn */
1050 wwn_t pwwn; /* port wwn */
1051 u32 pid; /* port ID */
1052 bfa_boolean_t io_profile; /* get it from fcpim mod */
1053 bfa_boolean_t plog_enabled; /* portlog is enabled */
1054 u16 path_tov;
1055 u8 rsvd[2];
1056};
1057
1058/*
991 * FC physical port statistics. 1059 * FC physical port statistics.
992 */ 1060 */
993struct bfa_port_fc_stats_s { 1061struct bfa_port_fc_stats_s {
@@ -1020,6 +1088,9 @@ struct bfa_port_fc_stats_s {
1020 u64 bad_os_count; /* Invalid ordered sets */ 1088 u64 bad_os_count; /* Invalid ordered sets */
1021 u64 err_enc_out; /* Encoding err nonframe_8b10b */ 1089 u64 err_enc_out; /* Encoding err nonframe_8b10b */
1022 u64 err_enc; /* Encoding err frame_8b10b */ 1090 u64 err_enc; /* Encoding err frame_8b10b */
1091 u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */
1092 u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */
1093 u64 bbsc_link_resets; /* Credit Recovery-Link Resets */
1023}; 1094};
1024 1095
1025/* 1096/*
@@ -1078,4 +1149,83 @@ union bfa_port_stats_u {
1078 struct bfa_port_eth_stats_s eth; 1149 struct bfa_port_eth_stats_s eth;
1079}; 1150};
1080 1151
1152struct bfa_port_cfg_mode_s {
1153 u16 max_pf;
1154 u16 max_vf;
1155 enum bfa_mode_s mode;
1156};
1157
1158#pragma pack(1)
1159
1160#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
1161#define BFA_CEE_DCBX_MAX_PRIORITY (8)
1162#define BFA_CEE_DCBX_MAX_PGID (8)
1163
1164struct bfa_cee_lldp_str_s {
1165 u8 sub_type;
1166 u8 len;
1167 u8 rsvd[2];
1168 u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
1169};
1170
1171struct bfa_cee_lldp_cfg_s {
1172 struct bfa_cee_lldp_str_s chassis_id;
1173 struct bfa_cee_lldp_str_s port_id;
1174 struct bfa_cee_lldp_str_s port_desc;
1175 struct bfa_cee_lldp_str_s sys_name;
1176 struct bfa_cee_lldp_str_s sys_desc;
1177 struct bfa_cee_lldp_str_s mgmt_addr;
1178 u16 time_to_live;
1179 u16 enabled_system_cap;
1180};
1181
1182/* CEE/DCBX parameters */
1183struct bfa_cee_dcbx_cfg_s {
1184 u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
1185 u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
1186 u8 pfc_primap; /* bitmap of priorties with PFC enabled */
1187 u8 fcoe_primap; /* bitmap of priorities used for FcoE traffic */
1188 u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
1189 u8 dcbx_version; /* operating version:CEE or preCEE */
1190 u8 lls_fcoe; /* FCoE Logical Link Status */
1191 u8 lls_lan; /* LAN Logical Link Status */
1192 u8 rsvd[2];
1193};
1194
1195/* CEE Query */
1196struct bfa_cee_attr_s {
1197 u8 cee_status;
1198 u8 error_reason;
1199 struct bfa_cee_lldp_cfg_s lldp_remote;
1200 struct bfa_cee_dcbx_cfg_s dcbx_remote;
1201 mac_t src_mac;
1202 u8 link_speed;
1203 u8 nw_priority;
1204 u8 filler[2];
1205};
1206
1207/* LLDP/DCBX/CEE Statistics */
1208struct bfa_cee_stats_s {
1209 u32 lldp_tx_frames; /* LLDP Tx Frames */
1210 u32 lldp_rx_frames; /* LLDP Rx Frames */
1211 u32 lldp_rx_frames_invalid; /* LLDP Rx Frames invalid */
1212 u32 lldp_rx_frames_new; /* LLDP Rx Frames new */
1213 u32 lldp_tlvs_unrecognized; /* LLDP Rx unrecog. TLVs */
1214 u32 lldp_rx_shutdown_tlvs; /* LLDP Rx shutdown TLVs */
1215 u32 lldp_info_aged_out; /* LLDP remote info aged */
1216 u32 dcbx_phylink_ups; /* DCBX phy link ups */
1217 u32 dcbx_phylink_downs; /* DCBX phy link downs */
1218 u32 dcbx_rx_tlvs; /* DCBX Rx TLVs */
1219 u32 dcbx_rx_tlvs_invalid; /* DCBX Rx TLVs invalid */
1220 u32 dcbx_control_tlv_error; /* DCBX control TLV errors */
1221 u32 dcbx_feature_tlv_error; /* DCBX feature TLV errors */
1222 u32 dcbx_cee_cfg_new; /* DCBX new CEE cfg rcvd */
1223 u32 cee_status_down; /* DCB status down */
1224 u32 cee_status_up; /* DCB status up */
1225 u32 cee_hw_cfg_changed; /* DCB hw cfg changed */
1226 u32 cee_rx_invalid_cfg; /* DCB invalid cfg */
1227};
1228
1229#pragma pack()
1230
1081#endif /* __BFA_DEFS_SVC_H__ */ 1231#endif /* __BFA_DEFS_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index bf0067e0fd0d..8d0b88f67a38 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1021,7 +1021,7 @@ struct fc_symname_s {
1021#define FC_ED_TOV 2 1021#define FC_ED_TOV 2
1022#define FC_REC_TOV (FC_ED_TOV + 1) 1022#define FC_REC_TOV (FC_ED_TOV + 1)
1023#define FC_RA_TOV 10 1023#define FC_RA_TOV 10
1024#define FC_ELS_TOV (2 * FC_RA_TOV) 1024#define FC_ELS_TOV ((2 * FC_RA_TOV) + 1)
1025#define FC_FCCT_TOV (3 * FC_RA_TOV) 1025#define FC_FCCT_TOV (3 * FC_RA_TOV)
1026 1026
1027/* 1027/*
@@ -1049,15 +1049,6 @@ struct fc_vft_s {
1049}; 1049};
1050 1050
1051/* 1051/*
1052 * FCP
1053 */
1054enum {
1055 FCP_RJT = 0x01000000, /* SRR reject */
1056 FCP_SRR_ACCEPT = 0x02000000, /* SRR accept */
1057 FCP_SRR = 0x14000000, /* Sequence Retransmission Request */
1058};
1059
1060/*
1061 * FCP_CMND definitions 1052 * FCP_CMND definitions
1062 */ 1053 */
1063#define FCP_CMND_CDB_LEN 16 1054#define FCP_CMND_CDB_LEN 16
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b7e253451654..17b59b8b5644 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -94,7 +94,6 @@ fcbuild_init(void)
94 */ 94 */
95 plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; 95 plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
96 plogi_tmpl.csp.verlo = FC_PH_VER_4_3; 96 plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
97 plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004);
98 plogi_tmpl.csp.ciro = 0x1; 97 plogi_tmpl.csp.ciro = 0x1;
99 plogi_tmpl.csp.cisc = 0x0; 98 plogi_tmpl.csp.cisc = 0x0;
100 plogi_tmpl.csp.altbbcred = 0x0; 99 plogi_tmpl.csp.altbbcred = 0x0;
@@ -156,6 +155,22 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
156 */ 155 */
157} 156}
158 157
158static void
159fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
160{
161 memset(fchs, 0, sizeof(struct fchs_s));
162
163 fchs->routing = FC_RTG_FC4_DEV_DATA;
164 fchs->cat_info = FC_CAT_SOLICIT_CTRL;
165 fchs->type = FC_TYPE_SERVICES;
166 fchs->f_ctl =
167 bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
168 FCTL_END_SEQ | FCTL_SI_XFER);
169 fchs->d_id = d_id;
170 fchs->s_id = s_id;
171 fchs->ox_id = ox_id;
172}
173
159void 174void
160fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) 175fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
161{ 176{
@@ -207,7 +222,7 @@ fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
207static u16 222static u16
208fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 223fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
209 __be16 ox_id, wwn_t port_name, wwn_t node_name, 224 __be16 ox_id, wwn_t port_name, wwn_t node_name,
210 u16 pdu_size, u8 els_code) 225 u16 pdu_size, u16 bb_cr, u8 els_code)
211{ 226{
212 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); 227 struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
213 228
@@ -220,6 +235,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
220 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 235 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
221 236
222 plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size); 237 plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
238 plogi->csp.bbcred = cpu_to_be16(bb_cr);
223 239
224 memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); 240 memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
225 memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); 241 memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
@@ -268,15 +284,17 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
268u16 284u16
269fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, 285fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
270 __be16 ox_id, wwn_t port_name, wwn_t node_name, 286 __be16 ox_id, wwn_t port_name, wwn_t node_name,
271 u16 pdu_size, u16 local_bb_credits) 287 u16 pdu_size, u16 local_bb_credits, u8 bb_scn)
272{ 288{
273 u32 d_id = 0; 289 u32 d_id = 0;
290 u16 bbscn_rxsz = (bb_scn << 12) | pdu_size;
274 291
275 memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); 292 memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
276 fc_els_rsp_build(fchs, d_id, s_id, ox_id); 293 fc_els_rsp_build(fchs, d_id, s_id, ox_id);
277 294
278 flogi->els_cmd.els_code = FC_ELS_ACC; 295 flogi->els_cmd.els_code = FC_ELS_ACC;
279 flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); 296 flogi->class3.rxsz = cpu_to_be16(pdu_size);
297 flogi->csp.rxsz = cpu_to_be16(bbscn_rxsz); /* bb_scn/rxsz */
280 flogi->port_name = port_name; 298 flogi->port_name = port_name;
281 flogi->node_name = node_name; 299 flogi->node_name = node_name;
282 300
@@ -306,19 +324,19 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
306u16 324u16
307fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 325fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
308 u16 ox_id, wwn_t port_name, wwn_t node_name, 326 u16 ox_id, wwn_t port_name, wwn_t node_name,
309 u16 pdu_size) 327 u16 pdu_size, u16 bb_cr)
310{ 328{
311 return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, 329 return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
312 node_name, pdu_size, FC_ELS_PLOGI); 330 node_name, pdu_size, bb_cr, FC_ELS_PLOGI);
313} 331}
314 332
315u16 333u16
316fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, 334fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
317 u16 ox_id, wwn_t port_name, wwn_t node_name, 335 u16 ox_id, wwn_t port_name, wwn_t node_name,
318 u16 pdu_size) 336 u16 pdu_size, u16 bb_cr)
319{ 337{
320 return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, 338 return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
321 node_name, pdu_size, FC_ELS_ACC); 339 node_name, pdu_size, bb_cr, FC_ELS_ACC);
322} 340}
323 341
324enum fc_parse_status 342enum fc_parse_status
@@ -1096,6 +1114,21 @@ fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
1096} 1114}
1097 1115
1098u16 1116u16
1117fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr,
1118 u32 d_id, u32 s_id, u16 ox_id, u8 reason_code,
1119 u8 reason_code_expl)
1120{
1121 fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id);
1122
1123 cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT);
1124 cthdr->rev_id = CT_GS3_REVISION;
1125
1126 cthdr->reason_code = reason_code;
1127 cthdr->exp_code = reason_code_expl;
1128 return sizeof(struct ct_hdr_s);
1129}
1130
1131u16
1099fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, 1132fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
1100 u8 set_br_reg, u32 s_id, u16 ox_id) 1133 u8 set_br_reg, u32 s_id, u16 ox_id)
1101{ 1134{
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index ece51ec7620b..42cd9d4da697 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -66,6 +66,9 @@ fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
66 case RPSC_OP_SPEED_8G: 66 case RPSC_OP_SPEED_8G:
67 return BFA_PORT_SPEED_8GBPS; 67 return BFA_PORT_SPEED_8GBPS;
68 68
69 case RPSC_OP_SPEED_16G:
70 return BFA_PORT_SPEED_16GBPS;
71
69 case RPSC_OP_SPEED_10G: 72 case RPSC_OP_SPEED_10G:
70 return BFA_PORT_SPEED_10GBPS; 73 return BFA_PORT_SPEED_10GBPS;
71 74
@@ -94,6 +97,9 @@ fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
94 case BFA_PORT_SPEED_8GBPS: 97 case BFA_PORT_SPEED_8GBPS:
95 return RPSC_OP_SPEED_8G; 98 return RPSC_OP_SPEED_8G;
96 99
100 case BFA_PORT_SPEED_16GBPS:
101 return RPSC_OP_SPEED_16G;
102
97 case BFA_PORT_SPEED_10GBPS: 103 case BFA_PORT_SPEED_10GBPS:
98 return RPSC_OP_SPEED_10G; 104 return RPSC_OP_SPEED_10G;
99 105
@@ -141,11 +147,11 @@ u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
141 u32 s_id, __be16 ox_id, 147 u32 s_id, __be16 ox_id,
142 wwn_t port_name, wwn_t node_name, 148 wwn_t port_name, wwn_t node_name,
143 u16 pdu_size, 149 u16 pdu_size,
144 u16 local_bb_credits); 150 u16 local_bb_credits, u8 bb_scn);
145 151
146u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, 152u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
147 u32 s_id, u16 ox_id, wwn_t port_name, 153 u32 s_id, u16 ox_id, wwn_t port_name,
148 wwn_t node_name, u16 pdu_size); 154 wwn_t node_name, u16 pdu_size, u16 bb_cr);
149 155
150enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs); 156enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
151 157
@@ -177,13 +183,17 @@ u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
177u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id, 183u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
178 u16 ox_id, u32 port_id); 184 u16 ox_id, u32 port_id);
179 185
186u16 fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr,
187 u32 d_id, u32 s_id, u16 ox_id,
188 u8 reason_code, u8 reason_code_expl);
189
180u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, 190u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
181 u8 set_br_reg, u32 s_id, u16 ox_id); 191 u8 set_br_reg, u32 s_id, u16 ox_id);
182 192
183u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, 193u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
184 u32 s_id, u16 ox_id, 194 u32 s_id, u16 ox_id,
185 wwn_t port_name, wwn_t node_name, 195 wwn_t port_name, wwn_t node_name,
186 u16 pdu_size); 196 u16 pdu_size, u16 bb_cr);
187 197
188u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, 198u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
189 u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, 199 u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index c0353cdca929..a4e7951c6063 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -19,7 +19,6 @@
19#include "bfa_modules.h" 19#include "bfa_modules.h"
20 20
21BFA_TRC_FILE(HAL, FCPIM); 21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim);
23 22
24/* 23/*
25 * BFA ITNIM Related definitions 24 * BFA ITNIM Related definitions
@@ -287,24 +286,16 @@ static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
287 * Compute and return memory needed by FCP(im) module. 286 * Compute and return memory needed by FCP(im) module.
288 */ 287 */
289static void 288static void
290bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 289bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
291 u32 *dm_len)
292{ 290{
293 bfa_itnim_meminfo(cfg, km_len, dm_len); 291 bfa_itnim_meminfo(cfg, km_len);
294 292
295 /* 293 /*
296 * IO memory 294 * IO memory
297 */ 295 */
298 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
299 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
300 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
301 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
302
303 *km_len += cfg->fwcfg.num_ioim_reqs * 296 *km_len += cfg->fwcfg.num_ioim_reqs *
304 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s)); 297 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
305 298
306 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
307
308 /* 299 /*
309 * task management command memory 300 * task management command memory
310 */ 301 */
@@ -315,52 +306,41 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
315 306
316 307
317static void 308static void
318bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 309bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
319 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 310 struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
320{ 311{
321 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 312 struct bfa_fcpim_s *fcpim = &fcp->fcpim;
313 struct bfa_s *bfa = fcp->bfa;
322 314
323 bfa_trc(bfa, cfg->drvcfg.path_tov); 315 bfa_trc(bfa, cfg->drvcfg.path_tov);
324 bfa_trc(bfa, cfg->fwcfg.num_rports); 316 bfa_trc(bfa, cfg->fwcfg.num_rports);
325 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs); 317 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
326 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs); 318 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
327 319
320 fcpim->fcp = fcp;
328 fcpim->bfa = bfa; 321 fcpim->bfa = bfa;
329 fcpim->num_itnims = cfg->fwcfg.num_rports; 322 fcpim->num_itnims = cfg->fwcfg.num_rports;
330 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
331 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs; 323 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
332 fcpim->path_tov = cfg->drvcfg.path_tov; 324 fcpim->path_tov = cfg->drvcfg.path_tov;
333 fcpim->delay_comp = cfg->drvcfg.delay_comp; 325 fcpim->delay_comp = cfg->drvcfg.delay_comp;
334 fcpim->profile_comp = NULL; 326 fcpim->profile_comp = NULL;
335 fcpim->profile_start = NULL; 327 fcpim->profile_start = NULL;
336 328
337 bfa_itnim_attach(fcpim, meminfo); 329 bfa_itnim_attach(fcpim);
338 bfa_tskim_attach(fcpim, meminfo); 330 bfa_tskim_attach(fcpim);
339 bfa_ioim_attach(fcpim, meminfo); 331 bfa_ioim_attach(fcpim);
340}
341
342static void
343bfa_fcpim_detach(struct bfa_s *bfa)
344{
345}
346
347static void
348bfa_fcpim_start(struct bfa_s *bfa)
349{
350} 332}
351 333
352static void 334static void
353bfa_fcpim_stop(struct bfa_s *bfa) 335bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
354{ 336{
355} 337 struct bfa_fcpim_s *fcpim = &fcp->fcpim;
356
357static void
358bfa_fcpim_iocdisable(struct bfa_s *bfa)
359{
360 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
361 struct bfa_itnim_s *itnim; 338 struct bfa_itnim_s *itnim;
362 struct list_head *qe, *qen; 339 struct list_head *qe, *qen;
363 340
341 /* Enqueue unused ioim resources to free_q */
342 list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);
343
364 list_for_each_safe(qe, qen, &fcpim->itnim_q) { 344 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
365 itnim = (struct bfa_itnim_s *) qe; 345 itnim = (struct bfa_itnim_s *) qe;
366 bfa_itnim_iocdisable(itnim); 346 bfa_itnim_iocdisable(itnim);
@@ -370,7 +350,7 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
370void 350void
371bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) 351bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
372{ 352{
373 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 353 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
374 354
375 fcpim->path_tov = path_tov * 1000; 355 fcpim->path_tov = path_tov * 1000;
376 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX) 356 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
@@ -380,15 +360,87 @@ bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
380u16 360u16
381bfa_fcpim_path_tov_get(struct bfa_s *bfa) 361bfa_fcpim_path_tov_get(struct bfa_s *bfa)
382{ 362{
383 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 363 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
384 364
385 return fcpim->path_tov / 1000; 365 return fcpim->path_tov / 1000;
386} 366}
387 367
368#define bfa_fcpim_add_iostats(__l, __r, __stats) \
369 (__l->__stats += __r->__stats)
370
371void
372bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
373 struct bfa_itnim_iostats_s *rstats)
374{
375 bfa_fcpim_add_iostats(lstats, rstats, total_ios);
376 bfa_fcpim_add_iostats(lstats, rstats, qresumes);
377 bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
378 bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
379 bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
380 bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
381 bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
382 bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
383 bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
384 bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
385 bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
386 bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
387 bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
388 bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
389 bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
390 bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
391 bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
392 bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
393 bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
394 bfa_fcpim_add_iostats(lstats, rstats, onlines);
395 bfa_fcpim_add_iostats(lstats, rstats, offlines);
396 bfa_fcpim_add_iostats(lstats, rstats, creates);
397 bfa_fcpim_add_iostats(lstats, rstats, deletes);
398 bfa_fcpim_add_iostats(lstats, rstats, create_comps);
399 bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
400 bfa_fcpim_add_iostats(lstats, rstats, sler_events);
401 bfa_fcpim_add_iostats(lstats, rstats, fw_create);
402 bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
403 bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
404 bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
405 bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
406 bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
407 bfa_fcpim_add_iostats(lstats, rstats, tm_success);
408 bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
409 bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
410 bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
411 bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
412 bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
413 bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
414 bfa_fcpim_add_iostats(lstats, rstats, io_comps);
415 bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
416 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
417 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
418 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
419}
420
421bfa_status_t
422bfa_fcpim_port_iostats(struct bfa_s *bfa,
423 struct bfa_itnim_iostats_s *stats, u8 lp_tag)
424{
425 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
426 struct list_head *qe, *qen;
427 struct bfa_itnim_s *itnim;
428
429 /* accumulate IO stats from itnim */
430 memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
431 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
432 itnim = (struct bfa_itnim_s *) qe;
433 if (itnim->rport->rport_info.lp_tag != lp_tag)
434 continue;
435 bfa_fcpim_add_stats(stats, &(itnim->stats));
436 }
437 return BFA_STATUS_OK;
438}
439
388u16 440u16
389bfa_fcpim_qdepth_get(struct bfa_s *bfa) 441bfa_fcpim_qdepth_get(struct bfa_s *bfa)
390{ 442{
391 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 443 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
392 444
393 return fcpim->q_depth; 445 return fcpim->q_depth;
394} 446}
@@ -990,8 +1042,7 @@ bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
990} 1042}
991 1043
992void 1044void
993bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 1045bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
994 u32 *dm_len)
995{ 1046{
996 /* 1047 /*
997 * ITN memory 1048 * ITN memory
@@ -1000,15 +1051,16 @@ bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
1000} 1051}
1001 1052
1002void 1053void
1003bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) 1054bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
1004{ 1055{
1005 struct bfa_s *bfa = fcpim->bfa; 1056 struct bfa_s *bfa = fcpim->bfa;
1057 struct bfa_fcp_mod_s *fcp = fcpim->fcp;
1006 struct bfa_itnim_s *itnim; 1058 struct bfa_itnim_s *itnim;
1007 int i, j; 1059 int i, j;
1008 1060
1009 INIT_LIST_HEAD(&fcpim->itnim_q); 1061 INIT_LIST_HEAD(&fcpim->itnim_q);
1010 1062
1011 itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo); 1063 itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
1012 fcpim->itnim_arr = itnim; 1064 fcpim->itnim_arr = itnim;
1013 1065
1014 for (i = 0; i < fcpim->num_itnims; i++, itnim++) { 1066 for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
@@ -1030,7 +1082,7 @@ bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
1030 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); 1082 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1031 } 1083 }
1032 1084
1033 bfa_meminfo_kva(minfo) = (u8 *) itnim; 1085 bfa_mem_kva_curp(fcp) = (u8 *) itnim;
1034} 1086}
1035 1087
1036void 1088void
@@ -1043,7 +1095,7 @@ bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
1043static bfa_boolean_t 1095static bfa_boolean_t
1044bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) 1096bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1045{ 1097{
1046 struct bfi_itnim_create_req_s *m; 1098 struct bfi_itn_create_req_s *m;
1047 1099
1048 itnim->msg_no++; 1100 itnim->msg_no++;
1049 1101
@@ -1056,8 +1108,8 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1056 return BFA_FALSE; 1108 return BFA_FALSE;
1057 } 1109 }
1058 1110
1059 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ, 1111 bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
1060 bfa_lpuid(itnim->bfa)); 1112 bfa_fn_lpu(itnim->bfa));
1061 m->fw_handle = itnim->rport->fw_handle; 1113 m->fw_handle = itnim->rport->fw_handle;
1062 m->class = FC_CLASS_3; 1114 m->class = FC_CLASS_3;
1063 m->seq_rec = itnim->seq_rec; 1115 m->seq_rec = itnim->seq_rec;
@@ -1067,14 +1119,14 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1067 /* 1119 /*
1068 * queue I/O message to firmware 1120 * queue I/O message to firmware
1069 */ 1121 */
1070 bfa_reqq_produce(itnim->bfa, itnim->reqq); 1122 bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
1071 return BFA_TRUE; 1123 return BFA_TRUE;
1072} 1124}
1073 1125
1074static bfa_boolean_t 1126static bfa_boolean_t
1075bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) 1127bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1076{ 1128{
1077 struct bfi_itnim_delete_req_s *m; 1129 struct bfi_itn_delete_req_s *m;
1078 1130
1079 /* 1131 /*
1080 * check for room in queue to send request now 1132 * check for room in queue to send request now
@@ -1085,15 +1137,15 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1085 return BFA_FALSE; 1137 return BFA_FALSE;
1086 } 1138 }
1087 1139
1088 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ, 1140 bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
1089 bfa_lpuid(itnim->bfa)); 1141 bfa_fn_lpu(itnim->bfa));
1090 m->fw_handle = itnim->rport->fw_handle; 1142 m->fw_handle = itnim->rport->fw_handle;
1091 bfa_stats(itnim, fw_delete); 1143 bfa_stats(itnim, fw_delete);
1092 1144
1093 /* 1145 /*
1094 * queue I/O message to firmware 1146 * queue I/O message to firmware
1095 */ 1147 */
1096 bfa_reqq_produce(itnim->bfa, itnim->reqq); 1148 bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
1097 return BFA_TRUE; 1149 return BFA_TRUE;
1098} 1150}
1099 1151
@@ -1224,7 +1276,7 @@ bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1224static void 1276static void
1225bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim) 1277bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1226{ 1278{
1227 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa); 1279 struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
1228 fcpim->del_itn_stats.del_itn_iocomp_aborted += 1280 fcpim->del_itn_stats.del_itn_iocomp_aborted +=
1229 itnim->stats.iocomp_aborted; 1281 itnim->stats.iocomp_aborted;
1230 fcpim->del_itn_stats.del_itn_iocomp_timedout += 1282 fcpim->del_itn_stats.del_itn_iocomp_timedout +=
@@ -1250,8 +1302,8 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1250void 1302void
1251bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 1303bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1252{ 1304{
1253 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 1305 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
1254 union bfi_itnim_i2h_msg_u msg; 1306 union bfi_itn_i2h_msg_u msg;
1255 struct bfa_itnim_s *itnim; 1307 struct bfa_itnim_s *itnim;
1256 1308
1257 bfa_trc(bfa, m->mhdr.msg_id); 1309 bfa_trc(bfa, m->mhdr.msg_id);
@@ -1259,7 +1311,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1259 msg.msg = m; 1311 msg.msg = m;
1260 1312
1261 switch (m->mhdr.msg_id) { 1313 switch (m->mhdr.msg_id) {
1262 case BFI_ITNIM_I2H_CREATE_RSP: 1314 case BFI_ITN_I2H_CREATE_RSP:
1263 itnim = BFA_ITNIM_FROM_TAG(fcpim, 1315 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1264 msg.create_rsp->bfa_handle); 1316 msg.create_rsp->bfa_handle);
1265 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); 1317 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
@@ -1267,7 +1319,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1267 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); 1319 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1268 break; 1320 break;
1269 1321
1270 case BFI_ITNIM_I2H_DELETE_RSP: 1322 case BFI_ITN_I2H_DELETE_RSP:
1271 itnim = BFA_ITNIM_FROM_TAG(fcpim, 1323 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1272 msg.delete_rsp->bfa_handle); 1324 msg.delete_rsp->bfa_handle);
1273 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); 1325 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
@@ -1275,7 +1327,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1275 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); 1327 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1276 break; 1328 break;
1277 1329
1278 case BFI_ITNIM_I2H_SLER_EVENT: 1330 case BFI_ITN_I2H_SLER_EVENT:
1279 itnim = BFA_ITNIM_FROM_TAG(fcpim, 1331 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1280 msg.sler_event->bfa_handle); 1332 msg.sler_event->bfa_handle);
1281 bfa_stats(itnim, sler_events); 1333 bfa_stats(itnim, sler_events);
@@ -1295,9 +1347,11 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1295struct bfa_itnim_s * 1347struct bfa_itnim_s *
1296bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) 1348bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
1297{ 1349{
1298 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 1350 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
1299 struct bfa_itnim_s *itnim; 1351 struct bfa_itnim_s *itnim;
1300 1352
1353 bfa_itn_create(bfa, rport, bfa_itnim_isr);
1354
1301 itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); 1355 itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
1302 WARN_ON(itnim->rport != rport); 1356 WARN_ON(itnim->rport != rport);
1303 1357
@@ -1991,7 +2045,8 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
1991 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) && 2045 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
1992 m->sns_len) { 2046 m->sns_len) {
1993 sns_len = m->sns_len; 2047 sns_len = m->sns_len;
1994 snsinfo = ioim->iosp->snsinfo; 2048 snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2049 ioim->iotag);
1995 } 2050 }
1996 2051
1997 /* 2052 /*
@@ -2189,12 +2244,12 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2189 */ 2244 */
2190 switch (m->cmnd.iodir) { 2245 switch (m->cmnd.iodir) {
2191 case FCP_IODIR_READ: 2246 case FCP_IODIR_READ:
2192 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa)); 2247 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2193 bfa_stats(itnim, input_reqs); 2248 bfa_stats(itnim, input_reqs);
2194 ioim->itnim->stats.rd_throughput += fcp_dl; 2249 ioim->itnim->stats.rd_throughput += fcp_dl;
2195 break; 2250 break;
2196 case FCP_IODIR_WRITE: 2251 case FCP_IODIR_WRITE:
2197 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa)); 2252 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2198 bfa_stats(itnim, output_reqs); 2253 bfa_stats(itnim, output_reqs);
2199 ioim->itnim->stats.wr_throughput += fcp_dl; 2254 ioim->itnim->stats.wr_throughput += fcp_dl;
2200 break; 2255 break;
@@ -2202,16 +2257,16 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2202 bfa_stats(itnim, input_reqs); 2257 bfa_stats(itnim, input_reqs);
2203 bfa_stats(itnim, output_reqs); 2258 bfa_stats(itnim, output_reqs);
2204 default: 2259 default:
2205 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); 2260 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2206 } 2261 }
2207 if (itnim->seq_rec || 2262 if (itnim->seq_rec ||
2208 (scsi_bufflen(cmnd) & (sizeof(u32) - 1))) 2263 (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2209 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); 2264 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2210 2265
2211 /* 2266 /*
2212 * queue I/O message to firmware 2267 * queue I/O message to firmware
2213 */ 2268 */
2214 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2269 bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2215 return BFA_TRUE; 2270 return BFA_TRUE;
2216} 2271}
2217 2272
@@ -2269,14 +2324,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2269 else 2324 else
2270 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; 2325 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2271 2326
2272 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa)); 2327 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
2273 m->io_tag = cpu_to_be16(ioim->iotag); 2328 m->io_tag = cpu_to_be16(ioim->iotag);
2274 m->abort_tag = ++ioim->abort_tag; 2329 m->abort_tag = ++ioim->abort_tag;
2275 2330
2276 /* 2331 /*
2277 * queue I/O message to firmware 2332 * queue I/O message to firmware
2278 */ 2333 */
2279 bfa_reqq_produce(ioim->bfa, ioim->reqq); 2334 bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2280 return BFA_TRUE; 2335 return BFA_TRUE;
2281} 2336}
2282 2337
@@ -2360,46 +2415,32 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2360 * Memory allocation and initialization. 2415 * Memory allocation and initialization.
2361 */ 2416 */
2362void 2417void
2363bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) 2418bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2364{ 2419{
2365 struct bfa_ioim_s *ioim; 2420 struct bfa_ioim_s *ioim;
2421 struct bfa_fcp_mod_s *fcp = fcpim->fcp;
2366 struct bfa_ioim_sp_s *iosp; 2422 struct bfa_ioim_sp_s *iosp;
2367 u16 i; 2423 u16 i;
2368 u8 *snsinfo;
2369 u32 snsbufsz;
2370 2424
2371 /* 2425 /*
2372 * claim memory first 2426 * claim memory first
2373 */ 2427 */
2374 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); 2428 ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
2375 fcpim->ioim_arr = ioim; 2429 fcpim->ioim_arr = ioim;
2376 bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs); 2430 bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
2377 2431
2378 iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo); 2432 iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
2379 fcpim->ioim_sp_arr = iosp; 2433 fcpim->ioim_sp_arr = iosp;
2380 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); 2434 bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
2381
2382 /*
2383 * Claim DMA memory for per IO sense data.
2384 */
2385 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
2386 fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
2387 bfa_meminfo_dma_phys(minfo) += snsbufsz;
2388
2389 fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
2390 bfa_meminfo_dma_virt(minfo) += snsbufsz;
2391 snsinfo = fcpim->snsbase.kva;
2392 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
2393 2435
2394 /* 2436 /*
2395 * Initialize ioim free queues 2437 * Initialize ioim free queues
2396 */ 2438 */
2397 INIT_LIST_HEAD(&fcpim->ioim_free_q);
2398 INIT_LIST_HEAD(&fcpim->ioim_resfree_q); 2439 INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2399 INIT_LIST_HEAD(&fcpim->ioim_comp_q); 2440 INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2400 2441
2401 for (i = 0; i < fcpim->num_ioim_reqs; 2442 for (i = 0; i < fcpim->fcp->num_ioim_reqs;
2402 i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) { 2443 i++, ioim++, iosp++) {
2403 /* 2444 /*
2404 * initialize IOIM 2445 * initialize IOIM
2405 */ 2446 */
@@ -2408,22 +2449,19 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2408 ioim->bfa = fcpim->bfa; 2449 ioim->bfa = fcpim->bfa;
2409 ioim->fcpim = fcpim; 2450 ioim->fcpim = fcpim;
2410 ioim->iosp = iosp; 2451 ioim->iosp = iosp;
2411 iosp->snsinfo = snsinfo;
2412 INIT_LIST_HEAD(&ioim->sgpg_q); 2452 INIT_LIST_HEAD(&ioim->sgpg_q);
2413 bfa_reqq_winit(&ioim->iosp->reqq_wait, 2453 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2414 bfa_ioim_qresume, ioim); 2454 bfa_ioim_qresume, ioim);
2415 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe, 2455 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2416 bfa_ioim_sgpg_alloced, ioim); 2456 bfa_ioim_sgpg_alloced, ioim);
2417 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); 2457 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2418
2419 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2420 } 2458 }
2421} 2459}
2422 2460
2423void 2461void
2424bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 2462bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2425{ 2463{
2426 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2464 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2427 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; 2465 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2428 struct bfa_ioim_s *ioim; 2466 struct bfa_ioim_s *ioim;
2429 u16 iotag; 2467 u16 iotag;
@@ -2507,7 +2545,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2507void 2545void
2508bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 2546bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2509{ 2547{
2510 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2548 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2511 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; 2549 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2512 struct bfa_ioim_s *ioim; 2550 struct bfa_ioim_s *ioim;
2513 u16 iotag; 2551 u16 iotag;
@@ -2573,18 +2611,21 @@ struct bfa_ioim_s *
2573bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, 2611bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2574 struct bfa_itnim_s *itnim, u16 nsges) 2612 struct bfa_itnim_s *itnim, u16 nsges)
2575{ 2613{
2576 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 2614 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2577 struct bfa_ioim_s *ioim; 2615 struct bfa_ioim_s *ioim;
2616 struct bfa_iotag_s *iotag = NULL;
2578 2617
2579 /* 2618 /*
2580 * alocate IOIM resource 2619 * alocate IOIM resource
2581 */ 2620 */
2582 bfa_q_deq(&fcpim->ioim_free_q, &ioim); 2621 bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
2583 if (!ioim) { 2622 if (!iotag) {
2584 bfa_stats(itnim, no_iotags); 2623 bfa_stats(itnim, no_iotags);
2585 return NULL; 2624 return NULL;
2586 } 2625 }
2587 2626
2627 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
2628
2588 ioim->dio = dio; 2629 ioim->dio = dio;
2589 ioim->itnim = itnim; 2630 ioim->itnim = itnim;
2590 ioim->nsges = nsges; 2631 ioim->nsges = nsges;
@@ -2601,7 +2642,8 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2601void 2642void
2602bfa_ioim_free(struct bfa_ioim_s *ioim) 2643bfa_ioim_free(struct bfa_ioim_s *ioim)
2603{ 2644{
2604 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim; 2645 struct bfa_fcpim_s *fcpim = ioim->fcpim;
2646 struct bfa_iotag_s *iotag;
2605 2647
2606 if (ioim->nsgpgs > 0) 2648 if (ioim->nsgpgs > 0)
2607 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); 2649 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
@@ -2610,8 +2652,17 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
2610 fcpim->ios_active--; 2652 fcpim->ios_active--;
2611 2653
2612 ioim->iotag &= BFA_IOIM_IOTAG_MASK; 2654 ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2655
2656 WARN_ON(!(ioim->iotag <
2657 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
2658 iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
2659
2660 if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
2661 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
2662 else
2663 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2664
2613 list_del(&ioim->qe); 2665 list_del(&ioim->qe);
2614 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2615} 2666}
2616 2667
2617void 2668void
@@ -3021,7 +3072,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
3021 * build i/o request message next 3072 * build i/o request message next
3022 */ 3073 */
3023 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, 3074 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3024 bfa_lpuid(tskim->bfa)); 3075 bfa_fn_lpu(tskim->bfa));
3025 3076
3026 m->tsk_tag = cpu_to_be16(tskim->tsk_tag); 3077 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3027 m->itn_fhdl = tskim->itnim->rport->fw_handle; 3078 m->itn_fhdl = tskim->itnim->rport->fw_handle;
@@ -3032,7 +3083,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
3032 /* 3083 /*
3033 * queue I/O message to firmware 3084 * queue I/O message to firmware
3034 */ 3085 */
3035 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3086 bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3036 return BFA_TRUE; 3087 return BFA_TRUE;
3037} 3088}
3038 3089
@@ -3056,14 +3107,14 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3056 * build i/o request message next 3107 * build i/o request message next
3057 */ 3108 */
3058 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, 3109 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3059 bfa_lpuid(tskim->bfa)); 3110 bfa_fn_lpu(tskim->bfa));
3060 3111
3061 m->tsk_tag = cpu_to_be16(tskim->tsk_tag); 3112 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3062 3113
3063 /* 3114 /*
3064 * queue I/O message to firmware 3115 * queue I/O message to firmware
3065 */ 3116 */
3066 bfa_reqq_produce(tskim->bfa, itnim->reqq); 3117 bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3067 return BFA_TRUE; 3118 return BFA_TRUE;
3068} 3119}
3069 3120
@@ -3129,14 +3180,16 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3129 * Memory allocation and initialization. 3180 * Memory allocation and initialization.
3130 */ 3181 */
3131void 3182void
3132bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) 3183bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
3133{ 3184{
3134 struct bfa_tskim_s *tskim; 3185 struct bfa_tskim_s *tskim;
3186 struct bfa_fcp_mod_s *fcp = fcpim->fcp;
3135 u16 i; 3187 u16 i;
3136 3188
3137 INIT_LIST_HEAD(&fcpim->tskim_free_q); 3189 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3190 INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3138 3191
3139 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo); 3192 tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
3140 fcpim->tskim_arr = tskim; 3193 fcpim->tskim_arr = tskim;
3141 3194
3142 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) { 3195 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
@@ -3155,13 +3208,13 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3155 list_add_tail(&tskim->qe, &fcpim->tskim_free_q); 3208 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3156 } 3209 }
3157 3210
3158 bfa_meminfo_kva(minfo) = (u8 *) tskim; 3211 bfa_mem_kva_curp(fcp) = (u8 *) tskim;
3159} 3212}
3160 3213
3161void 3214void
3162bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) 3215bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3163{ 3216{
3164 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 3217 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3165 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; 3218 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3166 struct bfa_tskim_s *tskim; 3219 struct bfa_tskim_s *tskim;
3167 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); 3220 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
@@ -3188,7 +3241,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3188struct bfa_tskim_s * 3241struct bfa_tskim_s *
3189bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) 3242bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3190{ 3243{
3191 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); 3244 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3192 struct bfa_tskim_s *tskim; 3245 struct bfa_tskim_s *tskim;
3193 3246
3194 bfa_q_deq(&fcpim->tskim_free_q, &tskim); 3247 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
@@ -3233,3 +3286,214 @@ bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3233 list_add_tail(&tskim->qe, &itnim->tsk_q); 3286 list_add_tail(&tskim->qe, &itnim->tsk_q);
3234 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START); 3287 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3235} 3288}
3289
3290void
3291bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3292{
3293 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3294 struct list_head *qe;
3295 int i;
3296
3297 for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3298 bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3299 list_add_tail(qe, &fcpim->tskim_unused_q);
3300 }
3301}
3302
3303/* BFA FCP module - parent module for fcpim */
3304
3305BFA_MODULE(fcp);
3306
3307static void
3308bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3309 struct bfa_s *bfa)
3310{
3311 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3312 struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
3313 struct bfa_mem_dma_s *seg_ptr;
3314 u16 nsegs, idx, per_seg_ios, num_io_req;
3315 u32 km_len = 0;
3316
3317 /*
3318 * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value.
3319 * So if the values are non zero, adjust them appropriately.
3320 */
3321 if (cfg->fwcfg.num_ioim_reqs &&
3322 cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
3323 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
3324 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
3325 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3326
3327 if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
3328 cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3329
3330 num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3331 if (num_io_req > BFA_IO_MAX) {
3332 if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
3333 cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
3334 cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
3335 } else if (cfg->fwcfg.num_fwtio_reqs)
3336 cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3337 else
3338 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3339 }
3340
3341 bfa_fcpim_meminfo(cfg, &km_len);
3342
3343 num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3344 km_len += num_io_req * sizeof(struct bfa_iotag_s);
3345 km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
3346
3347 /* dma memory */
3348 nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3349 per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
3350
3351 bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3352 if (num_io_req >= per_seg_ios) {
3353 num_io_req -= per_seg_ios;
3354 bfa_mem_dma_setup(minfo, seg_ptr,
3355 per_seg_ios * BFI_IOIM_SNSLEN);
3356 } else
3357 bfa_mem_dma_setup(minfo, seg_ptr,
3358 num_io_req * BFI_IOIM_SNSLEN);
3359 }
3360
3361 /* kva memory */
3362 bfa_mem_kva_setup(minfo, fcp_kva, km_len);
3363}
3364
3365static void
3366bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3367 struct bfa_pcidev_s *pcidev)
3368{
3369 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3370 struct bfa_mem_dma_s *seg_ptr;
3371 u16 idx, nsegs, num_io_req;
3372
3373 fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3374 fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
3375 fcp->num_itns = cfg->fwcfg.num_rports;
3376 fcp->bfa = bfa;
3377
3378 /*
3379 * Setup the pool of snsbase addr's, that is passed to fw as
3380 * part of bfi_iocfc_cfg_s.
3381 */
3382 num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3383 nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3384
3385 bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3386
3387 if (!bfa_mem_dma_virt(seg_ptr))
3388 break;
3389
3390 fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
3391 fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
3392 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3393 }
3394
3395 bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3396
3397 bfa_iotag_attach(fcp);
3398
3399 fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
3400 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
3401 (fcp->num_itns * sizeof(struct bfa_itn_s));
3402 memset(fcp->itn_arr, 0,
3403 (fcp->num_itns * sizeof(struct bfa_itn_s)));
3404}
3405
3406static void
3407bfa_fcp_detach(struct bfa_s *bfa)
3408{
3409}
3410
3411static void
3412bfa_fcp_start(struct bfa_s *bfa)
3413{
3414}
3415
3416static void
3417bfa_fcp_stop(struct bfa_s *bfa)
3418{
3419}
3420
3421static void
3422bfa_fcp_iocdisable(struct bfa_s *bfa)
3423{
3424 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3425
3426 /* Enqueue unused ioim resources to free_q */
3427 list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
3428
3429 bfa_fcpim_iocdisable(fcp);
3430}
3431
3432void
3433bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
3434{
3435 struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
3436 struct list_head *qe;
3437 int i;
3438
3439 for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3440 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3441 list_add_tail(qe, &mod->iotag_unused_q);
3442 }
3443}
3444
3445void
3446bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
3447 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
3448{
3449 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3450 struct bfa_itn_s *itn;
3451
3452 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
3453 itn->isr = isr;
3454}
3455
3456/*
3457 * Itn interrupt processing.
3458 */
3459void
3460bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3461{
3462 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3463 union bfi_itn_i2h_msg_u msg;
3464 struct bfa_itn_s *itn;
3465
3466 msg.msg = m;
3467 itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3468
3469 if (itn->isr)
3470 itn->isr(bfa, m);
3471 else
3472 WARN_ON(1);
3473}
3474
3475void
3476bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3477{
3478 struct bfa_iotag_s *iotag;
3479 u16 num_io_req, i;
3480
3481 iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
3482 fcp->iotag_arr = iotag;
3483
3484 INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
3485 INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
3486 INIT_LIST_HEAD(&fcp->iotag_unused_q);
3487
3488 num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
3489 for (i = 0; i < num_io_req; i++, iotag++) {
3490 memset(iotag, 0, sizeof(struct bfa_iotag_s));
3491 iotag->tag = i;
3492 if (i < fcp->num_ioim_reqs)
3493 list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
3494 else
3495 list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
3496 }
3497
3498 bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3499}
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 1e38dade8423..57b695ad4ee5 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -24,6 +24,34 @@
24#include "bfa_defs_svc.h" 24#include "bfa_defs_svc.h"
25#include "bfa_cs.h" 25#include "bfa_cs.h"
26 26
27/* FCP module related definitions */
28#define BFA_IO_MAX BFI_IO_MAX
29#define BFA_FWTIO_MAX 2000
30
31struct bfa_fcp_mod_s;
32struct bfa_iotag_s {
33 struct list_head qe; /* queue element */
34 u16 tag; /* FW IO tag */
35};
36
37struct bfa_itn_s {
38 bfa_isr_func_t isr;
39};
40
41void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
42 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
43void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
44void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
45void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);
46
47#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod)
48#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg))
49#define BFA_IOTAG_FROM_TAG(_fcp, _tag) \
50 (&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)])
51#define BFA_ITN_FROM_TAG(_fcp, _tag) \
52 ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
53#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
54 bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN)
27 55
28#define BFA_ITNIM_MIN 32 56#define BFA_ITNIM_MIN 32
29#define BFA_ITNIM_MAX 1024 57#define BFA_ITNIM_MAX 1024
@@ -75,25 +103,24 @@ struct bfad_tskim_s;
75 103
76typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim); 104typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
77 105
78struct bfa_fcpim_mod_s { 106struct bfa_fcpim_s {
79 struct bfa_s *bfa; 107 struct bfa_s *bfa;
108 struct bfa_fcp_mod_s *fcp;
80 struct bfa_itnim_s *itnim_arr; 109 struct bfa_itnim_s *itnim_arr;
81 struct bfa_ioim_s *ioim_arr; 110 struct bfa_ioim_s *ioim_arr;
82 struct bfa_ioim_sp_s *ioim_sp_arr; 111 struct bfa_ioim_sp_s *ioim_sp_arr;
83 struct bfa_tskim_s *tskim_arr; 112 struct bfa_tskim_s *tskim_arr;
84 struct bfa_dma_s snsbase;
85 int num_itnims; 113 int num_itnims;
86 int num_ioim_reqs;
87 int num_tskim_reqs; 114 int num_tskim_reqs;
88 u32 path_tov; 115 u32 path_tov;
89 u16 q_depth; 116 u16 q_depth;
90 u8 reqq; /* Request queue to be used */ 117 u8 reqq; /* Request queue to be used */
91 u8 rsvd; 118 u8 rsvd;
92 struct list_head itnim_q; /* queue of active itnim */ 119 struct list_head itnim_q; /* queue of active itnim */
93 struct list_head ioim_free_q; /* free IO resources */
94 struct list_head ioim_resfree_q; /* IOs waiting for f/w */ 120 struct list_head ioim_resfree_q; /* IOs waiting for f/w */
95 struct list_head ioim_comp_q; /* IO global comp Q */ 121 struct list_head ioim_comp_q; /* IO global comp Q */
96 struct list_head tskim_free_q; 122 struct list_head tskim_free_q;
123 struct list_head tskim_unused_q; /* Unused tskim Q */
97 u32 ios_active; /* current active IOs */ 124 u32 ios_active; /* current active IOs */
98 u32 delay_comp; 125 u32 delay_comp;
99 struct bfa_fcpim_del_itn_stats_s del_itn_stats; 126 struct bfa_fcpim_del_itn_stats_s del_itn_stats;
@@ -104,6 +131,25 @@ struct bfa_fcpim_mod_s {
104 bfa_fcpim_profile_t profile_start; 131 bfa_fcpim_profile_t profile_start;
105}; 132};
106 133
134/* Max FCP dma segs required */
135#define BFA_FCP_DMA_SEGS BFI_IOIM_SNSBUF_SEGS
136
137struct bfa_fcp_mod_s {
138 struct bfa_s *bfa;
139 struct list_head iotag_ioim_free_q; /* free IO resources */
140 struct list_head iotag_tio_free_q; /* free IO resources */
141 struct list_head iotag_unused_q; /* unused IO resources*/
142 struct bfa_iotag_s *iotag_arr;
143 struct bfa_itn_s *itn_arr;
144 int num_ioim_reqs;
145 int num_fwtio_reqs;
146 int num_itns;
147 struct bfa_dma_s snsbase[BFA_FCP_DMA_SEGS];
148 struct bfa_fcpim_s fcpim;
149 struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS];
150 struct bfa_mem_kva_s kva_seg;
151};
152
107/* 153/*
108 * BFA IO (initiator mode) 154 * BFA IO (initiator mode)
109 */ 155 */
@@ -111,7 +157,7 @@ struct bfa_ioim_s {
111 struct list_head qe; /* queue elememt */ 157 struct list_head qe; /* queue elememt */
112 bfa_sm_t sm; /* BFA ioim state machine */ 158 bfa_sm_t sm; /* BFA ioim state machine */
113 struct bfa_s *bfa; /* BFA module */ 159 struct bfa_s *bfa; /* BFA module */
114 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ 160 struct bfa_fcpim_s *fcpim; /* parent fcpim module */
115 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ 161 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
116 struct bfad_ioim_s *dio; /* driver IO handle */ 162 struct bfad_ioim_s *dio; /* driver IO handle */
117 u16 iotag; /* FWI IO tag */ 163 u16 iotag; /* FWI IO tag */
@@ -129,7 +175,6 @@ struct bfa_ioim_s {
129 175
130struct bfa_ioim_sp_s { 176struct bfa_ioim_sp_s {
131 struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */ 177 struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
132 u8 *snsinfo; /* sense info for this IO */
133 struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */ 178 struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
134 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 179 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
135 bfa_boolean_t abort_explicit; /* aborted by OS */ 180 bfa_boolean_t abort_explicit; /* aborted by OS */
@@ -143,7 +188,7 @@ struct bfa_tskim_s {
143 struct list_head qe; 188 struct list_head qe;
144 bfa_sm_t sm; 189 bfa_sm_t sm;
145 struct bfa_s *bfa; /* BFA module */ 190 struct bfa_s *bfa; /* BFA module */
146 struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ 191 struct bfa_fcpim_s *fcpim; /* parent fcpim module */
147 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ 192 struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
148 struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */ 193 struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
149 bfa_boolean_t notify; /* notify itnim on TM comp */ 194 bfa_boolean_t notify; /* notify itnim on TM comp */
@@ -182,13 +227,13 @@ struct bfa_itnim_s {
182 struct bfa_wc_s wc; /* waiting counter */ 227 struct bfa_wc_s wc; /* waiting counter */
183 struct bfa_timer_s timer; /* pending IO TOV */ 228 struct bfa_timer_s timer; /* pending IO TOV */
184 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 229 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
185 struct bfa_fcpim_mod_s *fcpim; /* fcpim module */ 230 struct bfa_fcpim_s *fcpim; /* fcpim module */
186 struct bfa_itnim_iostats_s stats; 231 struct bfa_itnim_iostats_s stats;
187 struct bfa_itnim_ioprofile_s ioprofile; 232 struct bfa_itnim_ioprofile_s ioprofile;
188}; 233};
189 234
190#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) 235#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
191#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) 236#define BFA_FCPIM(_hal) (&(_hal)->modules.fcp_mod.fcpim)
192#define BFA_IOIM_TAG_2_ID(_iotag) ((_iotag) & BFA_IOIM_IOTAG_MASK) 237#define BFA_IOIM_TAG_2_ID(_iotag) ((_iotag) & BFA_IOIM_IOTAG_MASK)
193#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ 238#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
194 (&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)]) 239 (&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)])
@@ -196,9 +241,9 @@ struct bfa_itnim_s {
196 (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)]) 241 (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
197 242
198#define bfa_io_profile_start_time(_bfa) \ 243#define bfa_io_profile_start_time(_bfa) \
199 (_bfa->modules.fcpim_mod.io_profile_start_time) 244 ((_bfa)->modules.fcp_mod.fcpim.io_profile_start_time)
200#define bfa_fcpim_get_io_profile(_bfa) \ 245#define bfa_fcpim_get_io_profile(_bfa) \
201 (_bfa->modules.fcpim_mod.io_profile) 246 ((_bfa)->modules.fcp_mod.fcpim.io_profile)
202#define bfa_ioim_update_iotag(__ioim) do { \ 247#define bfa_ioim_update_iotag(__ioim) do { \
203 uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; \ 248 uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; \
204 k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK; \ 249 k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK; \
@@ -217,8 +262,7 @@ bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
217/* 262/*
218 * function prototypes 263 * function prototypes
219 */ 264 */
220void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, 265void bfa_ioim_attach(struct bfa_fcpim_s *fcpim);
221 struct bfa_meminfo_s *minfo);
222void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 266void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
223void bfa_ioim_good_comp_isr(struct bfa_s *bfa, 267void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
224 struct bfi_msg_s *msg); 268 struct bfi_msg_s *msg);
@@ -228,18 +272,15 @@ void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
228void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim); 272void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
229void bfa_ioim_tov(struct bfa_ioim_s *ioim); 273void bfa_ioim_tov(struct bfa_ioim_s *ioim);
230 274
231void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, 275void bfa_tskim_attach(struct bfa_fcpim_s *fcpim);
232 struct bfa_meminfo_s *minfo);
233void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 276void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
234void bfa_tskim_iodone(struct bfa_tskim_s *tskim); 277void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
235void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); 278void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
236void bfa_tskim_cleanup(struct bfa_tskim_s *tskim); 279void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
280void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw);
237 281
238void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 282void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len);
239 u32 *dm_len); 283void bfa_itnim_attach(struct bfa_fcpim_s *fcpim);
240void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
241 struct bfa_meminfo_s *minfo);
242void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
243void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim); 284void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
244void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 285void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
245void bfa_itnim_iodone(struct bfa_itnim_s *itnim); 286void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
@@ -252,13 +293,17 @@ bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
252void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov); 293void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
253u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa); 294u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
254u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa); 295u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
296bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
297 struct bfa_itnim_iostats_s *stats, u8 lp_tag);
298void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
299 struct bfa_itnim_iostats_s *itnim_stats);
255 300
256#define bfa_fcpim_ioredirect_enabled(__bfa) \ 301#define bfa_fcpim_ioredirect_enabled(__bfa) \
257 (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect) 302 (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
258 303
259#define bfa_fcpim_get_next_reqq(__bfa, __qid) \ 304#define bfa_fcpim_get_next_reqq(__bfa, __qid) \
260{ \ 305{ \
261 struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \ 306 struct bfa_fcpim_s *__fcpim = BFA_FCPIM(__bfa); \
262 __fcpim->reqq++; \ 307 __fcpim->reqq++; \
263 __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \ 308 __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \
264 *(__qid) = __fcpim->reqq; \ 309 *(__qid) = __fcpim->reqq; \
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 9b43ca4b6778..a9b22bc48bc3 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -92,25 +92,49 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
92void 92void
93bfa_fcs_init(struct bfa_fcs_s *fcs) 93bfa_fcs_init(struct bfa_fcs_s *fcs)
94{ 94{
95 int i, npbc_vports; 95 int i;
96 struct bfa_fcs_mod_s *mod; 96 struct bfa_fcs_mod_s *mod;
97 struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
98 97
99 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { 98 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
100 mod = &fcs_modules[i]; 99 mod = &fcs_modules[i];
101 if (mod->modinit) 100 if (mod->modinit)
102 mod->modinit(fcs); 101 mod->modinit(fcs);
103 } 102 }
103}
104
105/*
106 * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
107 * with values learned during bfa_init firmware GETATTR REQ.
108 */
109void
110bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
111{
112 struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
113 struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
114 struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc;
115
116 port_cfg->nwwn = ioc->attr->nwwn;
117 port_cfg->pwwn = ioc->attr->pwwn;
118}
119
120/*
121 * fcs pbc vport initialization
122 */
123void
124bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs)
125{
126 int i, npbc_vports;
127 struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
128
104 /* Initialize pbc vports */ 129 /* Initialize pbc vports */
105 if (!fcs->min_cfg) { 130 if (!fcs->min_cfg) {
106 npbc_vports = 131 npbc_vports =
107 bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); 132 bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
108 for (i = 0; i < npbc_vports; i++) 133 for (i = 0; i < npbc_vports; i++)
109 bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); 134 bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
110 } 135 }
111} 136}
112 137
113
114/* 138/*
115 * brief 139 * brief
116 * FCS driver details initialization. 140 * FCS driver details initialization.
@@ -168,11 +192,14 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
168#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ 192#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */
169 193
170#define bfa_fcs_fabric_set_opertype(__fabric) do { \ 194#define bfa_fcs_fabric_set_opertype(__fabric) do { \
171 if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ 195 if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \
172 == BFA_PORT_TOPOLOGY_P2P) \ 196 == BFA_PORT_TOPOLOGY_P2P) { \
197 if (fabric->fab_type == BFA_FCS_FABRIC_SWITCHED) \
173 (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \ 198 (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \
174 else \ 199 else \
175 (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \ 200 (__fabric)->oper_type = BFA_PORT_TYPE_P2P; \
201 } else \
202 (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \
176} while (0) 203} while (0)
177 204
178/* 205/*
@@ -196,6 +223,9 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
196 u32 rsp_len, 223 u32 rsp_len,
197 u32 resid_len, 224 u32 resid_len,
198 struct fchs_s *rspfchs); 225 struct fchs_s *rspfchs);
226static u8 bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric);
227static bfa_boolean_t bfa_fcs_fabric_is_bbscn_enabled(
228 struct bfa_fcs_fabric_s *fabric);
199 229
200static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, 230static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
201 enum bfa_fcs_fabric_event event); 231 enum bfa_fcs_fabric_event event);
@@ -269,8 +299,8 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
269 break; 299 break;
270 300
271 case BFA_FCS_FABRIC_SM_DELETE: 301 case BFA_FCS_FABRIC_SM_DELETE:
272 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); 302 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
273 bfa_wc_down(&fabric->fcs->wc); 303 bfa_fcs_fabric_delete(fabric);
274 break; 304 break;
275 305
276 default: 306 default:
@@ -322,7 +352,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
322 case BFA_FCS_FABRIC_SM_CONT_OP: 352 case BFA_FCS_FABRIC_SM_CONT_OP:
323 353
324 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, 354 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
325 fabric->bb_credit); 355 fabric->bb_credit,
356 bfa_fcs_fabric_oper_bbscn(fabric));
326 fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; 357 fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
327 358
328 if (fabric->auth_reqd && fabric->is_auth) { 359 if (fabric->auth_reqd && fabric->is_auth) {
@@ -350,7 +381,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
350 case BFA_FCS_FABRIC_SM_NO_FABRIC: 381 case BFA_FCS_FABRIC_SM_NO_FABRIC:
351 fabric->fab_type = BFA_FCS_FABRIC_N2N; 382 fabric->fab_type = BFA_FCS_FABRIC_N2N;
352 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, 383 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
353 fabric->bb_credit); 384 fabric->bb_credit,
385 bfa_fcs_fabric_oper_bbscn(fabric));
354 bfa_fcs_fabric_notify_online(fabric); 386 bfa_fcs_fabric_notify_online(fabric);
355 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); 387 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
356 break; 388 break;
@@ -518,7 +550,11 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
518 case BFA_FCS_FABRIC_SM_NO_FABRIC: 550 case BFA_FCS_FABRIC_SM_NO_FABRIC:
519 bfa_trc(fabric->fcs, fabric->bb_credit); 551 bfa_trc(fabric->fcs, fabric->bb_credit);
520 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, 552 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
521 fabric->bb_credit); 553 fabric->bb_credit,
554 bfa_fcs_fabric_oper_bbscn(fabric));
555 break;
556
557 case BFA_FCS_FABRIC_SM_RETRY_OP:
522 break; 558 break;
523 559
524 default: 560 default:
@@ -764,6 +800,10 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
764 800
765 case BFA_STATUS_FABRIC_RJT: 801 case BFA_STATUS_FABRIC_RJT:
766 fabric->stats.flogi_rejects++; 802 fabric->stats.flogi_rejects++;
803 if (fabric->lps->lsrjt_rsn == FC_LS_RJT_RSN_LOGICAL_ERROR &&
804 fabric->lps->lsrjt_expl == FC_LS_RJT_EXP_NO_ADDL_INFO)
805 fabric->fcs->bbscn_flogi_rjt = BFA_TRUE;
806
767 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); 807 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
768 return; 808 return;
769 809
@@ -793,6 +833,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
793 */ 833 */
794 fabric->bport.port_topo.pn2n.rem_port_wwn = 834 fabric->bport.port_topo.pn2n.rem_port_wwn =
795 fabric->lps->pr_pwwn; 835 fabric->lps->pr_pwwn;
836 fabric->fab_type = BFA_FCS_FABRIC_N2N;
796 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); 837 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
797 } 838 }
798 839
@@ -808,13 +849,17 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
808{ 849{
809 struct bfa_s *bfa = fabric->fcs->bfa; 850 struct bfa_s *bfa = fabric->fcs->bfa;
810 struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; 851 struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
811 u8 alpa = 0; 852 u8 alpa = 0, bb_scn = 0;
812 853
813 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) 854 if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
814 alpa = bfa_fcport_get_myalpa(bfa); 855 alpa = bfa_fcport_get_myalpa(bfa);
815 856
857 if (bfa_fcs_fabric_is_bbscn_enabled(fabric) &&
858 (!fabric->fcs->bbscn_flogi_rjt))
859 bb_scn = BFA_FCS_PORT_DEF_BB_SCN;
860
816 bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), 861 bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
817 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); 862 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd, bb_scn);
818 863
819 fabric->stats.flogi_sent++; 864 fabric->stats.flogi_sent++;
820} 865}
@@ -873,6 +918,40 @@ bfa_fcs_fabric_delay(void *cbarg)
873} 918}
874 919
875/* 920/*
921 * Computes operating BB_SCN value
922 */
923static u8
924bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric)
925{
926 u8 pr_bbscn = fabric->lps->pr_bbscn;
927 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
928
929 if (!(fcport->cfg.bb_scn_state && pr_bbscn))
930 return 0;
931
932 /* return max of local/remote bb_scn values */
933 return ((pr_bbscn > BFA_FCS_PORT_DEF_BB_SCN) ?
934 pr_bbscn : BFA_FCS_PORT_DEF_BB_SCN);
935}
936
937/*
938 * Check if BB_SCN can be enabled.
939 */
940static bfa_boolean_t
941bfa_fcs_fabric_is_bbscn_enabled(struct bfa_fcs_fabric_s *fabric)
942{
943 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
944
945 if (bfa_ioc_get_fcmode(&fabric->fcs->bfa->ioc) &&
946 fcport->cfg.bb_scn_state &&
947 !bfa_fcport_is_qos_enabled(fabric->fcs->bfa) &&
948 !bfa_fcport_is_trunk_enabled(fabric->fcs->bfa))
949 return BFA_TRUE;
950 else
951 return BFA_FALSE;
952}
953
954/*
876 * Delete all vports and wait for vport delete completions. 955 * Delete all vports and wait for vport delete completions.
877 */ 956 */
878static void 957static void
@@ -989,6 +1068,7 @@ void
989bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) 1068bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
990{ 1069{
991 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); 1070 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
1071 fabric->fcs->bbscn_flogi_rjt = BFA_FALSE;
992 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); 1072 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
993} 1073}
994 1074
@@ -1192,6 +1272,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
1192 } 1272 }
1193 1273
1194 fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); 1274 fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
1275 fabric->lps->pr_bbscn = (be16_to_cpu(flogi->csp.rxsz) >> 12);
1195 bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; 1276 bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
1196 bport->port_topo.pn2n.reply_oxid = fchs->ox_id; 1277 bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
1197 1278
@@ -1224,9 +1305,10 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1224 n2n_port->reply_oxid, pcfg->pwwn, 1305 n2n_port->reply_oxid, pcfg->pwwn,
1225 pcfg->nwwn, 1306 pcfg->nwwn,
1226 bfa_fcport_get_maxfrsize(bfa), 1307 bfa_fcport_get_maxfrsize(bfa),
1227 bfa_fcport_get_rx_bbcredit(bfa)); 1308 bfa_fcport_get_rx_bbcredit(bfa),
1309 bfa_fcs_fabric_oper_bbscn(fabric));
1228 1310
1229 bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag, 1311 bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag,
1230 BFA_FALSE, FC_CLASS_3, 1312 BFA_FALSE, FC_CLASS_3,
1231 reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, 1313 reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
1232 FC_MAX_PDUSZ, 0); 1314 FC_MAX_PDUSZ, 0);
@@ -1298,6 +1380,45 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
1298} 1380}
1299 1381
1300/* 1382/*
1383 * Return the list of local logical ports present in the given VF.
1384 *
1385 * @param[in] vf vf for which logical ports are returned
1386 * @param[out] lpwwn returned logical port wwn list
1387 * @param[in,out] nlports in:size of lpwwn list;
1388 * out:total elements present,
1389 * actual elements returned is limited by the size
1390 */
1391void
1392bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
1393{
1394 struct list_head *qe;
1395 struct bfa_fcs_vport_s *vport;
1396 int i = 0;
1397 struct bfa_fcs_s *fcs;
1398
1399 if (vf == NULL || lpwwn == NULL || *nlports == 0)
1400 return;
1401
1402 fcs = vf->fcs;
1403
1404 bfa_trc(fcs, vf->vf_id);
1405 bfa_trc(fcs, (uint32_t) *nlports);
1406
1407 lpwwn[i++] = vf->bport.port_cfg.pwwn;
1408
1409 list_for_each(qe, &vf->vport_q) {
1410 if (i >= *nlports)
1411 break;
1412
1413 vport = (struct bfa_fcs_vport_s *) qe;
1414 lpwwn[i++] = vport->lport.port_cfg.pwwn;
1415 }
1416
1417 bfa_trc(fcs, i);
1418 *nlports = i;
1419}
1420
1421/*
1301 * BFA FCS PPORT ( physical port) 1422 * BFA FCS PPORT ( physical port)
1302 */ 1423 */
1303static void 1424static void
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 61cdce4bd913..a5f1faf335a7 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -254,6 +254,9 @@ struct bfa_fcs_fabric_s;
254#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48 254#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
255#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16 255#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
256 256
257/* bb_scn value in 2^bb_scn */
258#define BFA_FCS_PORT_DEF_BB_SCN 3
259
257/* 260/*
258 * Get FC port ID for a logical port. 261 * Get FC port ID for a logical port.
259 */ 262 */
@@ -379,6 +382,7 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
379void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); 382void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
380void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); 383void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
381void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport); 384void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
385void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
382 386
383#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */ 387#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
384#define BFA_FCS_RPORT_MAX_RETRIES (5) 388#define BFA_FCS_RPORT_MAX_RETRIES (5)
@@ -420,6 +424,7 @@ struct bfa_fcs_rport_s {
420 enum fc_cos fc_cos; /* FC classes of service supp */ 424 enum fc_cos fc_cos; /* FC classes of service supp */
421 bfa_boolean_t cisc; /* CISC capable device */ 425 bfa_boolean_t cisc; /* CISC capable device */
422 bfa_boolean_t prlo; /* processing prlo or LOGO */ 426 bfa_boolean_t prlo; /* processing prlo or LOGO */
427 bfa_boolean_t plogi_pending; /* Rx Plogi Pending */
423 wwn_t pwwn; /* port wwn of rport */ 428 wwn_t pwwn; /* port wwn of rport */
424 wwn_t nwwn; /* node wwn of rport */ 429 wwn_t nwwn; /* node wwn of rport */
425 struct bfa_rport_symname_s psym_name; /* port symbolic name */ 430 struct bfa_rport_symname_s psym_name; /* port symbolic name */
@@ -447,6 +452,8 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
447/* 452/*
448 * bfa fcs rport API functions 453 * bfa fcs rport API functions
449 */ 454 */
455void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
456 struct bfa_rport_attr_s *attr);
450struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, 457struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
451 wwn_t rpwwn); 458 wwn_t rpwwn);
452struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( 459struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
@@ -591,10 +598,21 @@ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
591void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, 598void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
592 struct fchs_s *fchs, u16 len); 599 struct fchs_s *fchs, u16 len);
593 600
594#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \ 601#define BFA_FCS_FDMI_SUPP_SPEEDS_4G (FDMI_TRANS_SPEED_1G | \
595 FDMI_TRANS_SPEED_2G | \ 602 FDMI_TRANS_SPEED_2G | \
596 FDMI_TRANS_SPEED_4G | \ 603 FDMI_TRANS_SPEED_4G)
597 FDMI_TRANS_SPEED_8G) 604
605#define BFA_FCS_FDMI_SUPP_SPEEDS_8G (FDMI_TRANS_SPEED_1G | \
606 FDMI_TRANS_SPEED_2G | \
607 FDMI_TRANS_SPEED_4G | \
608 FDMI_TRANS_SPEED_8G)
609
610#define BFA_FCS_FDMI_SUPP_SPEEDS_16G (FDMI_TRANS_SPEED_2G | \
611 FDMI_TRANS_SPEED_4G | \
612 FDMI_TRANS_SPEED_8G | \
613 FDMI_TRANS_SPEED_16G)
614
615#define BFA_FCS_FDMI_SUPP_SPEEDS_10G FDMI_TRANS_SPEED_10G
598 616
599/* 617/*
600 * HBA Attribute Block : BFA internal representation. Note : Some variable 618 * HBA Attribute Block : BFA internal representation. Note : Some variable
@@ -649,6 +667,8 @@ struct bfa_fcs_s {
649 struct bfa_trc_mod_s *trcmod; /* tracing module */ 667 struct bfa_trc_mod_s *trcmod; /* tracing module */
650 bfa_boolean_t vf_enabled; /* VF mode is enabled */ 668 bfa_boolean_t vf_enabled; /* VF mode is enabled */
651 bfa_boolean_t fdmi_enabled; /* FDMI is enabled */ 669 bfa_boolean_t fdmi_enabled; /* FDMI is enabled */
670 bfa_boolean_t bbscn_enabled; /* Driver Config Parameter */
671 bfa_boolean_t bbscn_flogi_rjt;/* FLOGI reject due to BB_SCN */
652 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ 672 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
653 u16 port_vfid; /* port default VF ID */ 673 u16 port_vfid; /* port default VF ID */
654 struct bfa_fcs_driver_info_s driver_info; 674 struct bfa_fcs_driver_info_s driver_info;
@@ -715,6 +735,8 @@ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
715 struct bfad_s *bfad, 735 struct bfad_s *bfad,
716 bfa_boolean_t min_cfg); 736 bfa_boolean_t min_cfg);
717void bfa_fcs_init(struct bfa_fcs_s *fcs); 737void bfa_fcs_init(struct bfa_fcs_s *fcs);
738void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs);
739void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
718void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, 740void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
719 struct bfa_fcs_driver_info_s *driver_info); 741 struct bfa_fcs_driver_info_s *driver_info);
720void bfa_fcs_exit(struct bfa_fcs_s *fcs); 742void bfa_fcs_exit(struct bfa_fcs_s *fcs);
@@ -723,6 +745,7 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs);
723 * bfa fcs vf public functions 745 * bfa fcs vf public functions
724 */ 746 */
725bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); 747bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
748void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
726 749
727/* 750/*
728 * fabric protected interface functions 751 * fabric protected interface functions
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index e7b49f4cb51f..29b4108be269 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -54,6 +54,7 @@ enum bfa_fcs_itnim_event {
54 BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */ 54 BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
55 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ 55 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
56 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ 56 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
57 BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
57}; 58};
58 59
59static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, 60static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
@@ -178,6 +179,10 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
178 BFA_FCS_RETRY_TIMEOUT); 179 BFA_FCS_RETRY_TIMEOUT);
179 break; 180 break;
180 181
182 case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP:
183 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
184 break;
185
181 case BFA_FCS_ITNIM_SM_OFFLINE: 186 case BFA_FCS_ITNIM_SM_OFFLINE:
182 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 187 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
183 bfa_fcxp_discard(itnim->fcxp); 188 bfa_fcxp_discard(itnim->fcxp);
@@ -447,6 +452,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
447 itnim->rport->scsi_function = 452 itnim->rport->scsi_function =
448 BFA_RPORT_INITIATOR; 453 BFA_RPORT_INITIATOR;
449 itnim->stats.prli_rsp_acc++; 454 itnim->stats.prli_rsp_acc++;
455 itnim->stats.initiator++;
450 bfa_sm_send_event(itnim, 456 bfa_sm_send_event(itnim,
451 BFA_FCS_ITNIM_SM_RSP_OK); 457 BFA_FCS_ITNIM_SM_RSP_OK);
452 return; 458 return;
@@ -472,6 +478,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
472 bfa_trc(itnim->fcs, ls_rjt->reason_code_expl); 478 bfa_trc(itnim->fcs, ls_rjt->reason_code_expl);
473 479
474 itnim->stats.prli_rsp_rjt++; 480 itnim->stats.prli_rsp_rjt++;
481 if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) {
482 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP);
483 return;
484 }
475 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); 485 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
476 } 486 }
477} 487}
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 1d6be8c14473..f8251a91ba91 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -74,6 +74,7 @@ enum bfa_fcs_lport_event {
74 BFA_FCS_PORT_SM_OFFLINE = 3, 74 BFA_FCS_PORT_SM_OFFLINE = 3,
75 BFA_FCS_PORT_SM_DELETE = 4, 75 BFA_FCS_PORT_SM_DELETE = 4,
76 BFA_FCS_PORT_SM_DELRPORT = 5, 76 BFA_FCS_PORT_SM_DELRPORT = 5,
77 BFA_FCS_PORT_SM_STOP = 6,
77}; 78};
78 79
79static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port, 80static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
@@ -86,6 +87,8 @@ static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port,
86 enum bfa_fcs_lport_event event); 87 enum bfa_fcs_lport_event event);
87static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port, 88static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port,
88 enum bfa_fcs_lport_event event); 89 enum bfa_fcs_lport_event event);
90static void bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
91 enum bfa_fcs_lport_event event);
89 92
90static void 93static void
91bfa_fcs_lport_sm_uninit( 94bfa_fcs_lport_sm_uninit(
@@ -123,6 +126,12 @@ bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
123 bfa_fcs_lport_deleted(port); 126 bfa_fcs_lport_deleted(port);
124 break; 127 break;
125 128
129 case BFA_FCS_PORT_SM_STOP:
130 /* If vport - send completion call back */
131 if (port->vport)
132 bfa_fcs_vport_stop_comp(port->vport);
133 break;
134
126 case BFA_FCS_PORT_SM_OFFLINE: 135 case BFA_FCS_PORT_SM_OFFLINE:
127 break; 136 break;
128 137
@@ -148,6 +157,23 @@ bfa_fcs_lport_sm_online(
148 bfa_fcs_lport_offline_actions(port); 157 bfa_fcs_lport_offline_actions(port);
149 break; 158 break;
150 159
160 case BFA_FCS_PORT_SM_STOP:
161 __port_action[port->fabric->fab_type].offline(port);
162
163 if (port->num_rports == 0) {
164 bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
165 /* If vport - send completion call back */
166 if (port->vport)
167 bfa_fcs_vport_stop_comp(port->vport);
168 } else {
169 bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
170 list_for_each_safe(qe, qen, &port->rport_q) {
171 rport = (struct bfa_fcs_rport_s *) qe;
172 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
173 }
174 }
175 break;
176
151 case BFA_FCS_PORT_SM_DELETE: 177 case BFA_FCS_PORT_SM_DELETE:
152 178
153 __port_action[port->fabric->fab_type].offline(port); 179 __port_action[port->fabric->fab_type].offline(port);
@@ -189,6 +215,21 @@ bfa_fcs_lport_sm_offline(
189 bfa_fcs_lport_online_actions(port); 215 bfa_fcs_lport_online_actions(port);
190 break; 216 break;
191 217
218 case BFA_FCS_PORT_SM_STOP:
219 if (port->num_rports == 0) {
220 bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
221 /* If vport - send completion call back */
222 if (port->vport)
223 bfa_fcs_vport_stop_comp(port->vport);
224 } else {
225 bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
226 list_for_each_safe(qe, qen, &port->rport_q) {
227 rport = (struct bfa_fcs_rport_s *) qe;
228 bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
229 }
230 }
231 break;
232
192 case BFA_FCS_PORT_SM_DELETE: 233 case BFA_FCS_PORT_SM_DELETE:
193 if (port->num_rports == 0) { 234 if (port->num_rports == 0) {
194 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); 235 bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
@@ -212,6 +253,28 @@ bfa_fcs_lport_sm_offline(
212} 253}
213 254
214static void 255static void
256bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
257 enum bfa_fcs_lport_event event)
258{
259 bfa_trc(port->fcs, port->port_cfg.pwwn);
260 bfa_trc(port->fcs, event);
261
262 switch (event) {
263 case BFA_FCS_PORT_SM_DELRPORT:
264 if (port->num_rports == 0) {
265 bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
266 /* If vport - send completion call back */
267 if (port->vport)
268 bfa_fcs_vport_stop_comp(port->vport);
269 }
270 break;
271
272 default:
273 bfa_sm_fault(port->fcs, event);
274 }
275}
276
277static void
215bfa_fcs_lport_sm_deleting( 278bfa_fcs_lport_sm_deleting(
216 struct bfa_fcs_lport_s *port, 279 struct bfa_fcs_lport_s *port,
217 enum bfa_fcs_lport_event event) 280 enum bfa_fcs_lport_event event)
@@ -265,6 +328,40 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
265} 328}
266 329
267/* 330/*
331 * Send a FCCT Reject
332 */
333static void
334bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
335 struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl)
336{
337 struct fchs_s fchs;
338 struct bfa_fcxp_s *fcxp;
339 struct bfa_rport_s *bfa_rport = NULL;
340 int len;
341 struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1);
342 struct ct_hdr_s *ct_hdr;
343
344 bfa_trc(port->fcs, rx_fchs->d_id);
345 bfa_trc(port->fcs, rx_fchs->s_id);
346
347 fcxp = bfa_fcs_fcxp_alloc(port->fcs);
348 if (!fcxp)
349 return;
350
351 ct_hdr = bfa_fcxp_get_reqbuf(fcxp);
352 ct_hdr->gs_type = rx_cthdr->gs_type;
353 ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type;
354
355 len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id,
356 bfa_fcs_lport_get_fcid(port),
357 rx_fchs->ox_id, reason_code, reason_code_expl);
358
359 bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
360 BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
361 FC_MAX_PDUSZ, 0);
362}
363
364/*
268 * Process incoming plogi from a remote port. 365 * Process incoming plogi from a remote port.
269 */ 366 */
270static void 367static void
@@ -647,6 +744,16 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
647 bfa_fcs_lport_abts_acc(lport, fchs); 744 bfa_fcs_lport_abts_acc(lport, fchs);
648 return; 745 return;
649 } 746 }
747
748 if (fchs->type == FC_TYPE_SERVICES) {
749 /*
750 * Unhandled FC-GS frames. Send a FC-CT Reject
751 */
752 bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP,
753 CT_NS_EXP_NOADDITIONAL);
754 return;
755 }
756
650 /* 757 /*
651 * look for a matching remote port ID 758 * look for a matching remote port ID
652 */ 759 */
@@ -835,8 +942,8 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
835 lport->fcs = fcs; 942 lport->fcs = fcs;
836 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); 943 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
837 lport->vport = vport; 944 lport->vport = vport;
838 lport->lp_tag = (vport) ? vport->lps->lp_tag : 945 lport->lp_tag = (vport) ? vport->lps->bfa_tag :
839 lport->fabric->lps->lp_tag; 946 lport->fabric->lps->bfa_tag;
840 947
841 INIT_LIST_HEAD(&lport->rport_q); 948 INIT_LIST_HEAD(&lport->rport_q);
842 lport->num_rports = 0; 949 lport->num_rports = 0;
@@ -1074,6 +1181,8 @@ static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1074 struct bfa_fcs_fdmi_hba_attr_s *hba_attr); 1181 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
1075static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, 1182static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
1076 struct bfa_fcs_fdmi_port_attr_s *port_attr); 1183 struct bfa_fcs_fdmi_port_attr_s *port_attr);
1184u32 bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed);
1185
1077/* 1186/*
1078 * fcs_fdmi_sm FCS FDMI state machine 1187 * fcs_fdmi_sm FCS FDMI state machine
1079 */ 1188 */
@@ -1672,7 +1781,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
1672 memcpy(attr->value, fcs_hba_attr->driver_version, templen); 1781 memcpy(attr->value, fcs_hba_attr->driver_version, templen);
1673 templen = fc_roundup(templen, sizeof(u32)); 1782 templen = fc_roundup(templen, sizeof(u32));
1674 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; 1783 curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
1675 len += templen;; 1784 len += templen;
1676 count++; 1785 count++;
1677 attr->len = cpu_to_be16(templen + sizeof(attr->type) + 1786 attr->len = cpu_to_be16(templen + sizeof(attr->type) +
1678 sizeof(templen)); 1787 sizeof(templen));
@@ -2160,12 +2269,36 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2160 /* 2269 /*
2161 * Supported Speeds 2270 * Supported Speeds
2162 */ 2271 */
2163 port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS); 2272 switch (pport_attr.speed_supported) {
2273 case BFA_PORT_SPEED_16GBPS:
2274 port_attr->supp_speed =
2275 cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G);
2276 break;
2277
2278 case BFA_PORT_SPEED_10GBPS:
2279 port_attr->supp_speed =
2280 cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G);
2281 break;
2282
2283 case BFA_PORT_SPEED_8GBPS:
2284 port_attr->supp_speed =
2285 cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G);
2286 break;
2287
2288 case BFA_PORT_SPEED_4GBPS:
2289 port_attr->supp_speed =
2290 cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G);
2291 break;
2292
2293 default:
2294 bfa_sm_fault(port->fcs, pport_attr.speed_supported);
2295 }
2164 2296
2165 /* 2297 /*
2166 * Current Speed 2298 * Current Speed
2167 */ 2299 */
2168 port_attr->curr_speed = cpu_to_be32(pport_attr.speed); 2300 port_attr->curr_speed = cpu_to_be32(
2301 bfa_fcs_fdmi_convert_speed(pport_attr.speed));
2169 2302
2170 /* 2303 /*
2171 * Max PDU Size. 2304 * Max PDU Size.
@@ -2186,6 +2319,41 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2186 2319
2187} 2320}
2188 2321
2322/*
2323 * Convert BFA speed to FDMI format.
2324 */
2325u32
2326bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed)
2327{
2328 u32 ret;
2329
2330 switch (pport_speed) {
2331 case BFA_PORT_SPEED_1GBPS:
2332 case BFA_PORT_SPEED_2GBPS:
2333 ret = pport_speed;
2334 break;
2335
2336 case BFA_PORT_SPEED_4GBPS:
2337 ret = FDMI_TRANS_SPEED_4G;
2338 break;
2339
2340 case BFA_PORT_SPEED_8GBPS:
2341 ret = FDMI_TRANS_SPEED_8G;
2342 break;
2343
2344 case BFA_PORT_SPEED_10GBPS:
2345 ret = FDMI_TRANS_SPEED_10G;
2346 break;
2347
2348 case BFA_PORT_SPEED_16GBPS:
2349 ret = FDMI_TRANS_SPEED_16G;
2350 break;
2351
2352 default:
2353 ret = FDMI_TRANS_SPEED_UNKNOWN;
2354 }
2355 return ret;
2356}
2189 2357
2190void 2358void
2191bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms) 2359bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms)
@@ -2829,7 +2997,8 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2829 bfa_hton3b(FC_MGMT_SERVER), 2997 bfa_hton3b(FC_MGMT_SERVER),
2830 bfa_fcs_lport_get_fcid(port), 0, 2998 bfa_fcs_lport_get_fcid(port), 0,
2831 port->port_cfg.pwwn, port->port_cfg.nwwn, 2999 port->port_cfg.pwwn, port->port_cfg.nwwn,
2832 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 3000 bfa_fcport_get_maxfrsize(port->fcs->bfa),
3001 bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
2833 3002
2834 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 3003 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
2835 FC_CLASS_3, len, &fchs, 3004 FC_CLASS_3, len, &fchs,
@@ -3573,7 +3742,7 @@ bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3573 3742
3574 bfa_trc(port->fcs, port->pid); 3743 bfa_trc(port->fcs, port->pid);
3575 3744
3576fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 3745 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3577 if (!fcxp) { 3746 if (!fcxp) {
3578 port->stats.ns_plogi_alloc_wait++; 3747 port->stats.ns_plogi_alloc_wait++;
3579 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, 3748 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
@@ -3586,7 +3755,8 @@ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
3586 bfa_hton3b(FC_NAME_SERVER), 3755 bfa_hton3b(FC_NAME_SERVER),
3587 bfa_fcs_lport_get_fcid(port), 0, 3756 bfa_fcs_lport_get_fcid(port), 0,
3588 port->port_cfg.pwwn, port->port_cfg.nwwn, 3757 port->port_cfg.pwwn, port->port_cfg.nwwn,
3589 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 3758 bfa_fcport_get_maxfrsize(port->fcs->bfa),
3759 bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
3590 3760
3591 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 3761 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
3592 FC_CLASS_3, len, &fchs, 3762 FC_CLASS_3, len, &fchs,
@@ -4762,8 +4932,8 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
4762 while (qe != qh) { 4932 while (qe != qh) {
4763 rport = (struct bfa_fcs_rport_s *) qe; 4933 rport = (struct bfa_fcs_rport_s *) qe;
4764 if ((bfa_ntoh3b(rport->pid) > 0xFFF000) || 4934 if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
4765 (bfa_fcs_rport_get_state(rport) == 4935 (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) ||
4766 BFA_RPORT_OFFLINE)) { 4936 (rport->scsi_function != BFA_RPORT_TARGET)) {
4767 qe = bfa_q_next(qe); 4937 qe = bfa_q_next(qe);
4768 continue; 4938 continue;
4769 } 4939 }
@@ -4776,17 +4946,15 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
4776 bfa_fcport_get_ratelim_speed(port->fcs->bfa); 4946 bfa_fcport_get_ratelim_speed(port->fcs->bfa);
4777 } 4947 }
4778 4948
4779 if ((rport_speed == BFA_PORT_SPEED_8GBPS) || 4949 if (rport_speed > max_speed)
4780 (rport_speed > port_speed)) {
4781 max_speed = rport_speed; 4950 max_speed = rport_speed;
4782 break;
4783 } else if (rport_speed > max_speed) {
4784 max_speed = rport_speed;
4785 }
4786 4951
4787 qe = bfa_q_next(qe); 4952 qe = bfa_q_next(qe);
4788 } 4953 }
4789 4954
4955 if (max_speed > port_speed)
4956 max_speed = port_speed;
4957
4790 bfa_trc(fcs, max_speed); 4958 bfa_trc(fcs, max_speed);
4791 return max_speed; 4959 return max_speed;
4792} 4960}
@@ -4918,6 +5086,7 @@ enum bfa_fcs_vport_event {
4918 BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */ 5086 BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */
4919 BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/ 5087 BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/
4920 BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */ 5088 BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */
5089 BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */
4921}; 5090};
4922 5091
4923static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, 5092static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
@@ -4930,6 +5099,8 @@ static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
4930 enum bfa_fcs_vport_event event); 5099 enum bfa_fcs_vport_event event);
4931static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, 5100static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
4932 enum bfa_fcs_vport_event event); 5101 enum bfa_fcs_vport_event event);
5102static void bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
5103 enum bfa_fcs_vport_event event);
4933static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, 5104static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
4934 enum bfa_fcs_vport_event event); 5105 enum bfa_fcs_vport_event event);
4935static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, 5106static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
@@ -4940,6 +5111,10 @@ static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
4940 enum bfa_fcs_vport_event event); 5111 enum bfa_fcs_vport_event event);
4941static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, 5112static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
4942 enum bfa_fcs_vport_event event); 5113 enum bfa_fcs_vport_event event);
5114static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
5115 enum bfa_fcs_vport_event event);
5116static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
5117 enum bfa_fcs_vport_event event);
4943 5118
4944static struct bfa_sm_table_s vport_sm_table[] = { 5119static struct bfa_sm_table_s vport_sm_table[] = {
4945 {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT}, 5120 {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
@@ -4947,6 +5122,7 @@ static struct bfa_sm_table_s vport_sm_table[] = {
4947 {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE}, 5122 {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
4948 {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC}, 5123 {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
4949 {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY}, 5124 {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
5125 {BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT},
4950 {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE}, 5126 {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
4951 {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING}, 5127 {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
4952 {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP}, 5128 {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
@@ -5042,6 +5218,11 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
5042 bfa_fcs_vport_do_fdisc(vport); 5218 bfa_fcs_vport_do_fdisc(vport);
5043 break; 5219 break;
5044 5220
5221 case BFA_FCS_VPORT_SM_STOP:
5222 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5223 bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
5224 break;
5225
5045 case BFA_FCS_VPORT_SM_OFFLINE: 5226 case BFA_FCS_VPORT_SM_OFFLINE:
5046 /* 5227 /*
5047 * This can happen if the vport couldn't be initialzied 5228 * This can happen if the vport couldn't be initialzied
@@ -5070,9 +5251,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
5070 5251
5071 switch (event) { 5252 switch (event) {
5072 case BFA_FCS_VPORT_SM_DELETE: 5253 case BFA_FCS_VPORT_SM_DELETE:
5073 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); 5254 bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait);
5074 bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
5075 bfa_fcs_lport_delete(&vport->lport);
5076 break; 5255 break;
5077 5256
5078 case BFA_FCS_VPORT_SM_OFFLINE: 5257 case BFA_FCS_VPORT_SM_OFFLINE:
@@ -5140,6 +5319,41 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
5140} 5319}
5141 5320
5142/* 5321/*
5322 * FDISC is in progress and we got a vport delete request -
5323 * this is a wait state while we wait for fdisc response and
5324 * we will transition to the appropriate state - on rsp status.
5325 */
5326static void
5327bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
5328 enum bfa_fcs_vport_event event)
5329{
5330 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5331 bfa_trc(__vport_fcs(vport), event);
5332
5333 switch (event) {
5334 case BFA_FCS_VPORT_SM_RSP_OK:
5335 bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
5336 bfa_fcs_lport_delete(&vport->lport);
5337 break;
5338
5339 case BFA_FCS_VPORT_SM_DELETE:
5340 break;
5341
5342 case BFA_FCS_VPORT_SM_OFFLINE:
5343 case BFA_FCS_VPORT_SM_RSP_ERROR:
5344 case BFA_FCS_VPORT_SM_RSP_FAILED:
5345 case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
5346 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5347 bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
5348 bfa_fcs_lport_delete(&vport->lport);
5349 break;
5350
5351 default:
5352 bfa_sm_fault(__vport_fcs(vport), event);
5353 }
5354}
5355
5356/*
5143 * Vport is online (FDISC is complete). 5357 * Vport is online (FDISC is complete).
5144 */ 5358 */
5145static void 5359static void
@@ -5155,6 +5369,11 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
5155 bfa_fcs_lport_delete(&vport->lport); 5369 bfa_fcs_lport_delete(&vport->lport);
5156 break; 5370 break;
5157 5371
5372 case BFA_FCS_VPORT_SM_STOP:
5373 bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping);
5374 bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
5375 break;
5376
5158 case BFA_FCS_VPORT_SM_OFFLINE: 5377 case BFA_FCS_VPORT_SM_OFFLINE:
5159 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); 5378 bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
5160 bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); 5379 bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
@@ -5167,6 +5386,32 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
5167} 5386}
5168 5387
5169/* 5388/*
5389 * Vport is being stopped - awaiting lport stop completion to send
5390 * LOGO to fabric.
5391 */
5392static void
5393bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
5394 enum bfa_fcs_vport_event event)
5395{
5396 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5397 bfa_trc(__vport_fcs(vport), event);
5398
5399 switch (event) {
5400 case BFA_FCS_VPORT_SM_STOPCOMP:
5401 bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop);
5402 bfa_fcs_vport_do_logo(vport);
5403 break;
5404
5405 case BFA_FCS_VPORT_SM_OFFLINE:
5406 bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
5407 break;
5408
5409 default:
5410 bfa_sm_fault(__vport_fcs(vport), event);
5411 }
5412}
5413
5414/*
5170 * Vport is being deleted - awaiting lport delete completion to send 5415 * Vport is being deleted - awaiting lport delete completion to send
5171 * LOGO to fabric. 5416 * LOGO to fabric.
5172 */ 5417 */
@@ -5236,6 +5481,10 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
5236 bfa_fcs_vport_free(vport); 5481 bfa_fcs_vport_free(vport);
5237 break; 5482 break;
5238 5483
5484 case BFA_FCS_VPORT_SM_STOPCOMP:
5485 bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
5486 break;
5487
5239 case BFA_FCS_VPORT_SM_DELETE: 5488 case BFA_FCS_VPORT_SM_DELETE:
5240 break; 5489 break;
5241 5490
@@ -5245,6 +5494,34 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
5245} 5494}
5246 5495
5247/* 5496/*
5497 * LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup
5498 * is done.
5499 */
5500static void
5501bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
5502 enum bfa_fcs_vport_event event)
5503{
5504 bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
5505 bfa_trc(__vport_fcs(vport), event);
5506
5507 switch (event) {
5508 case BFA_FCS_VPORT_SM_OFFLINE:
5509 bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
5510 /*
5511 * !!! fall through !!!
5512 */
5513
5514 case BFA_FCS_VPORT_SM_RSP_OK:
5515 case BFA_FCS_VPORT_SM_RSP_ERROR:
5516 bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
5517 break;
5518
5519 default:
5520 bfa_sm_fault(__vport_fcs(vport), event);
5521 }
5522}
5523
5524/*
5248 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup 5525 * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
5249 * is done. 5526 * is done.
5250 */ 5527 */
@@ -5391,7 +5668,10 @@ void
5391bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport) 5668bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
5392{ 5669{
5393 vport->vport_stats.fab_online++; 5670 vport->vport_stats.fab_online++;
5394 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); 5671 if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport)))
5672 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
5673 else
5674 vport->vport_stats.fab_no_npiv++;
5395} 5675}
5396 5676
5397/* 5677/*
@@ -5422,6 +5702,15 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
5422} 5702}
5423 5703
5424/* 5704/*
5705 * Stop completion callback from associated lport
5706 */
5707void
5708bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport)
5709{
5710 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP);
5711}
5712
5713/*
5425 * Delete completion callback from associated lport 5714 * Delete completion callback from associated lport
5426 */ 5715 */
5427void 5716void
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index caaee6f06937..2c514458a6b4 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -262,6 +262,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
262 break; 262 break;
263 263
264 case RPSM_EVENT_PLOGI_RCVD: 264 case RPSM_EVENT_PLOGI_RCVD:
265 case RPSM_EVENT_PLOGI_COMP:
265 case RPSM_EVENT_SCN: 266 case RPSM_EVENT_SCN:
266 /* 267 /*
267 * Ignore, SCN is possibly online notification. 268 * Ignore, SCN is possibly online notification.
@@ -470,6 +471,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
470 break; 471 break;
471 472
472 case RPSM_EVENT_PRLO_RCVD: 473 case RPSM_EVENT_PRLO_RCVD:
474 case RPSM_EVENT_PLOGI_COMP:
473 break; 475 break;
474 476
475 case RPSM_EVENT_LOGO_RCVD: 477 case RPSM_EVENT_LOGO_RCVD:
@@ -484,9 +486,9 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
484 break; 486 break;
485 487
486 case RPSM_EVENT_PLOGI_RCVD: 488 case RPSM_EVENT_PLOGI_RCVD:
487 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); 489 rport->plogi_pending = BFA_TRUE;
490 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
488 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); 491 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
489 bfa_fcs_rport_send_plogiacc(rport, NULL);
490 break; 492 break;
491 493
492 case RPSM_EVENT_DELETE: 494 case RPSM_EVENT_DELETE:
@@ -891,6 +893,18 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
891 893
892 switch (event) { 894 switch (event) {
893 case RPSM_EVENT_HCB_OFFLINE: 895 case RPSM_EVENT_HCB_OFFLINE:
896 if (bfa_fcs_lport_is_online(rport->port) &&
897 (rport->plogi_pending)) {
898 rport->plogi_pending = BFA_FALSE;
899 bfa_sm_set_state(rport,
900 bfa_fcs_rport_sm_plogiacc_sending);
901 bfa_fcs_rport_send_plogiacc(rport, NULL);
902 break;
903 }
904 /*
905 * !! fall through !!
906 */
907
894 case RPSM_EVENT_ADDRESS_CHANGE: 908 case RPSM_EVENT_ADDRESS_CHANGE:
895 if (bfa_fcs_lport_is_online(rport->port)) { 909 if (bfa_fcs_lport_is_online(rport->port)) {
896 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { 910 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
@@ -921,6 +935,8 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
921 case RPSM_EVENT_SCN: 935 case RPSM_EVENT_SCN:
922 case RPSM_EVENT_LOGO_RCVD: 936 case RPSM_EVENT_LOGO_RCVD:
923 case RPSM_EVENT_PRLO_RCVD: 937 case RPSM_EVENT_PRLO_RCVD:
938 case RPSM_EVENT_PLOGI_RCVD:
939 case RPSM_EVENT_LOGO_IMP:
924 /* 940 /*
925 * Ignore, already offline. 941 * Ignore, already offline.
926 */ 942 */
@@ -957,10 +973,18 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
957 */ 973 */
958 if (bfa_fcs_lport_is_online(rport->port) && 974 if (bfa_fcs_lport_is_online(rport->port) &&
959 (!BFA_FCS_PID_IS_WKA(rport->pid))) { 975 (!BFA_FCS_PID_IS_WKA(rport->pid))) {
960 bfa_sm_set_state(rport, 976 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
961 bfa_fcs_rport_sm_nsdisc_sending); 977 bfa_sm_set_state(rport,
962 rport->ns_retries = 0; 978 bfa_fcs_rport_sm_nsdisc_sending);
963 bfa_fcs_rport_send_nsdisc(rport, NULL); 979 rport->ns_retries = 0;
980 bfa_fcs_rport_send_nsdisc(rport, NULL);
981 } else {
982 /* For N2N Direct Attach, try to re-login */
983 bfa_sm_set_state(rport,
984 bfa_fcs_rport_sm_plogi_sending);
985 rport->plogi_retries = 0;
986 bfa_fcs_rport_send_plogi(rport, NULL);
987 }
964 } else { 988 } else {
965 /* 989 /*
966 * if it is not a well known address, reset the 990 * if it is not a well known address, reset the
@@ -1356,7 +1380,8 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1356 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1380 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1357 bfa_fcs_lport_get_fcid(port), 0, 1381 bfa_fcs_lport_get_fcid(port), 0,
1358 port->port_cfg.pwwn, port->port_cfg.nwwn, 1382 port->port_cfg.pwwn, port->port_cfg.nwwn,
1359 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 1383 bfa_fcport_get_maxfrsize(port->fcs->bfa),
1384 bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
1360 1385
1361 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1386 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1362 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, 1387 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
@@ -1476,7 +1501,8 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1476 rport->pid, bfa_fcs_lport_get_fcid(port), 1501 rport->pid, bfa_fcs_lport_get_fcid(port),
1477 rport->reply_oxid, port->port_cfg.pwwn, 1502 rport->reply_oxid, port->port_cfg.pwwn,
1478 port->port_cfg.nwwn, 1503 port->port_cfg.nwwn,
1479 bfa_fcport_get_maxfrsize(port->fcs->bfa)); 1504 bfa_fcport_get_maxfrsize(port->fcs->bfa),
1505 bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
1480 1506
1481 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1507 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1482 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1508 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
@@ -2024,6 +2050,11 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2024 2050
2025 rport->stats.onlines++; 2051 rport->stats.onlines++;
2026 2052
2053 if ((!rport->pid) || (!rport->pwwn)) {
2054 bfa_trc(rport->fcs, rport->pid);
2055 bfa_sm_fault(rport->fcs, rport->pid);
2056 }
2057
2027 if (bfa_fcs_lport_is_initiator(port)) { 2058 if (bfa_fcs_lport_is_initiator(port)) {
2028 bfa_fcs_itnim_rport_online(rport->itnim); 2059 bfa_fcs_itnim_rport_online(rport->itnim);
2029 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2060 if (!BFA_FCS_PID_IS_WKA(rport->pid))
@@ -2047,6 +2078,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2047 char rpwwn_buf[BFA_STRING_32]; 2078 char rpwwn_buf[BFA_STRING_32];
2048 2079
2049 rport->stats.offlines++; 2080 rport->stats.offlines++;
2081 rport->plogi_pending = BFA_FALSE;
2050 2082
2051 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 2083 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2052 wwn2str(rpwwn_buf, rport->pwwn); 2084 wwn2str(rpwwn_buf, rport->pwwn);
@@ -2120,7 +2152,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2120 2152
2121 port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred); 2153 port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
2122 bfa_fcport_set_tx_bbcredit(port->fcs->bfa, 2154 bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
2123 port->fabric->bb_credit); 2155 port->fabric->bb_credit, 0);
2124 } 2156 }
2125 2157
2126} 2158}
@@ -2233,22 +2265,6 @@ bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
2233 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); 2265 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
2234} 2266}
2235 2267
2236static int
2237wwn_compare(wwn_t wwn1, wwn_t wwn2)
2238{
2239 u8 *b1 = (u8 *) &wwn1;
2240 u8 *b2 = (u8 *) &wwn2;
2241 int i;
2242
2243 for (i = 0; i < sizeof(wwn_t); i++) {
2244 if (b1[i] < b2[i])
2245 return -1;
2246 if (b1[i] > b2[i])
2247 return 1;
2248 }
2249 return 0;
2250}
2251
2252/* 2268/*
2253 * Called by bport/vport to handle PLOGI received from an existing 2269 * Called by bport/vport to handle PLOGI received from an existing
2254 * remote port. 2270 * remote port.
@@ -2266,19 +2282,8 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2266 rport->reply_oxid = rx_fchs->ox_id; 2282 rport->reply_oxid = rx_fchs->ox_id;
2267 bfa_trc(rport->fcs, rport->reply_oxid); 2283 bfa_trc(rport->fcs, rport->reply_oxid);
2268 2284
2269 /* 2285 rport->pid = rx_fchs->s_id;
2270 * In Switched fabric topology, 2286 bfa_trc(rport->fcs, rport->pid);
2271 * PLOGI to each other. If our pwwn is smaller, ignore it,
2272 * if it is not a well known address.
2273 * If the link topology is N2N,
2274 * this Plogi should be accepted.
2275 */
2276 if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) &&
2277 (bfa_fcs_fabric_is_switched(rport->port->fabric)) &&
2278 (!BFA_FCS_PID_IS_WKA(rport->pid))) {
2279 bfa_trc(rport->fcs, rport->pid);
2280 return;
2281 }
2282 2287
2283 rport->stats.plogi_rcvd++; 2288 rport->stats.plogi_rcvd++;
2284 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); 2289 bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
@@ -2531,7 +2536,45 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
2531 bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD); 2536 bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
2532} 2537}
2533 2538
2534 2539void
2540bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
2541 struct bfa_rport_attr_s *rport_attr)
2542{
2543 struct bfa_rport_qos_attr_s qos_attr;
2544 struct bfa_fcs_lport_s *port = rport->port;
2545 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
2546
2547 memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
2548 memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));
2549
2550 rport_attr->pid = rport->pid;
2551 rport_attr->pwwn = rport->pwwn;
2552 rport_attr->nwwn = rport->nwwn;
2553 rport_attr->cos_supported = rport->fc_cos;
2554 rport_attr->df_sz = rport->maxfrsize;
2555 rport_attr->state = bfa_fcs_rport_get_state(rport);
2556 rport_attr->fc_cos = rport->fc_cos;
2557 rport_attr->cisc = rport->cisc;
2558 rport_attr->scsi_function = rport->scsi_function;
2559 rport_attr->curr_speed = rport->rpf.rpsc_speed;
2560 rport_attr->assigned_speed = rport->rpf.assigned_speed;
2561
2562 qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
2563 qos_attr.qos_flow_id =
2564 cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
2565 rport_attr->qos_attr = qos_attr;
2566
2567 rport_attr->trl_enforced = BFA_FALSE;
2568 if (bfa_fcport_is_ratelim(port->fcs->bfa) &&
2569 (rport->scsi_function == BFA_RPORT_TARGET)) {
2570 if (rport_speed == BFA_PORT_SPEED_UNKNOWN)
2571 rport_speed =
2572 bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
2573
2574 if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
2575 rport_attr->trl_enforced = BFA_TRUE;
2576 }
2577}
2535 2578
2536/* 2579/*
2537 * Remote port implementation. 2580 * Remote port implementation.
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index 977e681ec803..e7ffd8205dc7 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -17,14 +17,14 @@
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfa_modules.h" 19#include "bfa_modules.h"
20#include "bfi_cbreg.h" 20#include "bfi_reg.h"
21 21
22void 22void
23bfa_hwcb_reginit(struct bfa_s *bfa) 23bfa_hwcb_reginit(struct bfa_s *bfa)
24{ 24{
25 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; 25 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
26 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); 26 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
27 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); 27 int fn = bfa_ioc_pcifn(&bfa->ioc);
28 28
29 if (fn == 0) { 29 if (fn == 0) {
30 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); 30 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -33,29 +33,6 @@ bfa_hwcb_reginit(struct bfa_s *bfa)
33 bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); 33 bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
34 bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); 34 bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
35 } 35 }
36
37 for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
38 /*
39 * CPE registers
40 */
41 q = CPE_Q_NUM(fn, i);
42 bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
43 bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
44 bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));
45
46 /*
47 * RME registers
48 */
49 q = CPE_Q_NUM(fn, i);
50 bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
51 bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
52 bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
53 }
54}
55
56void
57bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
58{
59} 36}
60 37
61static void 38static void
@@ -65,11 +42,6 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
65 bfa->iocfc.bfa_regs.intr_status); 42 bfa->iocfc.bfa_regs.intr_status);
66} 43}
67 44
68void
69bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
70{
71}
72
73static void 45static void
74bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq) 46bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
75{ 47{
@@ -104,43 +76,71 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
104} 76}
105 77
106/* 78/*
79 * Dummy interrupt handler for handling spurious interrupts.
80 */
81static void
82bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
83{
84}
85
86/*
107 * No special setup required for crossbow -- vector assignments are implicit. 87 * No special setup required for crossbow -- vector assignments are implicit.
108 */ 88 */
109void 89void
110bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs) 90bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
111{ 91{
112 int i;
113
114 WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS)); 92 WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
115 93
116 bfa->msix.nvecs = nvecs; 94 bfa->msix.nvecs = nvecs;
117 if (nvecs == 1) { 95 bfa_hwcb_msix_uninstall(bfa);
118 for (i = 0; i < BFA_MSIX_CB_MAX; i++) 96}
97
98void
99bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
100{
101 int i;
102
103 if (bfa->msix.nvecs == 0)
104 return;
105
106 if (bfa->msix.nvecs == 1) {
107 for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++)
119 bfa->msix.handler[i] = bfa_msix_all; 108 bfa->msix.handler[i] = bfa_msix_all;
120 return; 109 return;
121 } 110 }
122 111
123 for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++) 112 for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
124 bfa->msix.handler[i] = bfa_msix_reqq;
125
126 for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
127 bfa->msix.handler[i] = bfa_msix_rspq;
128
129 for (; i < BFA_MSIX_CB_MAX; i++)
130 bfa->msix.handler[i] = bfa_msix_lpu_err; 113 bfa->msix.handler[i] = bfa_msix_lpu_err;
131} 114}
132 115
133/*
134 * Crossbow -- dummy, interrupts are masked
135 */
136void 116void
137bfa_hwcb_msix_install(struct bfa_s *bfa) 117bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
138{ 118{
119 int i;
120
121 if (bfa->msix.nvecs == 0)
122 return;
123
124 if (bfa->msix.nvecs == 1) {
125 for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
126 bfa->msix.handler[i] = bfa_msix_all;
127 return;
128 }
129
130 for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
131 bfa->msix.handler[i] = bfa_msix_reqq;
132
133 for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
134 bfa->msix.handler[i] = bfa_msix_rspq;
139} 135}
140 136
141void 137void
142bfa_hwcb_msix_uninstall(struct bfa_s *bfa) 138bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
143{ 139{
140 int i;
141
142 for (i = 0; i < BFI_MSIX_CB_MAX; i++)
143 bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
144} 144}
145 145
146/* 146/*
@@ -156,6 +156,6 @@ bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
156void 156void
157bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) 157bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
158{ 158{
159 *start = BFA_MSIX_RME_Q0; 159 *start = BFI_MSIX_RME_QMIN_CB;
160 *end = BFA_MSIX_RME_Q7; 160 *end = BFI_MSIX_RME_QMAX_CB;
161} 161}
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 21018d98a07b..989bbce9b296 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -17,29 +17,10 @@
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfa_modules.h" 19#include "bfa_modules.h"
20#include "bfi_ctreg.h" 20#include "bfi_reg.h"
21 21
22BFA_TRC_FILE(HAL, IOCFC_CT); 22BFA_TRC_FILE(HAL, IOCFC_CT);
23 23
24static u32 __ct_msix_err_vec_reg[] = {
25 HOST_MSIX_ERR_INDEX_FN0,
26 HOST_MSIX_ERR_INDEX_FN1,
27 HOST_MSIX_ERR_INDEX_FN2,
28 HOST_MSIX_ERR_INDEX_FN3,
29};
30
31static void
32bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
33{
34 int fn = bfa_ioc_pcifn(&bfa->ioc);
35 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
36
37 if (msix)
38 writel(vec, kva + __ct_msix_err_vec_reg[fn]);
39 else
40 writel(0, kva + __ct_msix_err_vec_reg[fn]);
41}
42
43/* 24/*
44 * Dummy interrupt handler for handling spurious interrupt during chip-reinit. 25 * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
45 */ 26 */
@@ -53,7 +34,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
53{ 34{
54 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; 35 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
55 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); 36 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
56 int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); 37 int fn = bfa_ioc_pcifn(&bfa->ioc);
57 38
58 if (fn == 0) { 39 if (fn == 0) {
59 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); 40 bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -62,26 +43,16 @@ bfa_hwct_reginit(struct bfa_s *bfa)
62 bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); 43 bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
63 bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); 44 bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
64 } 45 }
46}
65 47
66 for (i = 0; i < BFI_IOC_MAX_CQS; i++) { 48void
67 /* 49bfa_hwct2_reginit(struct bfa_s *bfa)
68 * CPE registers 50{
69 */ 51 struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
70 q = CPE_Q_NUM(fn, i); 52 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
71 bfa_regs->cpe_q_pi[i] = (kva + CPE_PI_PTR_Q(q << 5)); 53
72 bfa_regs->cpe_q_ci[i] = (kva + CPE_CI_PTR_Q(q << 5)); 54 bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS);
73 bfa_regs->cpe_q_depth[i] = (kva + CPE_DEPTH_Q(q << 5)); 55 bfa_regs->intr_mask = (kva + CT2_HOSTFN_INTR_MASK);
74 bfa_regs->cpe_q_ctrl[i] = (kva + CPE_QCTRL_Q(q << 5));
75
76 /*
77 * RME registers
78 */
79 q = CPE_Q_NUM(fn, i);
80 bfa_regs->rme_q_pi[i] = (kva + RME_PI_PTR_Q(q << 5));
81 bfa_regs->rme_q_ci[i] = (kva + RME_CI_PTR_Q(q << 5));
82 bfa_regs->rme_q_depth[i] = (kva + RME_DEPTH_Q(q << 5));
83 bfa_regs->rme_q_ctrl[i] = (kva + RME_QCTRL_Q(q << 5));
84 }
85} 56}
86 57
87void 58void
@@ -106,9 +77,9 @@ void
106bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, 77bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
107 u32 *num_vecs, u32 *max_vec_bit) 78 u32 *num_vecs, u32 *max_vec_bit)
108{ 79{
109 *msix_vecs_bmap = (1 << BFA_MSIX_CT_MAX) - 1; 80 *msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1;
110 *max_vec_bit = (1 << (BFA_MSIX_CT_MAX - 1)); 81 *max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1));
111 *num_vecs = BFA_MSIX_CT_MAX; 82 *num_vecs = BFI_MSIX_CT_MAX;
112} 83}
113 84
114/* 85/*
@@ -117,7 +88,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
117void 88void
118bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs) 89bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
119{ 90{
120 WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX)); 91 WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX));
121 bfa_trc(bfa, nvecs); 92 bfa_trc(bfa, nvecs);
122 93
123 bfa->msix.nvecs = nvecs; 94 bfa->msix.nvecs = nvecs;
@@ -125,7 +96,19 @@ bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
125} 96}
126 97
127void 98void
128bfa_hwct_msix_install(struct bfa_s *bfa) 99bfa_hwct_msix_ctrl_install(struct bfa_s *bfa)
100{
101 if (bfa->msix.nvecs == 0)
102 return;
103
104 if (bfa->msix.nvecs == 1)
105 bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;
106 else
107 bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
108}
109
110void
111bfa_hwct_msix_queue_install(struct bfa_s *bfa)
129{ 112{
130 int i; 113 int i;
131 114
@@ -133,19 +116,16 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
133 return; 116 return;
134 117
135 if (bfa->msix.nvecs == 1) { 118 if (bfa->msix.nvecs == 1) {
136 for (i = 0; i < BFA_MSIX_CT_MAX; i++) 119 for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++)
137 bfa->msix.handler[i] = bfa_msix_all; 120 bfa->msix.handler[i] = bfa_msix_all;
138 return; 121 return;
139 } 122 }
140 123
141 for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q3; i++) 124 for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++)
142 bfa->msix.handler[i] = bfa_msix_reqq; 125 bfa->msix.handler[i] = bfa_msix_reqq;
143 126
144 for (; i <= BFA_MSIX_RME_Q3; i++) 127 for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++)
145 bfa->msix.handler[i] = bfa_msix_rspq; 128 bfa->msix.handler[i] = bfa_msix_rspq;
146
147 WARN_ON(i != BFA_MSIX_LPU_ERR);
148 bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
149} 129}
150 130
151void 131void
@@ -153,7 +133,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
153{ 133{
154 int i; 134 int i;
155 135
156 for (i = 0; i < BFA_MSIX_CT_MAX; i++) 136 for (i = 0; i < BFI_MSIX_CT_MAX; i++)
157 bfa->msix.handler[i] = bfa_hwct_msix_dummy; 137 bfa->msix.handler[i] = bfa_hwct_msix_dummy;
158} 138}
159 139
@@ -164,13 +144,12 @@ void
164bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) 144bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
165{ 145{
166 bfa_trc(bfa, 0); 146 bfa_trc(bfa, 0);
167 bfa_hwct_msix_lpu_err_set(bfa, msix, BFA_MSIX_LPU_ERR);
168 bfa_ioc_isr_mode_set(&bfa->ioc, msix); 147 bfa_ioc_isr_mode_set(&bfa->ioc, msix);
169} 148}
170 149
171void 150void
172bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) 151bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
173{ 152{
174 *start = BFA_MSIX_RME_Q0; 153 *start = BFI_MSIX_RME_QMIN_CT;
175 *end = BFA_MSIX_RME_Q3; 154 *end = BFI_MSIX_RME_QMAX_CT;
176} 155}
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 6c7e0339dda4..d6c2bf3865d2 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -17,7 +17,7 @@
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfa_ioc.h" 19#include "bfa_ioc.h"
20#include "bfi_ctreg.h" 20#include "bfi_reg.h"
21#include "bfa_defs.h" 21#include "bfa_defs.h"
22#include "bfa_defs_svc.h" 22#include "bfa_defs_svc.h"
23 23
@@ -29,8 +29,8 @@ BFA_TRC_FILE(CNA, IOC);
29#define BFA_IOC_TOV 3000 /* msecs */ 29#define BFA_IOC_TOV 3000 /* msecs */
30#define BFA_IOC_HWSEM_TOV 500 /* msecs */ 30#define BFA_IOC_HWSEM_TOV 500 /* msecs */
31#define BFA_IOC_HB_TOV 500 /* msecs */ 31#define BFA_IOC_HB_TOV 500 /* msecs */
32#define BFA_IOC_HWINIT_MAX 5
33#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV 32#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
33#define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
34 34
35#define bfa_ioc_timer_start(__ioc) \ 35#define bfa_ioc_timer_start(__ioc) \
36 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ 36 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
@@ -79,14 +79,17 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE;
79static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); 79static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
80static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); 80static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
81static void bfa_ioc_timeout(void *ioc); 81static void bfa_ioc_timeout(void *ioc);
82static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
82static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); 83static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
83static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); 84static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
84static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); 85static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
85static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); 86static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
86static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 87static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
87static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); 88static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
88static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 89static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
89static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); 90static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
91static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
92 enum bfa_ioc_event_e event);
90static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 93static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
91static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); 94static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
92static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc); 95static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
@@ -105,11 +108,12 @@ enum ioc_event {
105 IOC_E_ENABLED = 5, /* f/w enabled */ 108 IOC_E_ENABLED = 5, /* f/w enabled */
106 IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */ 109 IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
107 IOC_E_DISABLED = 7, /* f/w disabled */ 110 IOC_E_DISABLED = 7, /* f/w disabled */
108 IOC_E_INITFAILED = 8, /* failure notice by iocpf sm */ 111 IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
109 IOC_E_PFFAILED = 9, /* failure notice by iocpf sm */ 112 IOC_E_HBFAIL = 9, /* heartbeat failure */
110 IOC_E_HBFAIL = 10, /* heartbeat failure */ 113 IOC_E_HWERROR = 10, /* hardware error interrupt */
111 IOC_E_HWERROR = 11, /* hardware error interrupt */ 114 IOC_E_TIMEOUT = 11, /* timeout */
112 IOC_E_TIMEOUT = 12, /* timeout */ 115 IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
116 IOC_E_FWRSP_ACQ_ADDR = 13, /* Acquiring address */
113}; 117};
114 118
115bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); 119bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -121,6 +125,8 @@ bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
121bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); 125bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
122bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); 126bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
123bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); 127bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
128bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
129bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
124 130
125static struct bfa_sm_table_s ioc_sm_table[] = { 131static struct bfa_sm_table_s ioc_sm_table[] = {
126 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, 132 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -132,6 +138,8 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
132 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, 138 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
133 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 139 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
134 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 140 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
141 {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
142 {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
135}; 143};
136 144
137/* 145/*
@@ -143,9 +151,9 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
143 bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV) 151 bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
144#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) 152#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
145 153
146#define bfa_iocpf_recovery_timer_start(__ioc) \ 154#define bfa_iocpf_poll_timer_start(__ioc) \
147 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ 155 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
148 bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER) 156 bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
149 157
150#define bfa_sem_timer_start(__ioc) \ 158#define bfa_sem_timer_start(__ioc) \
151 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \ 159 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
@@ -157,6 +165,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
157 */ 165 */
158static void bfa_iocpf_timeout(void *ioc_arg); 166static void bfa_iocpf_timeout(void *ioc_arg);
159static void bfa_iocpf_sem_timeout(void *ioc_arg); 167static void bfa_iocpf_sem_timeout(void *ioc_arg);
168static void bfa_iocpf_poll_timeout(void *ioc_arg);
160 169
161/* 170/*
162 * IOCPF state machine events 171 * IOCPF state machine events
@@ -173,6 +182,7 @@ enum iocpf_event {
173 IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */ 182 IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
174 IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */ 183 IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
175 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ 184 IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
185 IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
176}; 186};
177 187
178/* 188/*
@@ -314,11 +324,16 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
314 /* !!! fall through !!! */ 324 /* !!! fall through !!! */
315 case IOC_E_HWERROR: 325 case IOC_E_HWERROR:
316 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 326 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
317 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); 327 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
318 if (event != IOC_E_PFFAILED) 328 if (event != IOC_E_PFFAILED)
319 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); 329 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
320 break; 330 break;
321 331
332 case IOC_E_HWFAILED:
333 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
334 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
335 break;
336
322 case IOC_E_DISABLE: 337 case IOC_E_DISABLE:
323 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 338 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
324 break; 339 break;
@@ -356,17 +371,23 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
356 case IOC_E_FWRSP_GETATTR: 371 case IOC_E_FWRSP_GETATTR:
357 bfa_ioc_timer_stop(ioc); 372 bfa_ioc_timer_stop(ioc);
358 bfa_ioc_check_attr_wwns(ioc); 373 bfa_ioc_check_attr_wwns(ioc);
374 bfa_ioc_hb_monitor(ioc);
359 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 375 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
360 break; 376 break;
361 377
378 case IOC_E_FWRSP_ACQ_ADDR:
379 bfa_ioc_timer_stop(ioc);
380 bfa_ioc_hb_monitor(ioc);
381 bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
362 break; 382 break;
383
363 case IOC_E_PFFAILED: 384 case IOC_E_PFFAILED:
364 case IOC_E_HWERROR: 385 case IOC_E_HWERROR:
365 bfa_ioc_timer_stop(ioc); 386 bfa_ioc_timer_stop(ioc);
366 /* !!! fall through !!! */ 387 /* !!! fall through !!! */
367 case IOC_E_TIMEOUT: 388 case IOC_E_TIMEOUT:
368 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 389 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
369 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); 390 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
370 if (event != IOC_E_PFFAILED) 391 if (event != IOC_E_PFFAILED)
371 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); 392 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
372 break; 393 break;
@@ -384,6 +405,50 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
384 } 405 }
385} 406}
386 407
408/*
409 * Acquiring address from fabric (entry function)
410 */
411static void
412bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
413{
414}
415
416/*
417 * Acquiring address from the fabric
418 */
419static void
420bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
421{
422 bfa_trc(ioc, event);
423
424 switch (event) {
425 case IOC_E_FWRSP_GETATTR:
426 bfa_ioc_check_attr_wwns(ioc);
427 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
428 break;
429
430 case IOC_E_PFFAILED:
431 case IOC_E_HWERROR:
432 bfa_hb_timer_stop(ioc);
433 case IOC_E_HBFAIL:
434 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
435 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
436 if (event != IOC_E_PFFAILED)
437 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
438 break;
439
440 case IOC_E_DISABLE:
441 bfa_hb_timer_stop(ioc);
442 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
443 break;
444
445 case IOC_E_ENABLE:
446 break;
447
448 default:
449 bfa_sm_fault(ioc, event);
450 }
451}
387 452
388static void 453static void
389bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) 454bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
@@ -391,7 +456,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
391 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 456 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
392 457
393 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 458 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
394 bfa_ioc_hb_monitor(ioc); 459 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
395 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); 460 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
396} 461}
397 462
@@ -414,13 +479,13 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
414 bfa_hb_timer_stop(ioc); 479 bfa_hb_timer_stop(ioc);
415 /* !!! fall through !!! */ 480 /* !!! fall through !!! */
416 case IOC_E_HBFAIL: 481 case IOC_E_HBFAIL:
417 bfa_ioc_fail_notify(ioc);
418
419 if (ioc->iocpf.auto_recover) 482 if (ioc->iocpf.auto_recover)
420 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); 483 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
421 else 484 else
422 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); 485 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
423 486
487 bfa_ioc_fail_notify(ioc);
488
424 if (event != IOC_E_PFFAILED) 489 if (event != IOC_E_PFFAILED)
425 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); 490 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
426 break; 491 break;
@@ -461,6 +526,11 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
461 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); 526 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
462 break; 527 break;
463 528
529 case IOC_E_HWFAILED:
530 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
531 bfa_ioc_disable_comp(ioc);
532 break;
533
464 default: 534 default:
465 bfa_sm_fault(ioc, event); 535 bfa_sm_fault(ioc, event);
466 } 536 }
@@ -525,12 +595,14 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
525 * Initialization retry failed. 595 * Initialization retry failed.
526 */ 596 */
527 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 597 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
598 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
528 if (event != IOC_E_PFFAILED) 599 if (event != IOC_E_PFFAILED)
529 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); 600 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
530 break; 601 break;
531 602
532 case IOC_E_INITFAILED: 603 case IOC_E_HWFAILED:
533 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); 604 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
605 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
534 break; 606 break;
535 607
536 case IOC_E_ENABLE: 608 case IOC_E_ENABLE:
@@ -590,6 +662,35 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
590 } 662 }
591} 663}
592 664
665static void
666bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
667{
668 bfa_trc(ioc, 0);
669}
670
671static void
672bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
673{
674 bfa_trc(ioc, event);
675
676 switch (event) {
677 case IOC_E_ENABLE:
678 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
679 break;
680
681 case IOC_E_DISABLE:
682 ioc->cbfn->disable_cbfn(ioc->bfa);
683 break;
684
685 case IOC_E_DETACH:
686 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
687 break;
688
689 default:
690 bfa_sm_fault(ioc, event);
691 }
692}
693
593/* 694/*
594 * IOCPF State Machine 695 * IOCPF State Machine
595 */ 696 */
@@ -600,7 +701,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
600static void 701static void
601bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf) 702bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
602{ 703{
603 iocpf->retry_count = 0; 704 iocpf->fw_mismatch_notified = BFA_FALSE;
604 iocpf->auto_recover = bfa_auto_recover; 705 iocpf->auto_recover = bfa_auto_recover;
605} 706}
606 707
@@ -633,6 +734,28 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
633static void 734static void
634bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) 735bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
635{ 736{
737 struct bfi_ioc_image_hdr_s fwhdr;
738 u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
739
740 /* h/w sem init */
741 if (fwstate == BFI_IOC_UNINIT)
742 goto sem_get;
743
744 bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
745
746 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
747 goto sem_get;
748
749 bfa_trc(iocpf->ioc, fwstate);
750 bfa_trc(iocpf->ioc, fwhdr.exec);
751 writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
752
753 /*
754 * Try to lock and then unlock the semaphore.
755 */
756 readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
757 writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
758sem_get:
636 bfa_ioc_hw_sem_get(iocpf->ioc); 759 bfa_ioc_hw_sem_get(iocpf->ioc);
637} 760}
638 761
@@ -650,7 +773,6 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
650 case IOCPF_E_SEMLOCKED: 773 case IOCPF_E_SEMLOCKED:
651 if (bfa_ioc_firmware_lock(ioc)) { 774 if (bfa_ioc_firmware_lock(ioc)) {
652 if (bfa_ioc_sync_start(ioc)) { 775 if (bfa_ioc_sync_start(ioc)) {
653 iocpf->retry_count = 0;
654 bfa_ioc_sync_join(ioc); 776 bfa_ioc_sync_join(ioc);
655 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 777 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
656 } else { 778 } else {
@@ -664,6 +786,11 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
664 } 786 }
665 break; 787 break;
666 788
789 case IOCPF_E_SEM_ERROR:
790 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
791 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
792 break;
793
667 case IOCPF_E_DISABLE: 794 case IOCPF_E_DISABLE:
668 bfa_sem_timer_stop(ioc); 795 bfa_sem_timer_stop(ioc);
669 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 796 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
@@ -689,10 +816,10 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
689 /* 816 /*
690 * Call only the first time sm enters fwmismatch state. 817 * Call only the first time sm enters fwmismatch state.
691 */ 818 */
692 if (iocpf->retry_count == 0) 819 if (iocpf->fw_mismatch_notified == BFA_FALSE)
693 bfa_ioc_pf_fwmismatch(iocpf->ioc); 820 bfa_ioc_pf_fwmismatch(iocpf->ioc);
694 821
695 iocpf->retry_count++; 822 iocpf->fw_mismatch_notified = BFA_TRUE;
696 bfa_iocpf_timer_start(iocpf->ioc); 823 bfa_iocpf_timer_start(iocpf->ioc);
697} 824}
698 825
@@ -757,6 +884,11 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
757 } 884 }
758 break; 885 break;
759 886
887 case IOCPF_E_SEM_ERROR:
888 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
889 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
890 break;
891
760 case IOCPF_E_DISABLE: 892 case IOCPF_E_DISABLE:
761 bfa_sem_timer_stop(ioc); 893 bfa_sem_timer_stop(ioc);
762 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 894 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -770,7 +902,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
770static void 902static void
771bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) 903bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
772{ 904{
773 bfa_iocpf_timer_start(iocpf->ioc); 905 iocpf->poll_time = 0;
774 bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE); 906 bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
775} 907}
776 908
@@ -787,20 +919,12 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
787 919
788 switch (event) { 920 switch (event) {
789 case IOCPF_E_FWREADY: 921 case IOCPF_E_FWREADY:
790 bfa_iocpf_timer_stop(ioc);
791 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); 922 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
792 break; 923 break;
793 924
794 case IOCPF_E_INITFAIL:
795 bfa_iocpf_timer_stop(ioc);
796 /*
797 * !!! fall through !!!
798 */
799
800 case IOCPF_E_TIMEOUT: 925 case IOCPF_E_TIMEOUT:
801 writel(1, ioc->ioc_regs.ioc_sem_reg); 926 writel(1, ioc->ioc_regs.ioc_sem_reg);
802 if (event == IOCPF_E_TIMEOUT) 927 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
803 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
804 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); 928 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
805 break; 929 break;
806 930
@@ -820,6 +944,10 @@ static void
820bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) 944bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
821{ 945{
822 bfa_iocpf_timer_start(iocpf->ioc); 946 bfa_iocpf_timer_start(iocpf->ioc);
947 /*
948 * Enable Interrupts before sending fw IOC ENABLE cmd.
949 */
950 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
823 bfa_ioc_send_enable(iocpf->ioc); 951 bfa_ioc_send_enable(iocpf->ioc);
824} 952}
825 953
@@ -860,10 +988,6 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
860 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); 988 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
861 break; 989 break;
862 990
863 case IOCPF_E_FWREADY:
864 bfa_ioc_send_enable(ioc);
865 break;
866
867 default: 991 default:
868 bfa_sm_fault(ioc, event); 992 bfa_sm_fault(ioc, event);
869 } 993 }
@@ -895,16 +1019,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
895 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); 1019 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
896 break; 1020 break;
897 1021
898 case IOCPF_E_FWREADY:
899 if (bfa_ioc_is_operational(ioc)) {
900 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
901 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
902 } else {
903 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
904 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
905 }
906 break;
907
908 default: 1022 default:
909 bfa_sm_fault(ioc, event); 1023 bfa_sm_fault(ioc, event);
910 } 1024 }
@@ -929,7 +1043,6 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
929 1043
930 switch (event) { 1044 switch (event) {
931 case IOCPF_E_FWRSP_DISABLE: 1045 case IOCPF_E_FWRSP_DISABLE:
932 case IOCPF_E_FWREADY:
933 bfa_iocpf_timer_stop(ioc); 1046 bfa_iocpf_timer_stop(ioc);
934 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 1047 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
935 break; 1048 break;
@@ -976,6 +1089,11 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
976 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); 1089 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
977 break; 1090 break;
978 1091
1092 case IOCPF_E_SEM_ERROR:
1093 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1094 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1095 break;
1096
979 case IOCPF_E_FAIL: 1097 case IOCPF_E_FAIL:
980 break; 1098 break;
981 1099
@@ -990,6 +1108,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
990static void 1108static void
991bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf) 1109bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
992{ 1110{
1111 bfa_ioc_mbox_flush(iocpf->ioc);
993 bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED); 1112 bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
994} 1113}
995 1114
@@ -1002,7 +1121,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1002 1121
1003 switch (event) { 1122 switch (event) {
1004 case IOCPF_E_ENABLE: 1123 case IOCPF_E_ENABLE:
1005 iocpf->retry_count = 0;
1006 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); 1124 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1007 break; 1125 break;
1008 1126
@@ -1019,6 +1137,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1019static void 1137static void
1020bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf) 1138bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1021{ 1139{
1140 bfa_ioc_debug_save_ftrc(iocpf->ioc);
1022 bfa_ioc_hw_sem_get(iocpf->ioc); 1141 bfa_ioc_hw_sem_get(iocpf->ioc);
1023} 1142}
1024 1143
@@ -1035,20 +1154,15 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1035 switch (event) { 1154 switch (event) {
1036 case IOCPF_E_SEMLOCKED: 1155 case IOCPF_E_SEMLOCKED:
1037 bfa_ioc_notify_fail(ioc); 1156 bfa_ioc_notify_fail(ioc);
1038 bfa_ioc_sync_ack(ioc); 1157 bfa_ioc_sync_leave(ioc);
1039 iocpf->retry_count++; 1158 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1040 if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) { 1159 writel(1, ioc->ioc_regs.ioc_sem_reg);
1041 bfa_ioc_sync_leave(ioc); 1160 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1042 writel(1, ioc->ioc_regs.ioc_sem_reg); 1161 break;
1043 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); 1162
1044 } else { 1163 case IOCPF_E_SEM_ERROR:
1045 if (bfa_ioc_sync_complete(ioc)) 1164 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1046 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 1165 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1047 else {
1048 writel(1, ioc->ioc_regs.ioc_sem_reg);
1049 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1050 }
1051 }
1052 break; 1166 break;
1053 1167
1054 case IOCPF_E_DISABLE: 1168 case IOCPF_E_DISABLE:
@@ -1073,7 +1187,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1073static void 1187static void
1074bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) 1188bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1075{ 1189{
1076 bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED); 1190 bfa_trc(iocpf->ioc, 0);
1077} 1191}
1078 1192
1079/* 1193/*
@@ -1112,7 +1226,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1112 /* 1226 /*
1113 * Flush any queued up mailbox requests. 1227 * Flush any queued up mailbox requests.
1114 */ 1228 */
1115 bfa_ioc_mbox_hbfail(iocpf->ioc); 1229 bfa_ioc_mbox_flush(iocpf->ioc);
1116 1230
1117 bfa_ioc_hw_sem_get(iocpf->ioc); 1231 bfa_ioc_hw_sem_get(iocpf->ioc);
1118} 1232}
@@ -1126,11 +1240,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1126 1240
1127 switch (event) { 1241 switch (event) {
1128 case IOCPF_E_SEMLOCKED: 1242 case IOCPF_E_SEMLOCKED:
1129 iocpf->retry_count = 0;
1130 bfa_ioc_sync_ack(ioc); 1243 bfa_ioc_sync_ack(ioc);
1131 bfa_ioc_notify_fail(ioc); 1244 bfa_ioc_notify_fail(ioc);
1132 if (!iocpf->auto_recover) { 1245 if (!iocpf->auto_recover) {
1133 bfa_ioc_sync_leave(ioc); 1246 bfa_ioc_sync_leave(ioc);
1247 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1134 writel(1, ioc->ioc_regs.ioc_sem_reg); 1248 writel(1, ioc->ioc_regs.ioc_sem_reg);
1135 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); 1249 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1136 } else { 1250 } else {
@@ -1143,6 +1257,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1143 } 1257 }
1144 break; 1258 break;
1145 1259
1260 case IOCPF_E_SEM_ERROR:
1261 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1262 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1263 break;
1264
1146 case IOCPF_E_DISABLE: 1265 case IOCPF_E_DISABLE:
1147 bfa_sem_timer_stop(ioc); 1266 bfa_sem_timer_stop(ioc);
1148 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 1267 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -1159,6 +1278,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1159static void 1278static void
1160bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) 1279bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1161{ 1280{
1281 bfa_trc(iocpf->ioc, 0);
1162} 1282}
1163 1283
1164/* 1284/*
@@ -1185,23 +1305,28 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1185 * BFA IOC private functions 1305 * BFA IOC private functions
1186 */ 1306 */
1187 1307
1308/*
1309 * Notify common modules registered for notification.
1310 */
1188static void 1311static void
1189bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) 1312bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
1190{ 1313{
1191 struct list_head *qe; 1314 struct bfa_ioc_notify_s *notify;
1192 struct bfa_ioc_hbfail_notify_s *notify; 1315 struct list_head *qe;
1193 1316
1194 ioc->cbfn->disable_cbfn(ioc->bfa); 1317 list_for_each(qe, &ioc->notify_q) {
1195 1318 notify = (struct bfa_ioc_notify_s *)qe;
1196 /* 1319 notify->cbfn(notify->cbarg, event);
1197 * Notify common modules registered for notification.
1198 */
1199 list_for_each(qe, &ioc->hb_notify_q) {
1200 notify = (struct bfa_ioc_hbfail_notify_s *) qe;
1201 notify->cbfn(notify->cbarg);
1202 } 1320 }
1203} 1321}
1204 1322
1323static void
1324bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1325{
1326 ioc->cbfn->disable_cbfn(ioc->bfa);
1327 bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1328}
1329
1205bfa_boolean_t 1330bfa_boolean_t
1206bfa_ioc_sem_get(void __iomem *sem_reg) 1331bfa_ioc_sem_get(void __iomem *sem_reg)
1207{ 1332{
@@ -1211,16 +1336,15 @@ bfa_ioc_sem_get(void __iomem *sem_reg)
1211 1336
1212 r32 = readl(sem_reg); 1337 r32 = readl(sem_reg);
1213 1338
1214 while (r32 && (cnt < BFA_SEM_SPINCNT)) { 1339 while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1215 cnt++; 1340 cnt++;
1216 udelay(2); 1341 udelay(2);
1217 r32 = readl(sem_reg); 1342 r32 = readl(sem_reg);
1218 } 1343 }
1219 1344
1220 if (r32 == 0) 1345 if (!(r32 & 1))
1221 return BFA_TRUE; 1346 return BFA_TRUE;
1222 1347
1223 WARN_ON(cnt >= BFA_SEM_SPINCNT);
1224 return BFA_FALSE; 1348 return BFA_FALSE;
1225} 1349}
1226 1350
@@ -1234,7 +1358,12 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1234 * will return 1. Semaphore is released by writing 1 to the register 1358 * will return 1. Semaphore is released by writing 1 to the register
1235 */ 1359 */
1236 r32 = readl(ioc->ioc_regs.ioc_sem_reg); 1360 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1237 if (r32 == 0) { 1361 if (r32 == ~0) {
1362 WARN_ON(r32 == ~0);
1363 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1364 return;
1365 }
1366 if (!(r32 & 1)) {
1238 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); 1367 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1239 return; 1368 return;
1240 } 1369 }
@@ -1343,7 +1472,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1343 int i; 1472 int i;
1344 1473
1345 drv_fwhdr = (struct bfi_ioc_image_hdr_s *) 1474 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1346 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); 1475 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1347 1476
1348 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { 1477 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1349 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { 1478 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
@@ -1369,7 +1498,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1369 1498
1370 bfa_ioc_fwver_get(ioc, &fwhdr); 1499 bfa_ioc_fwver_get(ioc, &fwhdr);
1371 drv_fwhdr = (struct bfi_ioc_image_hdr_s *) 1500 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1372 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); 1501 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1373 1502
1374 if (fwhdr.signature != drv_fwhdr->signature) { 1503 if (fwhdr.signature != drv_fwhdr->signature) {
1375 bfa_trc(ioc, fwhdr.signature); 1504 bfa_trc(ioc, fwhdr.signature);
@@ -1377,8 +1506,8 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1377 return BFA_FALSE; 1506 return BFA_FALSE;
1378 } 1507 }
1379 1508
1380 if (swab32(fwhdr.param) != boot_env) { 1509 if (swab32(fwhdr.bootenv) != boot_env) {
1381 bfa_trc(ioc, fwhdr.param); 1510 bfa_trc(ioc, fwhdr.bootenv);
1382 bfa_trc(ioc, boot_env); 1511 bfa_trc(ioc, boot_env);
1383 return BFA_FALSE; 1512 return BFA_FALSE;
1384 } 1513 }
@@ -1414,8 +1543,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1414 1543
1415 bfa_trc(ioc, ioc_fwstate); 1544 bfa_trc(ioc, ioc_fwstate);
1416 1545
1417 boot_type = BFI_BOOT_TYPE_NORMAL; 1546 boot_type = BFI_FWBOOT_TYPE_NORMAL;
1418 boot_env = BFI_BOOT_LOADER_OS; 1547 boot_env = BFI_FWBOOT_ENV_OS;
1419 1548
1420 /* 1549 /*
1421 * check if firmware is valid 1550 * check if firmware is valid
@@ -1425,6 +1554,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1425 1554
1426 if (!fwvalid) { 1555 if (!fwvalid) {
1427 bfa_ioc_boot(ioc, boot_type, boot_env); 1556 bfa_ioc_boot(ioc, boot_type, boot_env);
1557 bfa_ioc_poll_fwinit(ioc);
1428 return; 1558 return;
1429 } 1559 }
1430 1560
@@ -1433,7 +1563,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1433 * just wait for an initialization completion interrupt. 1563 * just wait for an initialization completion interrupt.
1434 */ 1564 */
1435 if (ioc_fwstate == BFI_IOC_INITING) { 1565 if (ioc_fwstate == BFI_IOC_INITING) {
1436 ioc->cbfn->reset_cbfn(ioc->bfa); 1566 bfa_ioc_poll_fwinit(ioc);
1437 return; 1567 return;
1438 } 1568 }
1439 1569
@@ -1452,7 +1582,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1452 * be flushed. Otherwise MSI-X interrupts are not delivered. 1582 * be flushed. Otherwise MSI-X interrupts are not delivered.
1453 */ 1583 */
1454 bfa_ioc_msgflush(ioc); 1584 bfa_ioc_msgflush(ioc);
1455 ioc->cbfn->reset_cbfn(ioc->bfa);
1456 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); 1585 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1457 return; 1586 return;
1458 } 1587 }
@@ -1461,6 +1590,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1461 * Initialize the h/w for any other states. 1590 * Initialize the h/w for any other states.
1462 */ 1591 */
1463 bfa_ioc_boot(ioc, boot_type, boot_env); 1592 bfa_ioc_boot(ioc, boot_type, boot_env);
1593 bfa_ioc_poll_fwinit(ioc);
1464} 1594}
1465 1595
1466static void 1596static void
@@ -1508,7 +1638,7 @@ bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1508 1638
1509 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1639 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1510 bfa_ioc_portid(ioc)); 1640 bfa_ioc_portid(ioc));
1511 enable_req.ioc_class = ioc->ioc_mc; 1641 enable_req.clscode = cpu_to_be16(ioc->clscode);
1512 do_gettimeofday(&tv); 1642 do_gettimeofday(&tv);
1513 enable_req.tv_sec = be32_to_cpu(tv.tv_sec); 1643 enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
1514 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1644 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
@@ -1572,25 +1702,26 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1572 u32 loff = 0; 1702 u32 loff = 0;
1573 u32 chunkno = 0; 1703 u32 chunkno = 0;
1574 u32 i; 1704 u32 i;
1705 u32 asicmode;
1575 1706
1576 /* 1707 /*
1577 * Initialize LMEM first before code download 1708 * Initialize LMEM first before code download
1578 */ 1709 */
1579 bfa_ioc_lmem_init(ioc); 1710 bfa_ioc_lmem_init(ioc);
1580 1711
1581 bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc))); 1712 bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1582 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); 1713 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1583 1714
1584 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); 1715 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1585 pgoff = PSS_SMEM_PGOFF(loff); 1716 pgoff = PSS_SMEM_PGOFF(loff);
1586 1717
1587 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1718 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1588 1719
1589 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { 1720 for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1590 1721
1591 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { 1722 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1592 chunkno = BFA_IOC_FLASH_CHUNK_NO(i); 1723 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1593 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 1724 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1594 BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); 1725 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1595 } 1726 }
1596 1727
@@ -1616,11 +1747,15 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1616 ioc->ioc_regs.host_page_num_fn); 1747 ioc->ioc_regs.host_page_num_fn);
1617 1748
1618 /* 1749 /*
1619 * Set boot type and boot param at the end. 1750 * Set boot type and device mode at the end.
1620 */ 1751 */
1621 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, 1752 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1753 ioc->port0_mode, ioc->port1_mode);
1754 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1755 swab32(asicmode));
1756 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1622 swab32(boot_type)); 1757 swab32(boot_type));
1623 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF, 1758 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1624 swab32(boot_env)); 1759 swab32(boot_env));
1625} 1760}
1626 1761
@@ -1636,6 +1771,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1636 attr->adapter_prop = be32_to_cpu(attr->adapter_prop); 1771 attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1637 attr->card_type = be32_to_cpu(attr->card_type); 1772 attr->card_type = be32_to_cpu(attr->card_type);
1638 attr->maxfrsize = be16_to_cpu(attr->maxfrsize); 1773 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
1774 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
1639 1775
1640 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); 1776 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1641} 1777}
@@ -1690,7 +1826,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1690 * Cleanup any pending requests. 1826 * Cleanup any pending requests.
1691 */ 1827 */
1692static void 1828static void
1693bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) 1829bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
1694{ 1830{
1695 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; 1831 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1696 struct bfa_mbox_cmd_s *cmd; 1832 struct bfa_mbox_cmd_s *cmd;
@@ -1752,6 +1888,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1752 /* 1888 /*
1753 * release semaphore. 1889 * release semaphore.
1754 */ 1890 */
1891 readl(ioc->ioc_regs.ioc_init_sem_reg);
1755 writel(1, ioc->ioc_regs.ioc_init_sem_reg); 1892 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1756 1893
1757 bfa_trc(ioc, pgnum); 1894 bfa_trc(ioc, pgnum);
@@ -1808,6 +1945,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1808 /* 1945 /*
1809 * release semaphore. 1946 * release semaphore.
1810 */ 1947 */
1948 readl(ioc->ioc_regs.ioc_init_sem_reg);
1811 writel(1, ioc->ioc_regs.ioc_init_sem_reg); 1949 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1812 bfa_trc(ioc, pgnum); 1950 bfa_trc(ioc, pgnum);
1813 return BFA_STATUS_OK; 1951 return BFA_STATUS_OK;
@@ -1816,18 +1954,13 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1816static void 1954static void
1817bfa_ioc_fail_notify(struct bfa_ioc_s *ioc) 1955bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1818{ 1956{
1819 struct list_head *qe;
1820 struct bfa_ioc_hbfail_notify_s *notify;
1821 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 1957 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1822 1958
1823 /* 1959 /*
1824 * Notify driver and common modules registered for notification. 1960 * Notify driver and common modules registered for notification.
1825 */ 1961 */
1826 ioc->cbfn->hbfail_cbfn(ioc->bfa); 1962 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1827 list_for_each(qe, &ioc->hb_notify_q) { 1963 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1828 notify = (struct bfa_ioc_hbfail_notify_s *) qe;
1829 notify->cbfn(notify->cbarg);
1830 }
1831 1964
1832 bfa_ioc_debug_save_ftrc(ioc); 1965 bfa_ioc_debug_save_ftrc(ioc);
1833 1966
@@ -1864,6 +1997,7 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1864 /* 1997 /*
1865 * release semaphore. 1998 * release semaphore.
1866 */ 1999 */
2000 readl(ioc->ioc_regs.ioc_init_sem_reg);
1867 writel(1, ioc->ioc_regs.ioc_init_sem_reg); 2001 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1868 2002
1869 return BFA_STATUS_OK; 2003 return BFA_STATUS_OK;
@@ -1876,8 +2010,6 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1876void 2010void
1877bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) 2011bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1878{ 2012{
1879 void __iomem *rb;
1880
1881 bfa_ioc_stats(ioc, ioc_boots); 2013 bfa_ioc_stats(ioc, ioc_boots);
1882 2014
1883 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) 2015 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
@@ -1886,22 +2018,16 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1886 /* 2018 /*
1887 * Initialize IOC state of all functions on a chip reset. 2019 * Initialize IOC state of all functions on a chip reset.
1888 */ 2020 */
1889 rb = ioc->pcidev.pci_bar_kva; 2021 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
1890 if (boot_type == BFI_BOOT_TYPE_MEMTEST) { 2022 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
1891 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG)); 2023 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
1892 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1893 } else { 2024 } else {
1894 writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG)); 2025 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
1895 writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG)); 2026 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
1896 } 2027 }
1897 2028
1898 bfa_ioc_msgflush(ioc); 2029 bfa_ioc_msgflush(ioc);
1899 bfa_ioc_download_fw(ioc, boot_type, boot_env); 2030 bfa_ioc_download_fw(ioc, boot_type, boot_env);
1900
1901 /*
1902 * Enable interrupts just before starting LPU
1903 */
1904 ioc->cbfn->reset_cbfn(ioc->bfa);
1905 bfa_ioc_lpu_start(ioc); 2031 bfa_ioc_lpu_start(ioc);
1906} 2032}
1907 2033
@@ -1932,13 +2058,17 @@ bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
1932 (r32 != BFI_IOC_MEMTEST)); 2058 (r32 != BFI_IOC_MEMTEST));
1933} 2059}
1934 2060
1935void 2061bfa_boolean_t
1936bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) 2062bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1937{ 2063{
1938 __be32 *msgp = mbmsg; 2064 __be32 *msgp = mbmsg;
1939 u32 r32; 2065 u32 r32;
1940 int i; 2066 int i;
1941 2067
2068 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2069 if ((r32 & 1) == 0)
2070 return BFA_FALSE;
2071
1942 /* 2072 /*
1943 * read the MBOX msg 2073 * read the MBOX msg
1944 */ 2074 */
@@ -1954,6 +2084,8 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1954 */ 2084 */
1955 writel(1, ioc->ioc_regs.lpu_mbox_cmd); 2085 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1956 readl(ioc->ioc_regs.lpu_mbox_cmd); 2086 readl(ioc->ioc_regs.lpu_mbox_cmd);
2087
2088 return BFA_TRUE;
1957} 2089}
1958 2090
1959void 2091void
@@ -1970,11 +2102,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1970 case BFI_IOC_I2H_HBEAT: 2102 case BFI_IOC_I2H_HBEAT:
1971 break; 2103 break;
1972 2104
1973 case BFI_IOC_I2H_READY_EVENT:
1974 bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1975 break;
1976
1977 case BFI_IOC_I2H_ENABLE_REPLY: 2105 case BFI_IOC_I2H_ENABLE_REPLY:
2106 ioc->port_mode = ioc->port_mode_cfg =
2107 (enum bfa_mode_s)msg->fw_event.port_mode;
2108 ioc->ad_cap_bm = msg->fw_event.cap_bm;
1978 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); 2109 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1979 break; 2110 break;
1980 2111
@@ -1986,6 +2117,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1986 bfa_ioc_getattr_reply(ioc); 2117 bfa_ioc_getattr_reply(ioc);
1987 break; 2118 break;
1988 2119
2120 case BFI_IOC_I2H_ACQ_ADDR_REPLY:
2121 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
2122 break;
2123
1989 default: 2124 default:
1990 bfa_trc(ioc, msg->mh.msg_id); 2125 bfa_trc(ioc, msg->mh.msg_id);
1991 WARN_ON(1); 2126 WARN_ON(1);
@@ -2011,7 +2146,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2011 ioc->iocpf.ioc = ioc; 2146 ioc->iocpf.ioc = ioc;
2012 2147
2013 bfa_ioc_mbox_attach(ioc); 2148 bfa_ioc_mbox_attach(ioc);
2014 INIT_LIST_HEAD(&ioc->hb_notify_q); 2149 INIT_LIST_HEAD(&ioc->notify_q);
2015 2150
2016 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); 2151 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2017 bfa_fsm_send_event(ioc, IOC_E_RESET); 2152 bfa_fsm_send_event(ioc, IOC_E_RESET);
@@ -2024,6 +2159,7 @@ void
2024bfa_ioc_detach(struct bfa_ioc_s *ioc) 2159bfa_ioc_detach(struct bfa_ioc_s *ioc)
2025{ 2160{
2026 bfa_fsm_send_event(ioc, IOC_E_DETACH); 2161 bfa_fsm_send_event(ioc, IOC_E_DETACH);
2162 INIT_LIST_HEAD(&ioc->notify_q);
2027} 2163}
2028 2164
2029/* 2165/*
@@ -2033,20 +2169,80 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc)
2033 */ 2169 */
2034void 2170void
2035bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, 2171bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2036 enum bfi_mclass mc) 2172 enum bfi_pcifn_class clscode)
2037{ 2173{
2038 ioc->ioc_mc = mc; 2174 ioc->clscode = clscode;
2039 ioc->pcidev = *pcidev; 2175 ioc->pcidev = *pcidev;
2040 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); 2176
2041 ioc->cna = ioc->ctdev && !ioc->fcmode; 2177 /*
2178 * Initialize IOC and device personality
2179 */
2180 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2181 ioc->asic_mode = BFI_ASIC_MODE_FC;
2182
2183 switch (pcidev->device_id) {
2184 case BFA_PCI_DEVICE_ID_FC_8G1P:
2185 case BFA_PCI_DEVICE_ID_FC_8G2P:
2186 ioc->asic_gen = BFI_ASIC_GEN_CB;
2187 ioc->fcmode = BFA_TRUE;
2188 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2189 ioc->ad_cap_bm = BFA_CM_HBA;
2190 break;
2191
2192 case BFA_PCI_DEVICE_ID_CT:
2193 ioc->asic_gen = BFI_ASIC_GEN_CT;
2194 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2195 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2196 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2197 ioc->ad_cap_bm = BFA_CM_CNA;
2198 break;
2199
2200 case BFA_PCI_DEVICE_ID_CT_FC:
2201 ioc->asic_gen = BFI_ASIC_GEN_CT;
2202 ioc->fcmode = BFA_TRUE;
2203 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2204 ioc->ad_cap_bm = BFA_CM_HBA;
2205 break;
2206
2207 case BFA_PCI_DEVICE_ID_CT2:
2208 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2209 if (clscode == BFI_PCIFN_CLASS_FC &&
2210 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2211 ioc->asic_mode = BFI_ASIC_MODE_FC16;
2212 ioc->fcmode = BFA_TRUE;
2213 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2214 ioc->ad_cap_bm = BFA_CM_HBA;
2215 } else {
2216 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2217 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2218 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2219 ioc->port_mode =
2220 ioc->port_mode_cfg = BFA_MODE_CNA;
2221 ioc->ad_cap_bm = BFA_CM_CNA;
2222 } else {
2223 ioc->port_mode =
2224 ioc->port_mode_cfg = BFA_MODE_NIC;
2225 ioc->ad_cap_bm = BFA_CM_NIC;
2226 }
2227 }
2228 break;
2229
2230 default:
2231 WARN_ON(1);
2232 }
2042 2233
2043 /* 2234 /*
2044 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c 2235 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2045 */ 2236 */
2046 if (ioc->ctdev) 2237 if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2047 bfa_ioc_set_ct_hwif(ioc);
2048 else
2049 bfa_ioc_set_cb_hwif(ioc); 2238 bfa_ioc_set_cb_hwif(ioc);
2239 else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2240 bfa_ioc_set_ct_hwif(ioc);
2241 else {
2242 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2243 bfa_ioc_set_ct2_hwif(ioc);
2244 bfa_ioc_ct2_poweron(ioc);
2245 }
2050 2246
2051 bfa_ioc_map_port(ioc); 2247 bfa_ioc_map_port(ioc);
2052 bfa_ioc_reg_init(ioc); 2248 bfa_ioc_reg_init(ioc);
@@ -2172,36 +2368,38 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2172 struct bfi_mbmsg_s m; 2368 struct bfi_mbmsg_s m;
2173 int mc; 2369 int mc;
2174 2370
2175 bfa_ioc_msgget(ioc, &m); 2371 if (bfa_ioc_msgget(ioc, &m)) {
2372 /*
2373 * Treat IOC message class as special.
2374 */
2375 mc = m.mh.msg_class;
2376 if (mc == BFI_MC_IOC) {
2377 bfa_ioc_isr(ioc, &m);
2378 return;
2379 }
2176 2380
2177 /* 2381 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2178 * Treat IOC message class as special. 2382 return;
2179 */ 2383
2180 mc = m.mh.msg_class; 2384 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2181 if (mc == BFI_MC_IOC) {
2182 bfa_ioc_isr(ioc, &m);
2183 return;
2184 } 2385 }
2185 2386
2186 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) 2387 bfa_ioc_lpu_read_stat(ioc);
2187 return;
2188 2388
2189 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); 2389 /*
2390 * Try to send pending mailbox commands
2391 */
2392 bfa_ioc_mbox_poll(ioc);
2190} 2393}
2191 2394
2192void 2395void
2193bfa_ioc_error_isr(struct bfa_ioc_s *ioc) 2396bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2194{ 2397{
2398 bfa_ioc_stats(ioc, ioc_hbfails);
2399 ioc->stats.hb_count = ioc->hb_count;
2195 bfa_fsm_send_event(ioc, IOC_E_HWERROR); 2400 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2196} 2401}
2197 2402
2198void
2199bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
2200{
2201 ioc->fcmode = BFA_TRUE;
2202 ioc->port_id = bfa_ioc_pcifn(ioc);
2203}
2204
2205/* 2403/*
2206 * return true if IOC is disabled 2404 * return true if IOC is disabled
2207 */ 2405 */
@@ -2213,6 +2411,15 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2213} 2411}
2214 2412
2215/* 2413/*
2414 * Return TRUE if IOC is in acquiring address state
2415 */
2416bfa_boolean_t
2417bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
2418{
2419 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
2420}
2421
2422/*
2216 * return true if IOC firmware is different. 2423 * return true if IOC firmware is different.
2217 */ 2424 */
2218bfa_boolean_t 2425bfa_boolean_t
@@ -2239,17 +2446,16 @@ bfa_boolean_t
2239bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) 2446bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2240{ 2447{
2241 u32 ioc_state; 2448 u32 ioc_state;
2242 void __iomem *rb = ioc->pcidev.pci_bar_kva;
2243 2449
2244 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) 2450 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2245 return BFA_FALSE; 2451 return BFA_FALSE;
2246 2452
2247 ioc_state = readl(rb + BFA_IOC0_STATE_REG); 2453 ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
2248 if (!bfa_ioc_state_disabled(ioc_state)) 2454 if (!bfa_ioc_state_disabled(ioc_state))
2249 return BFA_FALSE; 2455 return BFA_FALSE;
2250 2456
2251 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { 2457 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2252 ioc_state = readl(rb + BFA_IOC1_STATE_REG); 2458 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
2253 if (!bfa_ioc_state_disabled(ioc_state)) 2459 if (!bfa_ioc_state_disabled(ioc_state))
2254 return BFA_FALSE; 2460 return BFA_FALSE;
2255 } 2461 }
@@ -2308,24 +2514,21 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2308 2514
2309 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); 2515 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2310 2516
2311 ad_attr->cna_capable = ioc->cna; 2517 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2312 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna && 2518 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2313 !ad_attr->is_mezz; 2519 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2314} 2520}
2315 2521
2316enum bfa_ioc_type_e 2522enum bfa_ioc_type_e
2317bfa_ioc_get_type(struct bfa_ioc_s *ioc) 2523bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2318{ 2524{
2319 if (!ioc->ctdev || ioc->fcmode) 2525 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2320 return BFA_IOC_TYPE_FC;
2321 else if (ioc->ioc_mc == BFI_MC_IOCFC)
2322 return BFA_IOC_TYPE_FCoE;
2323 else if (ioc->ioc_mc == BFI_MC_LL)
2324 return BFA_IOC_TYPE_LL; 2526 return BFA_IOC_TYPE_LL;
2325 else { 2527
2326 WARN_ON(ioc->ioc_mc != BFI_MC_LL); 2528 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2327 return BFA_IOC_TYPE_LL; 2529
2328 } 2530 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2531 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2329} 2532}
2330 2533
2331void 2534void
@@ -2384,11 +2587,8 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2384 2587
2385 ioc_attr = ioc->attr; 2588 ioc_attr = ioc->attr;
2386 2589
2387 /*
2388 * model name
2389 */
2390 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", 2590 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2391 BFA_MFG_NAME, ioc_attr->card_type); 2591 BFA_MFG_NAME, ioc_attr->card_type);
2392} 2592}
2393 2593
2394enum bfa_ioc_state 2594enum bfa_ioc_state
@@ -2438,6 +2638,9 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2438 2638
2439 ioc_attr->state = bfa_ioc_get_state(ioc); 2639 ioc_attr->state = bfa_ioc_get_state(ioc);
2440 ioc_attr->port_id = ioc->port_id; 2640 ioc_attr->port_id = ioc->port_id;
2641 ioc_attr->port_mode = ioc->port_mode;
2642 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2643 ioc_attr->cap_bm = ioc->ad_cap_bm;
2441 2644
2442 ioc_attr->ioc_type = bfa_ioc_get_type(ioc); 2645 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2443 2646
@@ -2475,12 +2678,6 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2475 return m; 2678 return m;
2476} 2679}
2477 2680
2478bfa_boolean_t
2479bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
2480{
2481 return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
2482}
2483
2484/* 2681/*
2485 * Retrieve saved firmware trace from a prior IOC failure. 2682 * Retrieve saved firmware trace from a prior IOC failure.
2486 */ 2683 */
@@ -2531,7 +2728,7 @@ bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2531 2728
2532 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC, 2729 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2533 bfa_ioc_portid(ioc)); 2730 bfa_ioc_portid(ioc));
2534 req->ioc_class = ioc->ioc_mc; 2731 req->clscode = cpu_to_be16(ioc->clscode);
2535 bfa_ioc_mbox_queue(ioc, &cmd); 2732 bfa_ioc_mbox_queue(ioc, &cmd);
2536} 2733}
2537 2734
@@ -2673,6 +2870,7 @@ static void
2673bfa_ioc_recover(struct bfa_ioc_s *ioc) 2870bfa_ioc_recover(struct bfa_ioc_s *ioc)
2674{ 2871{
2675 bfa_ioc_stats(ioc, ioc_hbfails); 2872 bfa_ioc_stats(ioc, ioc_hbfails);
2873 ioc->stats.hb_count = ioc->hb_count;
2676 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2874 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2677} 2875}
2678 2876
@@ -2703,6 +2901,34 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
2703 bfa_ioc_hw_sem_get(ioc); 2901 bfa_ioc_hw_sem_get(ioc);
2704} 2902}
2705 2903
2904static void
2905bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2906{
2907 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2908
2909 bfa_trc(ioc, fwstate);
2910
2911 if (fwstate == BFI_IOC_DISABLED) {
2912 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2913 return;
2914 }
2915
2916 if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
2917 bfa_iocpf_timeout(ioc);
2918 else {
2919 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2920 bfa_iocpf_poll_timer_start(ioc);
2921 }
2922}
2923
2924static void
2925bfa_iocpf_poll_timeout(void *ioc_arg)
2926{
2927 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2928
2929 bfa_ioc_poll_fwinit(ioc);
2930}
2931
2706/* 2932/*
2707 * bfa timer function 2933 * bfa timer function
2708 */ 2934 */
@@ -2770,3 +2996,2423 @@ bfa_timer_stop(struct bfa_timer_s *timer)
2770 2996
2771 list_del(&timer->qe); 2997 list_del(&timer->qe);
2772} 2998}
2999
3000/*
3001 * ASIC block related
3002 */
3003static void
3004bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3005{
3006 struct bfa_ablk_cfg_inst_s *cfg_inst;
3007 int i, j;
3008 u16 be16;
3009 u32 be32;
3010
3011 for (i = 0; i < BFA_ABLK_MAX; i++) {
3012 cfg_inst = &cfg->inst[i];
3013 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3014 be16 = cfg_inst->pf_cfg[j].pers;
3015 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3016 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3017 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3018 be16 = cfg_inst->pf_cfg[j].num_vectors;
3019 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3020 be32 = cfg_inst->pf_cfg[j].bw;
3021 cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32);
3022 }
3023 }
3024}
3025
3026static void
3027bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3028{
3029 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3030 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3031 bfa_ablk_cbfn_t cbfn;
3032
3033 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3034 bfa_trc(ablk->ioc, msg->mh.msg_id);
3035
3036 switch (msg->mh.msg_id) {
3037 case BFI_ABLK_I2H_QUERY:
3038 if (rsp->status == BFA_STATUS_OK) {
3039 memcpy(ablk->cfg, ablk->dma_addr.kva,
3040 sizeof(struct bfa_ablk_cfg_s));
3041 bfa_ablk_config_swap(ablk->cfg);
3042 ablk->cfg = NULL;
3043 }
3044 break;
3045
3046 case BFI_ABLK_I2H_ADPT_CONFIG:
3047 case BFI_ABLK_I2H_PORT_CONFIG:
3048 /* update config port mode */
3049 ablk->ioc->port_mode_cfg = rsp->port_mode;
3050
3051 case BFI_ABLK_I2H_PF_DELETE:
3052 case BFI_ABLK_I2H_PF_UPDATE:
3053 case BFI_ABLK_I2H_OPTROM_ENABLE:
3054 case BFI_ABLK_I2H_OPTROM_DISABLE:
3055 /* No-op */
3056 break;
3057
3058 case BFI_ABLK_I2H_PF_CREATE:
3059 *(ablk->pcifn) = rsp->pcifn;
3060 ablk->pcifn = NULL;
3061 break;
3062
3063 default:
3064 WARN_ON(1);
3065 }
3066
3067 ablk->busy = BFA_FALSE;
3068 if (ablk->cbfn) {
3069 cbfn = ablk->cbfn;
3070 ablk->cbfn = NULL;
3071 cbfn(ablk->cbarg, rsp->status);
3072 }
3073}
3074
3075static void
3076bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3077{
3078 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3079
3080 bfa_trc(ablk->ioc, event);
3081
3082 switch (event) {
3083 case BFA_IOC_E_ENABLED:
3084 WARN_ON(ablk->busy != BFA_FALSE);
3085 break;
3086
3087 case BFA_IOC_E_DISABLED:
3088 case BFA_IOC_E_FAILED:
3089 /* Fail any pending requests */
3090 ablk->pcifn = NULL;
3091 if (ablk->busy) {
3092 if (ablk->cbfn)
3093 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3094 ablk->cbfn = NULL;
3095 ablk->busy = BFA_FALSE;
3096 }
3097 break;
3098
3099 default:
3100 WARN_ON(1);
3101 break;
3102 }
3103}
3104
3105u32
3106bfa_ablk_meminfo(void)
3107{
3108 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3109}
3110
3111void
3112bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3113{
3114 ablk->dma_addr.kva = dma_kva;
3115 ablk->dma_addr.pa = dma_pa;
3116}
3117
3118void
3119bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3120{
3121 ablk->ioc = ioc;
3122
3123 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3124 bfa_q_qe_init(&ablk->ioc_notify);
3125 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3126 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3127}
3128
3129bfa_status_t
3130bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3131 bfa_ablk_cbfn_t cbfn, void *cbarg)
3132{
3133 struct bfi_ablk_h2i_query_s *m;
3134
3135 WARN_ON(!ablk_cfg);
3136
3137 if (!bfa_ioc_is_operational(ablk->ioc)) {
3138 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3139 return BFA_STATUS_IOC_FAILURE;
3140 }
3141
3142 if (ablk->busy) {
3143 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3144 return BFA_STATUS_DEVBUSY;
3145 }
3146
3147 ablk->cfg = ablk_cfg;
3148 ablk->cbfn = cbfn;
3149 ablk->cbarg = cbarg;
3150 ablk->busy = BFA_TRUE;
3151
3152 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3153 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3154 bfa_ioc_portid(ablk->ioc));
3155 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3156 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3157
3158 return BFA_STATUS_OK;
3159}
3160
3161bfa_status_t
3162bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3163 u8 port, enum bfi_pcifn_class personality, int bw,
3164 bfa_ablk_cbfn_t cbfn, void *cbarg)
3165{
3166 struct bfi_ablk_h2i_pf_req_s *m;
3167
3168 if (!bfa_ioc_is_operational(ablk->ioc)) {
3169 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3170 return BFA_STATUS_IOC_FAILURE;
3171 }
3172
3173 if (ablk->busy) {
3174 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3175 return BFA_STATUS_DEVBUSY;
3176 }
3177
3178 ablk->pcifn = pcifn;
3179 ablk->cbfn = cbfn;
3180 ablk->cbarg = cbarg;
3181 ablk->busy = BFA_TRUE;
3182
3183 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3184 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3185 bfa_ioc_portid(ablk->ioc));
3186 m->pers = cpu_to_be16((u16)personality);
3187 m->bw = cpu_to_be32(bw);
3188 m->port = port;
3189 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3190
3191 return BFA_STATUS_OK;
3192}
3193
3194bfa_status_t
3195bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3196 bfa_ablk_cbfn_t cbfn, void *cbarg)
3197{
3198 struct bfi_ablk_h2i_pf_req_s *m;
3199
3200 if (!bfa_ioc_is_operational(ablk->ioc)) {
3201 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3202 return BFA_STATUS_IOC_FAILURE;
3203 }
3204
3205 if (ablk->busy) {
3206 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3207 return BFA_STATUS_DEVBUSY;
3208 }
3209
3210 ablk->cbfn = cbfn;
3211 ablk->cbarg = cbarg;
3212 ablk->busy = BFA_TRUE;
3213
3214 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3215 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3216 bfa_ioc_portid(ablk->ioc));
3217 m->pcifn = (u8)pcifn;
3218 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3219
3220 return BFA_STATUS_OK;
3221}
3222
3223bfa_status_t
3224bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3225 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3226{
3227 struct bfi_ablk_h2i_cfg_req_s *m;
3228
3229 if (!bfa_ioc_is_operational(ablk->ioc)) {
3230 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3231 return BFA_STATUS_IOC_FAILURE;
3232 }
3233
3234 if (ablk->busy) {
3235 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3236 return BFA_STATUS_DEVBUSY;
3237 }
3238
3239 ablk->cbfn = cbfn;
3240 ablk->cbarg = cbarg;
3241 ablk->busy = BFA_TRUE;
3242
3243 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3244 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3245 bfa_ioc_portid(ablk->ioc));
3246 m->mode = (u8)mode;
3247 m->max_pf = (u8)max_pf;
3248 m->max_vf = (u8)max_vf;
3249 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3250
3251 return BFA_STATUS_OK;
3252}
3253
3254bfa_status_t
3255bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3256 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3257{
3258 struct bfi_ablk_h2i_cfg_req_s *m;
3259
3260 if (!bfa_ioc_is_operational(ablk->ioc)) {
3261 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3262 return BFA_STATUS_IOC_FAILURE;
3263 }
3264
3265 if (ablk->busy) {
3266 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3267 return BFA_STATUS_DEVBUSY;
3268 }
3269
3270 ablk->cbfn = cbfn;
3271 ablk->cbarg = cbarg;
3272 ablk->busy = BFA_TRUE;
3273
3274 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3275 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3276 bfa_ioc_portid(ablk->ioc));
3277 m->port = (u8)port;
3278 m->mode = (u8)mode;
3279 m->max_pf = (u8)max_pf;
3280 m->max_vf = (u8)max_vf;
3281 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3282
3283 return BFA_STATUS_OK;
3284}
3285
3286bfa_status_t
3287bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3288 bfa_ablk_cbfn_t cbfn, void *cbarg)
3289{
3290 struct bfi_ablk_h2i_pf_req_s *m;
3291
3292 if (!bfa_ioc_is_operational(ablk->ioc)) {
3293 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3294 return BFA_STATUS_IOC_FAILURE;
3295 }
3296
3297 if (ablk->busy) {
3298 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3299 return BFA_STATUS_DEVBUSY;
3300 }
3301
3302 ablk->cbfn = cbfn;
3303 ablk->cbarg = cbarg;
3304 ablk->busy = BFA_TRUE;
3305
3306 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3307 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3308 bfa_ioc_portid(ablk->ioc));
3309 m->pcifn = (u8)pcifn;
3310 m->bw = cpu_to_be32(bw);
3311 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3312
3313 return BFA_STATUS_OK;
3314}
3315
3316bfa_status_t
3317bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3318{
3319 struct bfi_ablk_h2i_optrom_s *m;
3320
3321 if (!bfa_ioc_is_operational(ablk->ioc)) {
3322 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3323 return BFA_STATUS_IOC_FAILURE;
3324 }
3325
3326 if (ablk->busy) {
3327 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3328 return BFA_STATUS_DEVBUSY;
3329 }
3330
3331 ablk->cbfn = cbfn;
3332 ablk->cbarg = cbarg;
3333 ablk->busy = BFA_TRUE;
3334
3335 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3336 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3337 bfa_ioc_portid(ablk->ioc));
3338 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3339
3340 return BFA_STATUS_OK;
3341}
3342
3343bfa_status_t
3344bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3345{
3346 struct bfi_ablk_h2i_optrom_s *m;
3347
3348 if (!bfa_ioc_is_operational(ablk->ioc)) {
3349 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3350 return BFA_STATUS_IOC_FAILURE;
3351 }
3352
3353 if (ablk->busy) {
3354 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3355 return BFA_STATUS_DEVBUSY;
3356 }
3357
3358 ablk->cbfn = cbfn;
3359 ablk->cbarg = cbarg;
3360 ablk->busy = BFA_TRUE;
3361
3362 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3363 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3364 bfa_ioc_portid(ablk->ioc));
3365 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3366
3367 return BFA_STATUS_OK;
3368}
3369
3370/*
3371 * SFP module specific
3372 */
3373
3374/* forward declarations */
3375static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3376static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3377static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3378 enum bfa_port_speed portspeed);
3379
3380static void
3381bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3382{
3383 bfa_trc(sfp, sfp->lock);
3384 if (sfp->cbfn)
3385 sfp->cbfn(sfp->cbarg, sfp->status);
3386 sfp->lock = 0;
3387 sfp->cbfn = NULL;
3388}
3389
3390static void
3391bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3392{
3393 bfa_trc(sfp, sfp->portspeed);
3394 if (sfp->media) {
3395 bfa_sfp_media_get(sfp);
3396 if (sfp->state_query_cbfn)
3397 sfp->state_query_cbfn(sfp->state_query_cbarg,
3398 sfp->status);
3399 sfp->media = NULL;
3400 }
3401
3402 if (sfp->portspeed) {
3403 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3404 if (sfp->state_query_cbfn)
3405 sfp->state_query_cbfn(sfp->state_query_cbarg,
3406 sfp->status);
3407 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3408 }
3409
3410 sfp->state_query_lock = 0;
3411 sfp->state_query_cbfn = NULL;
3412}
3413
3414/*
3415 * IOC event handler.
3416 */
3417static void
3418bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3419{
3420 struct bfa_sfp_s *sfp = sfp_arg;
3421
3422 bfa_trc(sfp, event);
3423 bfa_trc(sfp, sfp->lock);
3424 bfa_trc(sfp, sfp->state_query_lock);
3425
3426 switch (event) {
3427 case BFA_IOC_E_DISABLED:
3428 case BFA_IOC_E_FAILED:
3429 if (sfp->lock) {
3430 sfp->status = BFA_STATUS_IOC_FAILURE;
3431 bfa_cb_sfp_show(sfp);
3432 }
3433
3434 if (sfp->state_query_lock) {
3435 sfp->status = BFA_STATUS_IOC_FAILURE;
3436 bfa_cb_sfp_state_query(sfp);
3437 }
3438 break;
3439
3440 default:
3441 break;
3442 }
3443}
3444
3445/*
3446 * SFP get data send
3447 */
3448static void
3449bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3450{
3451 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3452
3453 bfa_trc(sfp, req->memtype);
3454
3455 /* build host command */
3456 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3457 bfa_ioc_portid(sfp->ioc));
3458
3459 /* send mbox cmd */
3460 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3461}
3462
3463/*
3464 * SFP is valid, read sfp data
3465 */
3466static void
3467bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3468{
3469 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3470
3471 WARN_ON(sfp->lock != 0);
3472 bfa_trc(sfp, sfp->state);
3473
3474 sfp->lock = 1;
3475 sfp->memtype = memtype;
3476 req->memtype = memtype;
3477
3478 /* Setup SG list */
3479 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3480
3481 bfa_sfp_getdata_send(sfp);
3482}
3483
3484/*
3485 * SFP show complete
3486 */
3487static void
3488bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3489{
3490 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3491
3492 if (!sfp->lock) {
3493 /*
3494 * receiving response after ioc failure
3495 */
3496 bfa_trc(sfp, sfp->lock);
3497 return;
3498 }
3499
3500 bfa_trc(sfp, rsp->status);
3501 if (rsp->status == BFA_STATUS_OK) {
3502 sfp->data_valid = 1;
3503 if (sfp->state == BFA_SFP_STATE_VALID)
3504 sfp->status = BFA_STATUS_OK;
3505 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3506 sfp->status = BFA_STATUS_SFP_UNSUPP;
3507 else
3508 bfa_trc(sfp, sfp->state);
3509 } else {
3510 sfp->data_valid = 0;
3511 sfp->status = rsp->status;
3512 /* sfpshow shouldn't change sfp state */
3513 }
3514
3515 bfa_trc(sfp, sfp->memtype);
3516 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3517 bfa_trc(sfp, sfp->data_valid);
3518 if (sfp->data_valid) {
3519 u32 size = sizeof(struct sfp_mem_s);
3520 u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3521 memcpy(des, sfp->dbuf_kva, size);
3522 }
3523 /*
3524 * Queue completion callback.
3525 */
3526 bfa_cb_sfp_show(sfp);
3527 } else
3528 sfp->lock = 0;
3529
3530 bfa_trc(sfp, sfp->state_query_lock);
3531 if (sfp->state_query_lock) {
3532 sfp->state = rsp->state;
3533 /* Complete callback */
3534 bfa_cb_sfp_state_query(sfp);
3535 }
3536}
3537
3538/*
3539 * SFP query fw sfp state
3540 */
3541static void
3542bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3543{
3544 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3545
3546 /* Should not be doing query if not in _INIT state */
3547 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3548 WARN_ON(sfp->state_query_lock != 0);
3549 bfa_trc(sfp, sfp->state);
3550
3551 sfp->state_query_lock = 1;
3552 req->memtype = 0;
3553
3554 if (!sfp->lock)
3555 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3556}
3557
3558static void
3559bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3560{
3561 enum bfa_defs_sfp_media_e *media = sfp->media;
3562
3563 *media = BFA_SFP_MEDIA_UNKNOWN;
3564
3565 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3566 *media = BFA_SFP_MEDIA_UNSUPPORT;
3567 else if (sfp->state == BFA_SFP_STATE_VALID) {
3568 union sfp_xcvr_e10g_code_u e10g;
3569 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3570 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3571 (sfpmem->srlid_base.xcvr[5] >> 1);
3572
3573 e10g.b = sfpmem->srlid_base.xcvr[0];
3574 bfa_trc(sfp, e10g.b);
3575 bfa_trc(sfp, xmtr_tech);
3576 /* check fc transmitter tech */
3577 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3578 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3579 (xmtr_tech & SFP_XMTR_TECH_CA))
3580 *media = BFA_SFP_MEDIA_CU;
3581 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3582 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3583 *media = BFA_SFP_MEDIA_EL;
3584 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3585 (xmtr_tech & SFP_XMTR_TECH_LC))
3586 *media = BFA_SFP_MEDIA_LW;
3587 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3588 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3589 (xmtr_tech & SFP_XMTR_TECH_SA))
3590 *media = BFA_SFP_MEDIA_SW;
3591 /* Check 10G Ethernet Compilance code */
3592 else if (e10g.b & 0x10)
3593 *media = BFA_SFP_MEDIA_SW;
3594 else if (e10g.b & 0x60)
3595 *media = BFA_SFP_MEDIA_LW;
3596 else if (e10g.r.e10g_unall & 0x80)
3597 *media = BFA_SFP_MEDIA_UNKNOWN;
3598 else
3599 bfa_trc(sfp, 0);
3600 } else
3601 bfa_trc(sfp, sfp->state);
3602}
3603
3604static bfa_status_t
3605bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3606{
3607 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3608 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3609 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3610 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3611
3612 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3613 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3614 return BFA_STATUS_OK;
3615 else {
3616 bfa_trc(sfp, e10g.b);
3617 return BFA_STATUS_UNSUPP_SPEED;
3618 }
3619 }
3620 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3621 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3622 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3623 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3624 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3625 return BFA_STATUS_OK;
3626 else {
3627 bfa_trc(sfp, portspeed);
3628 bfa_trc(sfp, fc3.b);
3629 bfa_trc(sfp, e10g.b);
3630 return BFA_STATUS_UNSUPP_SPEED;
3631 }
3632}
3633
3634/*
3635 * SFP hmbox handler
3636 */
3637void
3638bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3639{
3640 struct bfa_sfp_s *sfp = sfparg;
3641
3642 switch (msg->mh.msg_id) {
3643 case BFI_SFP_I2H_SHOW:
3644 bfa_sfp_show_comp(sfp, msg);
3645 break;
3646
3647 case BFI_SFP_I2H_SCN:
3648 bfa_trc(sfp, msg->mh.msg_id);
3649 break;
3650
3651 default:
3652 bfa_trc(sfp, msg->mh.msg_id);
3653 WARN_ON(1);
3654 }
3655}
3656
/*
 * Return DMA memory needed by sfp module: one sfp_mem_s buffer,
 * rounded up to the DMA alignment size.
 */
u32
bfa_sfp_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
3665
/*
 * Attach virtual and physical memory for SFP.
 *
 * @param[in] sfp    - bfa sfp module
 * @param[in] ioc    - ioc this sfp module is bound to
 * @param[in] dev    - opaque device back-pointer
 * @param[in] trcmod - trace module
 */
void
bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod)
{
	sfp->dev = dev;
	sfp->ioc = ioc;
	sfp->trcmod = trcmod;

	/* no operation pending, no cached SFP data yet */
	sfp->cbfn = NULL;
	sfp->cbarg = NULL;
	sfp->sfpmem = NULL;
	sfp->lock = 0;
	sfp->data_valid = 0;
	sfp->state = BFA_SFP_STATE_INIT;
	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
	sfp->state_query_cbarg = NULL;
	sfp->media = NULL;
	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	sfp->is_elb = BFA_FALSE;

	/* register mailbox handler and IOC event notification */
	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
	bfa_q_qe_init(&sfp->ioc_notify);
	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
}
3695
3696/*
3697 * Claim Memory for SFP
3698 */
3699void
3700bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3701{
3702 sfp->dbuf_kva = dm_kva;
3703 sfp->dbuf_pa = dm_pa;
3704 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3705
3706 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3707 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3708}
3709
/*
 * Show SFP eeprom content
 *
 * @param[in] sfp - bfa sfp module
 *
 * @param[out] sfpmem - sfp eeprom data (filled when the firmware responds)
 *
 * @param[in] cbfn  - completion callback
 * @param[in] cbarg - completion callback argument
 *
 * Returns BFA_STATUS_OK when the request was queued, an error otherwise.
 */
bfa_status_t
bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
		bfa_cb_sfp_t cbfn, void *cbarg)
{

	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	/* only one show request may be in flight at a time */
	if (sfp->lock) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_DEVBUSY;
	}

	sfp->cbfn = cbfn;
	sfp->cbarg = cbarg;
	sfp->sfpmem = sfpmem;

	/* kick off the EEPROM read; completion arrives via bfa_sfp_intr() */
	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
	return BFA_STATUS_OK;
}
3740
/*
 * Return SFP Media type
 *
 * @param[in] sfp - bfa sfp module
 *
 * @param[out] media - media type of the inserted SFP (filled on completion;
 *                     the original comment said "port speed from user")
 *
 * @param[in] cbfn  - callback used when the SFP state must be queried first
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	sfp->media = media;
	/*
	 * SFP state not known yet: query it and answer through the
	 * callback once the state response arrives.
	 */
	if (sfp->state == BFA_SFP_STATE_INIT) {
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	/* state already known: fill *media synchronously */
	bfa_sfp_media_get(sfp);
	return BFA_STATUS_OK;
}
3774
/*
 * Check if user set port speed is allowed by the SFP
 *
 * @param[in] sfp - bfa sfp module
 * @param[in] portspeed - port speed from user
 * @param[in] cbfn  - callback used when the SFP state must be queried first
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);

	if (!bfa_ioc_is_operational(sfp->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* For Mezz card, all speed is allowed */
	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
		return BFA_STATUS_OK;

	/* Check SFP state */
	sfp->portspeed = portspeed;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		/* state unknown yet: query it and finish via the callback */
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	if (sfp->state == BFA_SFP_STATE_REMOVED ||
	    sfp->state == BFA_SFP_STATE_FAILED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_NO_SFP_DEV;
	}

	if (sfp->state == BFA_SFP_STATE_INSERTED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_DEVBUSY;	/* sfp is reading data */
	}

	/* For eloopback, all speed is allowed */
	if (sfp->is_elb)
		return BFA_STATUS_OK;

	return bfa_sfp_speed_valid(sfp, portspeed);
}
3826
3827/*
3828 * Flash module specific
3829 */
3830
3831/*
3832 * FLASH DMA buffer should be big enough to hold both MFG block and
3833 * asic block(64k) at the same time and also should be 2k aligned to
 * avoid a write segment crossing a sector boundary.
3835 */
3836#define BFA_FLASH_SEG_SZ 2048
3837#define BFA_FLASH_DMA_BUF_SZ \
3838 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3839
3840static void
3841bfa_flash_cb(struct bfa_flash_s *flash)
3842{
3843 flash->op_busy = 0;
3844 if (flash->cbfn)
3845 flash->cbfn(flash->cbarg, flash->status);
3846}
3847
/*
 * IOC event handler for the flash module: fail any in-flight flash
 * operation when the IOC is disabled or fails.
 */
static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_flash_s *flash = cbarg;

	bfa_trc(flash, event);
	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/*
		 * NOTE(review): cbfn is called without a NULL check here -
		 * presumably it is always set while op_busy is 1; confirm
		 * against the request paths.
		 */
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;

	default:
		break;
	}
}
3868
/*
 * Send flash attribute query request.
 *
 * @param[in] cbarg - callback argument (the flash structure)
 */
static void
bfa_flash_query_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_query_req_s *msg =
			(struct bfi_flash_query_req_s *) flash->mb.msg;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		bfa_ioc_portid(flash->ioc));
	/* firmware DMAs the attribute data into the flash DMA buffer */
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
		flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
3887
3888/*
3889 * Send flash write request.
3890 *
3891 * @param[in] cbarg - callback argument
3892 */
3893static void
3894bfa_flash_write_send(struct bfa_flash_s *flash)
3895{
3896 struct bfi_flash_write_req_s *msg =
3897 (struct bfi_flash_write_req_s *) flash->mb.msg;
3898 u32 len;
3899
3900 msg->type = be32_to_cpu(flash->type);
3901 msg->instance = flash->instance;
3902 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
3903 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3904 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3905 msg->length = be32_to_cpu(len);
3906
3907 /* indicate if it's the last msg of the whole write operation */
3908 msg->last = (len == flash->residue) ? 1 : 0;
3909
3910 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
3911 bfa_ioc_portid(flash->ioc));
3912 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3913 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
3914 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3915
3916 flash->residue -= len;
3917 flash->offset += len;
3918}
3919
3920/*
3921 * Send flash read request.
3922 *
3923 * @param[in] cbarg - callback argument
3924 */
3925static void
3926bfa_flash_read_send(void *cbarg)
3927{
3928 struct bfa_flash_s *flash = cbarg;
3929 struct bfi_flash_read_req_s *msg =
3930 (struct bfi_flash_read_req_s *) flash->mb.msg;
3931 u32 len;
3932
3933 msg->type = be32_to_cpu(flash->type);
3934 msg->instance = flash->instance;
3935 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
3936 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3937 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3938 msg->length = be32_to_cpu(len);
3939 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
3940 bfa_ioc_portid(flash->ioc));
3941 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3942 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3943}
3944
3945/*
3946 * Send flash erase request.
3947 *
3948 * @param[in] cbarg - callback argument
3949 */
3950static void
3951bfa_flash_erase_send(void *cbarg)
3952{
3953 struct bfa_flash_s *flash = cbarg;
3954 struct bfi_flash_erase_req_s *msg =
3955 (struct bfi_flash_erase_req_s *) flash->mb.msg;
3956
3957 msg->type = be32_to_cpu(flash->type);
3958 msg->instance = flash->instance;
3959 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
3960 bfa_ioc_portid(flash->ioc));
3961 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3962}
3963
/*
 * Process flash response messages upon receiving interrupts.
 *
 * @param[in] flasharg - flash structure
 * @param[in] msg - message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
{
	struct bfa_flash_s *flash = flasharg;
	u32 status;

	/* overlay the generic mailbox message with per-command layouts */
	union {
		struct bfi_flash_query_rsp_s *query;
		struct bfi_flash_erase_rsp_s *erase;
		struct bfi_flash_write_rsp_s *write;
		struct bfi_flash_read_rsp_s *read;
		struct bfi_mbmsg_s *msg;
	} m;

	m.msg = msg;
	bfa_trc(flash, msg->mh.msg_id);

	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
		/* receiving response after ioc failure */
		bfa_trc(flash, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_OK) {
			u32 i;
			struct bfa_flash_attr_s *attr, *f;

			/*
			 * Copy attribute data from the DMA buffer into the
			 * caller's buffer, byte-swapping each 32-bit field.
			 */
			attr = (struct bfa_flash_attr_s *) flash->ubuf;
			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			bfa_trc(flash, attr->status);
			bfa_trc(flash, attr->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_ERASE_RSP:
		status = be32_to_cpu(m.erase->status);
		bfa_trc(flash, status);
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			/* error, or final chunk acknowledged: complete */
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			/* more data to write: send the next chunk */
			bfa_trc(flash, flash->offset);
			bfa_flash_write_send(flash);
		}
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			/* copy chunk out of the DMA buffer, then continue */
			u32 len = be32_to_cpu(m.read->length);
			bfa_trc(flash, flash->offset);
			bfa_trc(flash, len);
			memcpy(flash->ubuf + flash->offset,
				flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
	case BFI_FLASH_I2H_EVENT:
		bfa_trc(flash, msg->mh.msg_id);
		break;

	default:
		/* unexpected message id from firmware */
		WARN_ON(1);
	}
}
4072
4073/*
4074 * Flash memory info API.
4075 *
4076 * @param[in] mincfg - minimal cfg variable
4077 */
4078u32
4079bfa_flash_meminfo(bfa_boolean_t mincfg)
4080{
4081 /* min driver doesn't need flash */
4082 if (mincfg)
4083 return 0;
4084 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4085}
4086
/*
 * Flash attach API.
 *
 * @param[in] flash - flash structure
 * @param[in] ioc - ioc structure
 * @param[in] dev - device structure
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal cfg variable (original doc listed a
 *                     nonexistent "logmod" parameter)
 */
void
bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	flash->ioc = ioc;
	flash->trcmod = trcmod;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	/* register mailbox handler and IOC event notification */
	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);

	/* min driver doesn't need flash; otherwise the DMA buffer is
	 * assigned later in bfa_flash_memclaim() */
	if (mincfg) {
		flash->dbuf_kva = NULL;
		flash->dbuf_pa = 0;
	}
}
4117
4118/*
4119 * Claim memory for flash
4120 *
4121 * @param[in] flash - flash structure
4122 * @param[in] dm_kva - pointer to virtual memory address
4123 * @param[in] dm_pa - physical memory address
4124 * @param[in] mincfg - minimal cfg variable
4125 */
4126void
4127bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4128 bfa_boolean_t mincfg)
4129{
4130 if (mincfg)
4131 return;
4132
4133 flash->dbuf_kva = dm_kva;
4134 flash->dbuf_pa = dm_pa;
4135 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4136 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4137 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4138}
4139
/*
 * Get flash attribute.
 *
 * @param[in] flash - flash structure
 * @param[in] attr - flash attribute structure (filled asynchronously,
 *                   valid when cbfn reports BFA_STATUS_OK)
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* only one flash operation may be in flight at a time */
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;
	bfa_flash_query_send(flash);

	return BFA_STATUS_OK;
}
4172
/*
 * Erase flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* only one flash operation may be in flight at a time */
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;

	bfa_flash_erase_send(flash);
	return BFA_STATUS_OK;
}
4209
/*
 * Update flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - update data buffer (must stay valid until completion)
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, void *buf, u32 len, u32 offset,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;

	/* the manufacturing partition is write-protected */
	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	/* only one flash operation may be in flight at a time */
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	/* chunked write: residue/offset track progress across mailbox msgs */
	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);
	return BFA_STATUS_OK;
}
4266
/*
 * Read flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - read data buffer (must stay valid until completion)
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, void *buf, u32 len, u32 offset,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;

	/* only one flash operation may be in flight at a time */
	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	/* chunked read: residue/offset track progress across mailbox msgs */
	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;
	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}
4320
4321/*
4322 * DIAG module specific
4323 */
4324
4325#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4326#define BFA_DIAG_FWPING_TOV 1000 /* msec */
4327
/*
 * IOC event handler: fail every pending diag operation (fwping,
 * tsensor, blocking memtest) when the IOC is disabled or fails.
 */
static void
bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
{
	struct bfa_diag_s *diag = diag_arg;

	bfa_trc(diag, event);
	bfa_trc(diag, diag->block);
	bfa_trc(diag, diag->fwping.lock);
	bfa_trc(diag, diag->tsensor.lock);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* complete a pending firmware ping with IOC failure */
		if (diag->fwping.lock) {
			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
			diag->fwping.cbfn(diag->fwping.cbarg,
					diag->fwping.status);
			diag->fwping.lock = 0;
		}

		/* complete a pending temperature query with IOC failure */
		if (diag->tsensor.lock) {
			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
			diag->tsensor.cbfn(diag->tsensor.cbarg,
					diag->tsensor.status);
			diag->tsensor.lock = 0;
		}

		/* abort a blocking operation (memtest) and stop its timer */
		if (diag->block) {
			if (diag->timer_active) {
				bfa_timer_stop(&diag->timer);
				diag->timer_active = 0;
			}

			diag->status = BFA_STATUS_IOC_FAILURE;
			diag->cbfn(diag->cbarg, diag->status);
			diag->block = 0;
		}
		break;

	default:
		break;
	}
}
4372
/*
 * Memtest timer callback: read the memtest result from IOC shared
 * memory, reset the IOC firmware state, and complete the blocked
 * diag operation.
 */
static void
bfa_diag_memtest_done(void *cbarg)
{
	struct bfa_diag_s *diag = cbarg;
	struct bfa_ioc_s *ioc = diag->ioc;
	struct bfa_diag_memtest_result *res = diag->result;
	u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
	u32 pgnum, pgoff, i;

	/* select the shared-memory page that holds the memtest result */
	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	/* NOTE(review): pgoff is computed but never used below */
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	/* read the result structure word by word from smem */
	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
			sizeof(u32)); i++) {
		/* read test result from smem */
		*((u32 *) res + i) =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}

	/* Reset IOC fwstates to BFI_IOC_UNINIT */
	bfa_ioc_reset_fwstate(ioc);

	/* result words are byte-swapped relative to host order */
	res->status = swab32(res->status);
	bfa_trc(diag, res->status);

	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
		diag->status = BFA_STATUS_OK;
	else {
		/* failure: swap and trace the detailed error fields too */
		diag->status = BFA_STATUS_MEMTEST_FAILED;
		res->addr = swab32(res->addr);
		res->exp = swab32(res->exp);
		res->act = swab32(res->act);
		res->err_status = swab32(res->err_status);
		res->err_status1 = swab32(res->err_status1);
		res->err_addr = swab32(res->err_addr);
		bfa_trc(diag, res->addr);
		bfa_trc(diag, res->exp);
		bfa_trc(diag, res->act);
		bfa_trc(diag, res->err_status);
		bfa_trc(diag, res->err_status1);
		bfa_trc(diag, res->err_addr);
	}
	/* complete the blocked diag operation */
	diag->timer_active = 0;
	diag->cbfn(diag->cbarg, diag->status);
	diag->block = 0;
}
4422
/*
 * Firmware ping
 */

/*
 * Perform DMA test directly: fill the DMA buffer with the test
 * pattern and ask the firmware to ping it back.
 */
static void
diag_fwping_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_fwping_req_s *fwping_req;
	u32 i;

	bfa_trc(diag, diag->fwping.dbuf_pa);

	/* fill DMA area with pattern (one u32 at a time) */
	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;

	/* Fill mbox msg */
	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;

	/* Setup SG list */
	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
			diag->fwping.dbuf_pa);
	/* Set up dma count */
	fwping_req->count = cpu_to_be32(diag->fwping.count);
	/* Set up data pattern */
	fwping_req->data = diag->fwping.data;

	/* build host command */
	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
		bfa_ioc_portid(diag->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
}
4460
/*
 * Firmware ping completion: verify both the echoed mailbox data and
 * the DMA buffer pattern, then complete the request via the callback.
 */
static void
diag_fwping_comp(struct bfa_diag_s *diag,
		 struct bfi_diag_fwping_rsp_s *diag_rsp)
{
	u32 rsp_data = diag_rsp->data;
	u8  rsp_dma_status = diag_rsp->dma_status;

	bfa_trc(diag, rsp_data);
	bfa_trc(diag, rsp_dma_status);

	if (rsp_dma_status == BFA_STATUS_OK) {
		u32 i, pat;
		/*
		 * Expected buffer pattern: inverted when the ping count is
		 * odd, original pattern when it is even.
		 */
		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
			diag->fwping.data;
		/* Check mbox data */
		if (diag->fwping.data != rsp_data) {
			bfa_trc(diag, rsp_data);
			diag->fwping.result->dmastatus =
					BFA_STATUS_DATACORRUPTED;
			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
			diag->fwping.cbfn(diag->fwping.cbarg,
					diag->fwping.status);
			diag->fwping.lock = 0;
			return;
		}
		/* Check dma pattern */
		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
				bfa_trc(diag, i);
				bfa_trc(diag, pat);
				bfa_trc(diag,
					*((u32 *)diag->fwping.dbuf_kva + i));
				diag->fwping.result->dmastatus =
						BFA_STATUS_DATACORRUPTED;
				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
				diag->fwping.cbfn(diag->fwping.cbarg,
						diag->fwping.status);
				diag->fwping.lock = 0;
				return;
			}
		}
		/* both checks passed: report success */
		diag->fwping.result->dmastatus = BFA_STATUS_OK;
		diag->fwping.status = BFA_STATUS_OK;
		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
		diag->fwping.lock = 0;
	} else {
		/* firmware reported a DMA failure */
		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
		diag->fwping.lock = 0;
	}
}
4512
4513/*
4514 * Temperature Sensor
4515 */
4516
/*
 * Send a temperature sensor query to the firmware.
 */
static void
diag_tempsensor_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_ts_req_s *msg;

	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
	/*
	 * NOTE(review): this traces msg->temp before the request is built,
	 * i.e. whatever was left in the mailbox buffer - confirm intent.
	 */
	bfa_trc(diag, msg->temp);
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
		bfa_ioc_portid(diag->ioc));
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
}
4530
/*
 * Temperature sensor query completion: copy the firmware response into
 * the caller's result structure and invoke the completion callback.
 */
static void
diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
{
	if (!diag->tsensor.lock) {
		/* receiving response after ioc failure */
		bfa_trc(diag, diag->tsensor.lock);
		return;
	}

	/*
	 * ASIC junction tempsensor is a reg read operation
	 * it will always return OK
	 */
	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
	diag->tsensor.temp->ts_junc = rsp->ts_junc;
	diag->tsensor.temp->ts_brd = rsp->ts_brd;
	diag->tsensor.temp->status = BFA_STATUS_OK;

	/* board sensor reading is only valid when the board reports one */
	if (rsp->ts_brd) {
		if (rsp->status == BFA_STATUS_OK) {
			diag->tsensor.temp->brd_temp =
				be16_to_cpu(rsp->brd_temp);
		} else {
			/* board sensor busy/failed: zero the reading */
			bfa_trc(diag, rsp->status);
			diag->tsensor.temp->brd_temp = 0;
			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
		}
	}
	bfa_trc(diag, rsp->ts_junc);
	bfa_trc(diag, rsp->temp);
	bfa_trc(diag, rsp->ts_brd);
	bfa_trc(diag, rsp->brd_temp);
	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
	diag->tsensor.lock = 0;
}
4566
/*
 * LED Test command: build and queue the LED test request.
 *
 * NOTE: ledtest->freq is converted in place, so the caller's structure
 * is modified by this function.
 */
static void
diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	struct bfi_diag_ledtest_req_s *msg;

	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
			bfa_ioc_portid(diag->ioc));

	/*
	 * convert the freq from N blinks per 10 sec to
	 * crossbow ontime value. We do it here because division is need
	 */
	if (ledtest->freq)
		ledtest->freq = 500 / ledtest->freq;

	/* clamp to a minimum of 1 so the firmware gets a nonzero value */
	if (ledtest->freq == 0)
		ledtest->freq = 1;

	bfa_trc(diag, ledtest->freq);
	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
	msg->cmd = (u8) ledtest->cmd;
	msg->color = (u8) ledtest->color;
	msg->portid = bfa_ioc_portid(diag->ioc);
	msg->led = ledtest->led;
	msg->freq = cpu_to_be16(ledtest->freq);

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
}
4601
/*
 * LED test completion: just release the lock; the response payload is
 * not consumed.
 */
static void
diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg)
{
	bfa_trc(diag, diag->ledtest.lock);
	diag->ledtest.lock = BFA_FALSE;
	/* no bfa_cb_queue is needed because driver is not waiting */
}
4609
/*
 * Port beaconing: build and queue the beacon request.
 *
 * @param[in] diag   - diag data struct
 * @param[in] beacon - beacon on/off
 * @param[in] sec    - beaconing duration in seconds
 */
static void
diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
{
	struct bfi_diag_portbeacon_req_s *msg;

	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
		bfa_ioc_portid(diag->ioc));
	msg->beacon = beacon;
	msg->period = cpu_to_be32(sec);
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
}
4627
/*
 * Port beacon completion: clear the beacon state and notify the
 * driver (if a beacon callback was registered at attach time).
 */
static void
diag_portbeacon_comp(struct bfa_diag_s *diag)
{
	bfa_trc(diag, diag->beacon.state);
	diag->beacon.state = BFA_FALSE;
	if (diag->cbfn_beacon)
		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
}
4636
/*
 * Diag mailbox message handler: dispatch firmware-to-host diag
 * messages to the matching completion routine.
 */
void
bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_diag_s *diag = diagarg;

	switch (msg->mh.msg_id) {
	case BFI_DIAG_I2H_PORTBEACON:
		diag_portbeacon_comp(diag);
		break;
	case BFI_DIAG_I2H_FWPING:
		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_TEMPSENSOR:
		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
		break;
	case BFI_DIAG_I2H_LEDTEST:
		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
		break;
	default:
		/* unexpected message id from firmware */
		bfa_trc(diag, msg->mh.msg_id);
		WARN_ON(1);
	}
}
4663
/*
 * Gen RAM Test
 *
 * @param[in] *diag - diag data struct
 * @param[in] *memtest - mem test params input from upper layer,
 * @param[in] pattern - mem test pattern
 * @param[in] *result - mem test result
 * @param[in] cbfn - mem test callback function
 * @param[in] cbarg - callback function arg
 *
 * @param[out]
 */
bfa_status_t
bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
		u32 pattern, struct bfa_diag_memtest_result *result,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	/*
	 * NOTE(review): pattern is only traced here; presumably the
	 * memtest firmware supplies its own pattern - confirm.
	 */
	bfa_trc(diag, pattern);

	/* memtest is destructive: the adapter must be disabled first */
	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
		return BFA_STATUS_ADAPTER_ENABLED;

	/* check to see if there is another destructive diag cmd running */
	if (diag->block) {
		bfa_trc(diag, diag->block);
		return BFA_STATUS_DEVBUSY;
	} else
		diag->block = 1;

	diag->result = result;
	diag->cbfn = cbfn;
	diag->cbarg = cbarg;

	/* download memtest code and take LPU0 out of reset */
	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);

	/* results are collected by bfa_diag_memtest_done() when this fires */
	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
			bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
	diag->timer_active = 1;
	return BFA_STATUS_OK;
}
4705
/*
 * DIAG firmware ping command
 *
 * @param[in] *diag - diag data struct
 * @param[in] cnt - dma loop count for testing PCIE
 * @param[in] data - data pattern to pass in fw
 * @param[in] *result - pt to bfa_diag_fwping_result_t data struct
 * @param[in] cbfn - callback function
 * @param[in] *cbarg - callback function arg
 *
 * @param[out]
 */
bfa_status_t
bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	bfa_trc(diag, cnt);
	bfa_trc(diag, data);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* not supported on CT2 Ethernet-class functions */
	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
		return BFA_STATUS_CMD_NOTSUPP;

	/* check to see if there is another destructive diag cmd running */
	if (diag->block || diag->fwping.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->fwping.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	diag->fwping.lock = 1;
	diag->fwping.cbfn = cbfn;
	diag->fwping.cbarg = cbarg;
	diag->fwping.result = result;
	diag->fwping.data = data;
	diag->fwping.count = cnt;

	/* Init test results */
	diag->fwping.result->data = 0;
	diag->fwping.result->status = BFA_STATUS_OK;

	/* kick off the first ping */
	diag_fwping_send(diag);
	return BFA_STATUS_OK;
}
4756
/*
 * Read Temperature Sensor
 *
 * @param[in] *diag - diag data struct
 * @param[in] *result - pt to bfa_diag_temp_t data struct
 * @param[in] cbfn - callback function
 * @param[in] *cbarg - callback function arg
 *
 * @param[out]
 */
bfa_status_t
bfa_diag_tsensor_query(struct bfa_diag_s *diag,
		struct bfa_diag_results_tempsensor_s *result,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	/*
	 * check to see if there is a destructive diag cmd running
	 * NOTE(review): unlike bfa_diag_fwping(), the busy check here
	 * precedes the IOC-operational check - confirm the order matters.
	 */
	if (diag->block || diag->tsensor.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->tsensor.lock);
		return BFA_STATUS_DEVBUSY;
	}

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* Init diag mod params */
	diag->tsensor.lock = 1;
	diag->tsensor.temp = result;
	diag->tsensor.cbfn = cbfn;
	diag->tsensor.cbarg = cbarg;

	/* Send msg to fw */
	diag_tempsensor_send(diag);

	return BFA_STATUS_OK;
}
4793
/*
 * LED Test command
 *
 * @param[in] *diag - diag data struct
 * @param[in] *ledtest - pt to ledtest data structure
 *
 * @param[out]
 */
bfa_status_t
bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	bfa_trc(diag, ledtest->cmd);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* LED test and beaconing are mutually exclusive */
	if (diag->beacon.state)
		return BFA_STATUS_BEACON_ON;

	if (diag->ledtest.lock)
		return BFA_STATUS_LEDTEST_OP;

	/* Send msg to fw */
	diag->ledtest.lock = BFA_TRUE;
	diag_ledtest_send(diag, ledtest);

	return BFA_STATUS_OK;
}
4822
4823/*
4824 * Port beaconing command
4825 *
4826 * @param[in] *diag - diag data struct
4827 * @param[in] beacon - port beaconing 1:ON 0:OFF
4828 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
4829 * @param[in] sec - beaconing duration in seconds
4830 *
4831 * @param[out]
4832 */
4833bfa_status_t
4834bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
4835 bfa_boolean_t link_e2e_beacon, uint32_t sec)
4836{
4837 bfa_trc(diag, beacon);
4838 bfa_trc(diag, link_e2e_beacon);
4839 bfa_trc(diag, sec);
4840
4841 if (!bfa_ioc_is_operational(diag->ioc))
4842 return BFA_STATUS_IOC_NON_OP;
4843
4844 if (diag->ledtest.lock)
4845 return BFA_STATUS_LEDTEST_OP;
4846
4847 if (diag->beacon.state && beacon) /* beacon alread on */
4848 return BFA_STATUS_BEACON_ON;
4849
4850 diag->beacon.state = beacon;
4851 diag->beacon.link_e2e = link_e2e_beacon;
4852 if (diag->cbfn_beacon)
4853 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
4854
4855 /* Send msg to fw */
4856 diag_portbeacon_send(diag, beacon, sec);
4857
4858 return BFA_STATUS_OK;
4859}
4860
4861/*
4862 * Return DMA memory needed by diag module.
4863 */
4864u32
4865bfa_diag_meminfo(void)
4866{
4867 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4868}
4869
4870/*
4871 * Attach virtual and physical memory for Diag.
4872 */
4873void
4874bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
4875 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
4876{
4877 diag->dev = dev;
4878 diag->ioc = ioc;
4879 diag->trcmod = trcmod;
4880
4881 diag->block = 0;
4882 diag->cbfn = NULL;
4883 diag->cbarg = NULL;
4884 diag->result = NULL;
4885 diag->cbfn_beacon = cbfn_beacon;
4886
4887 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
4888 bfa_q_qe_init(&diag->ioc_notify);
4889 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
4890 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
4891}
4892
/*
 * Claim and zero the DMA buffer used for diag firmware ping.
 *
 * @param[in] diag - diag data struct
 * @param[in] dm_kva - kernel virtual address of the DMA buffer
 * @param[in] dm_pa - physical address of the DMA buffer
 */
void
bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
{
	diag->fwping.dbuf_kva = dm_kva;
	diag->fwping.dbuf_pa = dm_pa;
	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
}
4900
4901/*
4902 * PHY module specific
4903 */
4904#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
4905#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
4906
4907static void
4908bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
4909{
4910 int i, m = sz >> 2;
4911
4912 for (i = 0; i < m; i++)
4913 obuf[i] = be32_to_cpu(ibuf[i]);
4914}
4915
/*
 * The phy module is only supported on BFA_MFG_TYPE_LIGHTNING cards;
 * every phy API first checks presence via this predicate.
 */
static bfa_boolean_t
bfa_phy_present(struct bfa_phy_s *phy)
{
	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
}
4921
4922static void
4923bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
4924{
4925 struct bfa_phy_s *phy = cbarg;
4926
4927 bfa_trc(phy, event);
4928
4929 switch (event) {
4930 case BFA_IOC_E_DISABLED:
4931 case BFA_IOC_E_FAILED:
4932 if (phy->op_busy) {
4933 phy->status = BFA_STATUS_IOC_FAILURE;
4934 phy->cbfn(phy->cbarg, phy->status);
4935 phy->op_busy = 0;
4936 }
4937 break;
4938
4939 default:
4940 break;
4941 }
4942}
4943
4944/*
4945 * Send phy attribute query request.
4946 *
4947 * @param[in] cbarg - callback argument
4948 */
4949static void
4950bfa_phy_query_send(void *cbarg)
4951{
4952 struct bfa_phy_s *phy = cbarg;
4953 struct bfi_phy_query_req_s *msg =
4954 (struct bfi_phy_query_req_s *) phy->mb.msg;
4955
4956 msg->instance = phy->instance;
4957 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
4958 bfa_ioc_portid(phy->ioc));
4959 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
4960 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
4961}
4962
4963/*
4964 * Send phy write request.
4965 *
4966 * @param[in] cbarg - callback argument
4967 */
4968static void
4969bfa_phy_write_send(void *cbarg)
4970{
4971 struct bfa_phy_s *phy = cbarg;
4972 struct bfi_phy_write_req_s *msg =
4973 (struct bfi_phy_write_req_s *) phy->mb.msg;
4974 u32 len;
4975 u16 *buf, *dbuf;
4976 int i, sz;
4977
4978 msg->instance = phy->instance;
4979 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
4980 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
4981 phy->residue : BFA_PHY_DMA_BUF_SZ;
4982 msg->length = cpu_to_be32(len);
4983
4984 /* indicate if it's the last msg of the whole write operation */
4985 msg->last = (len == phy->residue) ? 1 : 0;
4986
4987 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
4988 bfa_ioc_portid(phy->ioc));
4989 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
4990
4991 buf = (u16 *) (phy->ubuf + phy->offset);
4992 dbuf = (u16 *)phy->dbuf_kva;
4993 sz = len >> 1;
4994 for (i = 0; i < sz; i++)
4995 buf[i] = cpu_to_be16(dbuf[i]);
4996
4997 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
4998
4999 phy->residue -= len;
5000 phy->offset += len;
5001}
5002
5003/*
5004 * Send phy read request.
5005 *
5006 * @param[in] cbarg - callback argument
5007 */
5008static void
5009bfa_phy_read_send(void *cbarg)
5010{
5011 struct bfa_phy_s *phy = cbarg;
5012 struct bfi_phy_read_req_s *msg =
5013 (struct bfi_phy_read_req_s *) phy->mb.msg;
5014 u32 len;
5015
5016 msg->instance = phy->instance;
5017 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5018 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5019 phy->residue : BFA_PHY_DMA_BUF_SZ;
5020 msg->length = cpu_to_be32(len);
5021 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5022 bfa_ioc_portid(phy->ioc));
5023 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5024 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5025}
5026
5027/*
5028 * Send phy stats request.
5029 *
5030 * @param[in] cbarg - callback argument
5031 */
5032static void
5033bfa_phy_stats_send(void *cbarg)
5034{
5035 struct bfa_phy_s *phy = cbarg;
5036 struct bfi_phy_stats_req_s *msg =
5037 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5038
5039 msg->instance = phy->instance;
5040 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5041 bfa_ioc_portid(phy->ioc));
5042 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5043 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5044}
5045
5046/*
5047 * Flash memory info API.
5048 *
5049 * @param[in] mincfg - minimal cfg variable
5050 */
5051u32
5052bfa_phy_meminfo(bfa_boolean_t mincfg)
5053{
5054 /* min driver doesn't need phy */
5055 if (mincfg)
5056 return 0;
5057
5058 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5059}
5060
5061/*
5062 * Flash attach API.
5063 *
5064 * @param[in] phy - phy structure
5065 * @param[in] ioc - ioc structure
5066 * @param[in] dev - device structure
5067 * @param[in] trcmod - trace module
5068 * @param[in] logmod - log module
5069 */
5070void
5071bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5072 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5073{
5074 phy->ioc = ioc;
5075 phy->trcmod = trcmod;
5076 phy->cbfn = NULL;
5077 phy->cbarg = NULL;
5078 phy->op_busy = 0;
5079
5080 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5081 bfa_q_qe_init(&phy->ioc_notify);
5082 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5083 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5084
5085 /* min driver doesn't need phy */
5086 if (mincfg) {
5087 phy->dbuf_kva = NULL;
5088 phy->dbuf_pa = 0;
5089 }
5090}
5091
5092/*
5093 * Claim memory for phy
5094 *
5095 * @param[in] phy - phy structure
5096 * @param[in] dm_kva - pointer to virtual memory address
5097 * @param[in] dm_pa - physical memory address
5098 * @param[in] mincfg - minimal cfg variable
5099 */
5100void
5101bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5102 bfa_boolean_t mincfg)
5103{
5104 if (mincfg)
5105 return;
5106
5107 phy->dbuf_kva = dm_kva;
5108 phy->dbuf_pa = dm_pa;
5109 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5110 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5111 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5112}
5113
5114bfa_boolean_t
5115bfa_phy_busy(struct bfa_ioc_s *ioc)
5116{
5117 void __iomem *rb;
5118
5119 rb = bfa_ioc_bar0(ioc);
5120 return readl(rb + BFA_PHY_LOCK_STATUS);
5121}
5122
5123/*
5124 * Get phy attribute.
5125 *
5126 * @param[in] phy - phy structure
5127 * @param[in] attr - phy attribute structure
5128 * @param[in] cbfn - callback function
5129 * @param[in] cbarg - callback argument
5130 *
5131 * Return status.
5132 */
5133bfa_status_t
5134bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5135 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5136{
5137 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5138 bfa_trc(phy, instance);
5139
5140 if (!bfa_phy_present(phy))
5141 return BFA_STATUS_PHY_NOT_PRESENT;
5142
5143 if (!bfa_ioc_is_operational(phy->ioc))
5144 return BFA_STATUS_IOC_NON_OP;
5145
5146 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5147 bfa_trc(phy, phy->op_busy);
5148 return BFA_STATUS_DEVBUSY;
5149 }
5150
5151 phy->op_busy = 1;
5152 phy->cbfn = cbfn;
5153 phy->cbarg = cbarg;
5154 phy->instance = instance;
5155 phy->ubuf = (uint8_t *) attr;
5156 bfa_phy_query_send(phy);
5157
5158 return BFA_STATUS_OK;
5159}
5160
5161/*
5162 * Get phy stats.
5163 *
5164 * @param[in] phy - phy structure
5165 * @param[in] instance - phy image instance
5166 * @param[in] stats - pointer to phy stats
5167 * @param[in] cbfn - callback function
5168 * @param[in] cbarg - callback argument
5169 *
5170 * Return status.
5171 */
5172bfa_status_t
5173bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5174 struct bfa_phy_stats_s *stats,
5175 bfa_cb_phy_t cbfn, void *cbarg)
5176{
5177 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5178 bfa_trc(phy, instance);
5179
5180 if (!bfa_phy_present(phy))
5181 return BFA_STATUS_PHY_NOT_PRESENT;
5182
5183 if (!bfa_ioc_is_operational(phy->ioc))
5184 return BFA_STATUS_IOC_NON_OP;
5185
5186 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5187 bfa_trc(phy, phy->op_busy);
5188 return BFA_STATUS_DEVBUSY;
5189 }
5190
5191 phy->op_busy = 1;
5192 phy->cbfn = cbfn;
5193 phy->cbarg = cbarg;
5194 phy->instance = instance;
5195 phy->ubuf = (u8 *) stats;
5196 bfa_phy_stats_send(phy);
5197
5198 return BFA_STATUS_OK;
5199}
5200
5201/*
5202 * Update phy image.
5203 *
5204 * @param[in] phy - phy structure
5205 * @param[in] instance - phy image instance
5206 * @param[in] buf - update data buffer
5207 * @param[in] len - data buffer length
5208 * @param[in] offset - offset relative to starting address
5209 * @param[in] cbfn - callback function
5210 * @param[in] cbarg - callback argument
5211 *
5212 * Return status.
5213 */
5214bfa_status_t
5215bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5216 void *buf, u32 len, u32 offset,
5217 bfa_cb_phy_t cbfn, void *cbarg)
5218{
5219 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5220 bfa_trc(phy, instance);
5221 bfa_trc(phy, len);
5222 bfa_trc(phy, offset);
5223
5224 if (!bfa_phy_present(phy))
5225 return BFA_STATUS_PHY_NOT_PRESENT;
5226
5227 if (!bfa_ioc_is_operational(phy->ioc))
5228 return BFA_STATUS_IOC_NON_OP;
5229
5230 /* 'len' must be in word (4-byte) boundary */
5231 if (!len || (len & 0x03))
5232 return BFA_STATUS_FAILED;
5233
5234 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5235 bfa_trc(phy, phy->op_busy);
5236 return BFA_STATUS_DEVBUSY;
5237 }
5238
5239 phy->op_busy = 1;
5240 phy->cbfn = cbfn;
5241 phy->cbarg = cbarg;
5242 phy->instance = instance;
5243 phy->residue = len;
5244 phy->offset = 0;
5245 phy->addr_off = offset;
5246 phy->ubuf = buf;
5247
5248 bfa_phy_write_send(phy);
5249 return BFA_STATUS_OK;
5250}
5251
5252/*
5253 * Read phy image.
5254 *
5255 * @param[in] phy - phy structure
5256 * @param[in] instance - phy image instance
5257 * @param[in] buf - read data buffer
5258 * @param[in] len - data buffer length
5259 * @param[in] offset - offset relative to starting address
5260 * @param[in] cbfn - callback function
5261 * @param[in] cbarg - callback argument
5262 *
5263 * Return status.
5264 */
5265bfa_status_t
5266bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5267 void *buf, u32 len, u32 offset,
5268 bfa_cb_phy_t cbfn, void *cbarg)
5269{
5270 bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5271 bfa_trc(phy, instance);
5272 bfa_trc(phy, len);
5273 bfa_trc(phy, offset);
5274
5275 if (!bfa_phy_present(phy))
5276 return BFA_STATUS_PHY_NOT_PRESENT;
5277
5278 if (!bfa_ioc_is_operational(phy->ioc))
5279 return BFA_STATUS_IOC_NON_OP;
5280
5281 /* 'len' must be in word (4-byte) boundary */
5282 if (!len || (len & 0x03))
5283 return BFA_STATUS_FAILED;
5284
5285 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5286 bfa_trc(phy, phy->op_busy);
5287 return BFA_STATUS_DEVBUSY;
5288 }
5289
5290 phy->op_busy = 1;
5291 phy->cbfn = cbfn;
5292 phy->cbarg = cbarg;
5293 phy->instance = instance;
5294 phy->residue = len;
5295 phy->offset = 0;
5296 phy->addr_off = offset;
5297 phy->ubuf = buf;
5298 bfa_phy_read_send(phy);
5299
5300 return BFA_STATUS_OK;
5301}
5302
5303/*
5304 * Process phy response messages upon receiving interrupts.
5305 *
5306 * @param[in] phyarg - phy structure
5307 * @param[in] msg - message structure
5308 */
5309void
5310bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5311{
5312 struct bfa_phy_s *phy = phyarg;
5313 u32 status;
5314
5315 union {
5316 struct bfi_phy_query_rsp_s *query;
5317 struct bfi_phy_stats_rsp_s *stats;
5318 struct bfi_phy_write_rsp_s *write;
5319 struct bfi_phy_read_rsp_s *read;
5320 struct bfi_mbmsg_s *msg;
5321 } m;
5322
5323 m.msg = msg;
5324 bfa_trc(phy, msg->mh.msg_id);
5325
5326 if (!phy->op_busy) {
5327 /* receiving response after ioc failure */
5328 bfa_trc(phy, 0x9999);
5329 return;
5330 }
5331
5332 switch (msg->mh.msg_id) {
5333 case BFI_PHY_I2H_QUERY_RSP:
5334 status = be32_to_cpu(m.query->status);
5335 bfa_trc(phy, status);
5336
5337 if (status == BFA_STATUS_OK) {
5338 struct bfa_phy_attr_s *attr =
5339 (struct bfa_phy_attr_s *) phy->ubuf;
5340 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5341 sizeof(struct bfa_phy_attr_s));
5342 bfa_trc(phy, attr->status);
5343 bfa_trc(phy, attr->length);
5344 }
5345
5346 phy->status = status;
5347 phy->op_busy = 0;
5348 if (phy->cbfn)
5349 phy->cbfn(phy->cbarg, phy->status);
5350 break;
5351 case BFI_PHY_I2H_STATS_RSP:
5352 status = be32_to_cpu(m.stats->status);
5353 bfa_trc(phy, status);
5354
5355 if (status == BFA_STATUS_OK) {
5356 struct bfa_phy_stats_s *stats =
5357 (struct bfa_phy_stats_s *) phy->ubuf;
5358 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5359 sizeof(struct bfa_phy_stats_s));
5360 bfa_trc(phy, stats->status);
5361 }
5362
5363 phy->status = status;
5364 phy->op_busy = 0;
5365 if (phy->cbfn)
5366 phy->cbfn(phy->cbarg, phy->status);
5367 break;
5368 case BFI_PHY_I2H_WRITE_RSP:
5369 status = be32_to_cpu(m.write->status);
5370 bfa_trc(phy, status);
5371
5372 if (status != BFA_STATUS_OK || phy->residue == 0) {
5373 phy->status = status;
5374 phy->op_busy = 0;
5375 if (phy->cbfn)
5376 phy->cbfn(phy->cbarg, phy->status);
5377 } else {
5378 bfa_trc(phy, phy->offset);
5379 bfa_phy_write_send(phy);
5380 }
5381 break;
5382 case BFI_PHY_I2H_READ_RSP:
5383 status = be32_to_cpu(m.read->status);
5384 bfa_trc(phy, status);
5385
5386 if (status != BFA_STATUS_OK) {
5387 phy->status = status;
5388 phy->op_busy = 0;
5389 if (phy->cbfn)
5390 phy->cbfn(phy->cbarg, phy->status);
5391 } else {
5392 u32 len = be32_to_cpu(m.read->length);
5393 u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5394 u16 *dbuf = (u16 *)phy->dbuf_kva;
5395 int i, sz = len >> 1;
5396
5397 bfa_trc(phy, phy->offset);
5398 bfa_trc(phy, len);
5399
5400 for (i = 0; i < sz; i++)
5401 buf[i] = be16_to_cpu(dbuf[i]);
5402
5403 phy->residue -= len;
5404 phy->offset += len;
5405
5406 if (phy->residue == 0) {
5407 phy->status = status;
5408 phy->op_busy = 0;
5409 if (phy->cbfn)
5410 phy->cbfn(phy->cbarg, phy->status);
5411 } else
5412 bfa_phy_read_send(phy);
5413 }
5414 break;
5415 default:
5416 WARN_ON(1);
5417 }
5418}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index c85182a704fb..c5ecd2edc95d 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -85,12 +85,75 @@ struct bfa_sge_s {
85#endif 85#endif
86 86
87/* 87/*
88 * BFA memory resources
89 */
90struct bfa_mem_dma_s {
91 struct list_head qe; /* Queue of DMA elements */
92 u32 mem_len; /* Total Length in Bytes */
93 u8 *kva; /* kernel virtual address */
94 u64 dma; /* dma address if DMA memory */
95 u8 *kva_curp; /* kva allocation cursor */
96 u64 dma_curp; /* dma allocation cursor */
97};
98#define bfa_mem_dma_t struct bfa_mem_dma_s
99
100struct bfa_mem_kva_s {
101 struct list_head qe; /* Queue of KVA elements */
102 u32 mem_len; /* Total Length in Bytes */
103 u8 *kva; /* kernel virtual address */
104 u8 *kva_curp; /* kva allocation cursor */
105};
106#define bfa_mem_kva_t struct bfa_mem_kva_s
107
108struct bfa_meminfo_s {
109 struct bfa_mem_dma_s dma_info;
110 struct bfa_mem_kva_s kva_info;
111};
112
113/* BFA memory segment setup macros */
114#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \
115 ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \
116 if (_seg_sz) \
117 list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \
118 &(_meminfo)->dma_info.qe); \
119} while (0)
120
121#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \
122 ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \
123 if (_seg_sz) \
124 list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
125 &(_meminfo)->kva_info.qe); \
126} while (0)
127
128/* BFA dma memory segments iterator */
129#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)])
130#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i) \
131 for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr); \
132 _i++, _sptr = bfa_mem_dma_sptr(_mod, _i))
133
134#define bfa_mem_kva_curp(_mod) ((_mod)->kva_seg.kva_curp)
135#define bfa_mem_dma_virt(_sptr) ((_sptr)->kva_curp)
136#define bfa_mem_dma_phys(_sptr) ((_sptr)->dma_curp)
137#define bfa_mem_dma_len(_sptr) ((_sptr)->mem_len)
138
139/* Get the corresponding dma buf kva for a req - from the tag */
140#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz) \
141 (((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\
142 BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
143
144/* Get the corresponding dma buf pa for a req - from the tag */
145#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz) \
146 ((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp + \
147 BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
148
149/*
88 * PCI device information required by IOC 150 * PCI device information required by IOC
89 */ 151 */
90struct bfa_pcidev_s { 152struct bfa_pcidev_s {
91 int pci_slot; 153 int pci_slot;
92 u8 pci_func; 154 u8 pci_func;
93 u16 device_id; 155 u16 device_id;
156 u16 ssid;
94 void __iomem *pci_bar_kva; 157 void __iomem *pci_bar_kva;
95}; 158};
96 159
@@ -112,18 +175,6 @@ struct bfa_dma_s {
112#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ 175#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
113#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ 176#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
114 177
115
116#define bfa_dma_addr_set(dma_addr, pa) \
117 __bfa_dma_addr_set(&dma_addr, (u64)pa)
118
119static inline void
120__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
121{
122 dma_addr->a32.addr_lo = (__be32) pa;
123 dma_addr->a32.addr_hi = (__be32) (pa >> 32);
124}
125
126
127#define bfa_dma_be_addr_set(dma_addr, pa) \ 178#define bfa_dma_be_addr_set(dma_addr, pa) \
128 __bfa_dma_be_addr_set(&dma_addr, (u64)pa) 179 __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
129static inline void 180static inline void
@@ -133,11 +184,22 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
133 dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32); 184 dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
134} 185}
135 186
187#define bfa_alen_set(__alen, __len, __pa) \
188 __bfa_alen_set(__alen, __len, (u64)__pa)
189
190static inline void
191__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa)
192{
193 alen->al_len = cpu_to_be32(len);
194 bfa_dma_be_addr_set(alen->al_addr, pa);
195}
196
136struct bfa_ioc_regs_s { 197struct bfa_ioc_regs_s {
137 void __iomem *hfn_mbox_cmd; 198 void __iomem *hfn_mbox_cmd;
138 void __iomem *hfn_mbox; 199 void __iomem *hfn_mbox;
139 void __iomem *lpu_mbox_cmd; 200 void __iomem *lpu_mbox_cmd;
140 void __iomem *lpu_mbox; 201 void __iomem *lpu_mbox;
202 void __iomem *lpu_read_stat;
141 void __iomem *pss_ctl_reg; 203 void __iomem *pss_ctl_reg;
142 void __iomem *pss_err_status_reg; 204 void __iomem *pss_err_status_reg;
143 void __iomem *app_pll_fast_ctl_reg; 205 void __iomem *app_pll_fast_ctl_reg;
@@ -199,18 +261,26 @@ struct bfa_ioc_cbfn_s {
199}; 261};
200 262
201/* 263/*
202 * Heartbeat failure notification queue element. 264 * IOC event notification mechanism.
203 */ 265 */
204struct bfa_ioc_hbfail_notify_s { 266enum bfa_ioc_event_e {
267 BFA_IOC_E_ENABLED = 1,
268 BFA_IOC_E_DISABLED = 2,
269 BFA_IOC_E_FAILED = 3,
270};
271
272typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e);
273
274struct bfa_ioc_notify_s {
205 struct list_head qe; 275 struct list_head qe;
206 bfa_ioc_hbfail_cbfn_t cbfn; 276 bfa_ioc_notify_cbfn_t cbfn;
207 void *cbarg; 277 void *cbarg;
208}; 278};
209 279
210/* 280/*
211 * Initialize a heartbeat failure notification structure 281 * Initialize a IOC event notification structure
212 */ 282 */
213#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \ 283#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
214 (__notify)->cbfn = (__cbfn); \ 284 (__notify)->cbfn = (__cbfn); \
215 (__notify)->cbarg = (__cbarg); \ 285 (__notify)->cbarg = (__cbarg); \
216} while (0) 286} while (0)
@@ -218,8 +288,9 @@ struct bfa_ioc_hbfail_notify_s {
218struct bfa_iocpf_s { 288struct bfa_iocpf_s {
219 bfa_fsm_t fsm; 289 bfa_fsm_t fsm;
220 struct bfa_ioc_s *ioc; 290 struct bfa_ioc_s *ioc;
221 u32 retry_count; 291 bfa_boolean_t fw_mismatch_notified;
222 bfa_boolean_t auto_recover; 292 bfa_boolean_t auto_recover;
293 u32 poll_time;
223}; 294};
224 295
225struct bfa_ioc_s { 296struct bfa_ioc_s {
@@ -231,17 +302,15 @@ struct bfa_ioc_s {
231 struct bfa_timer_s sem_timer; 302 struct bfa_timer_s sem_timer;
232 struct bfa_timer_s hb_timer; 303 struct bfa_timer_s hb_timer;
233 u32 hb_count; 304 u32 hb_count;
234 struct list_head hb_notify_q; 305 struct list_head notify_q;
235 void *dbg_fwsave; 306 void *dbg_fwsave;
236 int dbg_fwsave_len; 307 int dbg_fwsave_len;
237 bfa_boolean_t dbg_fwsave_once; 308 bfa_boolean_t dbg_fwsave_once;
238 enum bfi_mclass ioc_mc; 309 enum bfi_pcifn_class clscode;
239 struct bfa_ioc_regs_s ioc_regs; 310 struct bfa_ioc_regs_s ioc_regs;
240 struct bfa_trc_mod_s *trcmod; 311 struct bfa_trc_mod_s *trcmod;
241 struct bfa_ioc_drv_stats_s stats; 312 struct bfa_ioc_drv_stats_s stats;
242 bfa_boolean_t fcmode; 313 bfa_boolean_t fcmode;
243 bfa_boolean_t ctdev;
244 bfa_boolean_t cna;
245 bfa_boolean_t pllinit; 314 bfa_boolean_t pllinit;
246 bfa_boolean_t stats_busy; /* outstanding stats */ 315 bfa_boolean_t stats_busy; /* outstanding stats */
247 u8 port_id; 316 u8 port_id;
@@ -251,10 +320,17 @@ struct bfa_ioc_s {
251 struct bfa_ioc_mbox_mod_s mbox_mod; 320 struct bfa_ioc_mbox_mod_s mbox_mod;
252 struct bfa_ioc_hwif_s *ioc_hwif; 321 struct bfa_ioc_hwif_s *ioc_hwif;
253 struct bfa_iocpf_s iocpf; 322 struct bfa_iocpf_s iocpf;
323 enum bfi_asic_gen asic_gen;
324 enum bfi_asic_mode asic_mode;
325 enum bfi_port_mode port0_mode;
326 enum bfi_port_mode port1_mode;
327 enum bfa_mode_s port_mode;
328 u8 ad_cap_bm; /* adapter cap bit mask */
329 u8 port_mode_cfg; /* config port mode */
254}; 330};
255 331
256struct bfa_ioc_hwif_s { 332struct bfa_ioc_hwif_s {
257 bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode); 333 bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
258 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); 334 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
259 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); 335 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
260 void (*ioc_reg_init) (struct bfa_ioc_s *ioc); 336 void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
@@ -268,12 +344,356 @@ struct bfa_ioc_hwif_s {
268 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc); 344 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
269 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); 345 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
270 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc); 346 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
347 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
348};
349
350/*
351 * Queue element to wait for room in request queue. FIFO order is
352 * maintained when fullfilling requests.
353 */
354struct bfa_reqq_wait_s {
355 struct list_head qe;
356 void (*qresume) (void *cbarg);
357 void *cbarg;
358};
359
360typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
361
362/*
363 * Generic BFA callback element.
364 */
365struct bfa_cb_qe_s {
366 struct list_head qe;
367 bfa_cb_cbfn_t cbfn;
368 bfa_boolean_t once;
369 void *cbarg;
370};
371
372/*
373 * ASIC block configurtion related
374 */
375
376typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status);
377
378struct bfa_ablk_s {
379 struct bfa_ioc_s *ioc;
380 struct bfa_ablk_cfg_s *cfg;
381 u16 *pcifn;
382 struct bfa_dma_s dma_addr;
383 bfa_boolean_t busy;
384 struct bfa_mbox_cmd_s mb;
385 bfa_ablk_cbfn_t cbfn;
386 void *cbarg;
387 struct bfa_ioc_notify_s ioc_notify;
388 struct bfa_mem_dma_s ablk_dma;
389};
390#define BFA_MEM_ABLK_DMA(__bfa) (&((__bfa)->modules.ablk.ablk_dma))
391
392/*
393 * SFP module specific
394 */
395typedef void (*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status);
396
397struct bfa_sfp_s {
398 void *dev;
399 struct bfa_ioc_s *ioc;
400 struct bfa_trc_mod_s *trcmod;
401 struct sfp_mem_s *sfpmem;
402 bfa_cb_sfp_t cbfn;
403 void *cbarg;
404 enum bfi_sfp_mem_e memtype; /* mem access type */
405 u32 status;
406 struct bfa_mbox_cmd_s mbcmd;
407 u8 *dbuf_kva; /* dma buf virtual address */
408 u64 dbuf_pa; /* dma buf physical address */
409 struct bfa_ioc_notify_s ioc_notify;
410 enum bfa_defs_sfp_media_e *media;
411 enum bfa_port_speed portspeed;
412 bfa_cb_sfp_t state_query_cbfn;
413 void *state_query_cbarg;
414 u8 lock;
415 u8 data_valid; /* data in dbuf is valid */
416 u8 state; /* sfp state */
417 u8 state_query_lock;
418 struct bfa_mem_dma_s sfp_dma;
419 u8 is_elb; /* eloopback */
420};
421
422#define BFA_SFP_MOD(__bfa) (&(__bfa)->modules.sfp)
423#define BFA_MEM_SFP_DMA(__bfa) (&(BFA_SFP_MOD(__bfa)->sfp_dma))
424
425u32 bfa_sfp_meminfo(void);
426
427void bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc,
428 void *dev, struct bfa_trc_mod_s *trcmod);
429
430void bfa_sfp_memclaim(struct bfa_sfp_s *diag, u8 *dm_kva, u64 dm_pa);
431void bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg);
432
433bfa_status_t bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
434 bfa_cb_sfp_t cbfn, void *cbarg);
435
436bfa_status_t bfa_sfp_media(struct bfa_sfp_s *sfp,
437 enum bfa_defs_sfp_media_e *media,
438 bfa_cb_sfp_t cbfn, void *cbarg);
439
440bfa_status_t bfa_sfp_speed(struct bfa_sfp_s *sfp,
441 enum bfa_port_speed portspeed,
442 bfa_cb_sfp_t cbfn, void *cbarg);
443
444/*
445 * Flash module specific
446 */
447typedef void (*bfa_cb_flash_t) (void *cbarg, bfa_status_t status);
448
449struct bfa_flash_s {
450 struct bfa_ioc_s *ioc; /* back pointer to ioc */
451 struct bfa_trc_mod_s *trcmod;
452 u32 type; /* partition type */
453 u8 instance; /* partition instance */
454 u8 rsv[3];
455 u32 op_busy; /* operation busy flag */
456 u32 residue; /* residual length */
457 u32 offset; /* offset */
458 bfa_status_t status; /* status */
459 u8 *dbuf_kva; /* dma buf virtual address */
460 u64 dbuf_pa; /* dma buf physical address */
461 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
462 bfa_cb_flash_t cbfn; /* user callback function */
463 void *cbarg; /* user callback arg */
464 u8 *ubuf; /* user supplied buffer */
465 struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */
466 u32 addr_off; /* partition address offset */
467 struct bfa_mbox_cmd_s mb; /* mailbox */
468 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
469 struct bfa_mem_dma_s flash_dma;
470};
471
472#define BFA_FLASH(__bfa) (&(__bfa)->modules.flash)
473#define BFA_MEM_FLASH_DMA(__bfa) (&(BFA_FLASH(__bfa)->flash_dma))
474
475bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash,
476 struct bfa_flash_attr_s *attr,
477 bfa_cb_flash_t cbfn, void *cbarg);
478bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash,
479 enum bfa_flash_part_type type, u8 instance,
480 bfa_cb_flash_t cbfn, void *cbarg);
481bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash,
482 enum bfa_flash_part_type type, u8 instance,
483 void *buf, u32 len, u32 offset,
484 bfa_cb_flash_t cbfn, void *cbarg);
485bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash,
486 enum bfa_flash_part_type type, u8 instance, void *buf,
487 u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg);
488u32 bfa_flash_meminfo(bfa_boolean_t mincfg);
489void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
490 void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
491void bfa_flash_memclaim(struct bfa_flash_s *flash,
492 u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
493
494/*
495 * DIAG module specific
496 */
497
498typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status);
499typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon,
500 bfa_boolean_t link_e2e_beacon);
501
502/*
503 * Firmware ping test results
504 */
505struct bfa_diag_results_fwping {
506 u32 data; /* store the corrupted data */
507 u32 status;
508 u32 dmastatus;
509 u8 rsvd[4];
510};
511
512struct bfa_diag_qtest_result_s {
513 u32 status;
514 u16 count; /* sucessful queue test count */
515 u8 queue;
516 u8 rsvd; /* 64-bit align */
517};
518
519/*
520 * Firmware ping test results
521 */
522struct bfa_diag_fwping_s {
523 struct bfa_diag_results_fwping *result;
524 bfa_cb_diag_t cbfn;
525 void *cbarg;
526 u32 data;
527 u8 lock;
528 u8 rsv[3];
529 u32 status;
530 u32 count;
531 struct bfa_mbox_cmd_s mbcmd;
532 u8 *dbuf_kva; /* dma buf virtual address */
533 u64 dbuf_pa; /* dma buf physical address */
534};
535
536/*
537 * Temperature sensor query results
538 */
539struct bfa_diag_results_tempsensor_s {
540 u32 status;
541 u16 temp; /* 10-bit A/D value */
542 u16 brd_temp; /* 9-bit board temp */
543 u8 ts_junc; /* show junction tempsensor */
544 u8 ts_brd; /* show board tempsensor */
545 u8 rsvd[6]; /* keep 8 bytes alignment */
546};
547
548struct bfa_diag_tsensor_s {
549 bfa_cb_diag_t cbfn;
550 void *cbarg;
551 struct bfa_diag_results_tempsensor_s *temp;
552 u8 lock;
553 u8 rsv[3];
554 u32 status;
555 struct bfa_mbox_cmd_s mbcmd;
271}; 556};
272 557
558struct bfa_diag_sfpshow_s {
559 struct sfp_mem_s *sfpmem;
560 bfa_cb_diag_t cbfn;
561 void *cbarg;
562 u8 lock;
563 u8 static_data;
564 u8 rsv[2];
565 u32 status;
566 struct bfa_mbox_cmd_s mbcmd;
567 u8 *dbuf_kva; /* dma buf virtual address */
568 u64 dbuf_pa; /* dma buf physical address */
569};
570
571struct bfa_diag_led_s {
572 struct bfa_mbox_cmd_s mbcmd;
573 bfa_boolean_t lock; /* 1: ledtest is operating */
574};
575
576struct bfa_diag_beacon_s {
577 struct bfa_mbox_cmd_s mbcmd;
578 bfa_boolean_t state; /* port beacon state */
579 bfa_boolean_t link_e2e; /* link beacon state */
580};
581
582struct bfa_diag_s {
583 void *dev;
584 struct bfa_ioc_s *ioc;
585 struct bfa_trc_mod_s *trcmod;
586 struct bfa_diag_fwping_s fwping;
587 struct bfa_diag_tsensor_s tsensor;
588 struct bfa_diag_sfpshow_s sfpshow;
589 struct bfa_diag_led_s ledtest;
590 struct bfa_diag_beacon_s beacon;
591 void *result;
592 struct bfa_timer_s timer;
593 bfa_cb_diag_beacon_t cbfn_beacon;
594 bfa_cb_diag_t cbfn;
595 void *cbarg;
596 u8 block;
597 u8 timer_active;
598 u8 rsvd[2];
599 u32 status;
600 struct bfa_ioc_notify_s ioc_notify;
601 struct bfa_mem_dma_s diag_dma;
602};
603
604#define BFA_DIAG_MOD(__bfa) (&(__bfa)->modules.diag_mod)
605#define BFA_MEM_DIAG_DMA(__bfa) (&(BFA_DIAG_MOD(__bfa)->diag_dma))
606
607u32 bfa_diag_meminfo(void);
608void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa);
609void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
610 bfa_cb_diag_beacon_t cbfn_beacon,
611 struct bfa_trc_mod_s *trcmod);
612bfa_status_t bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset,
613 u32 len, u32 *buf, u32 force);
614bfa_status_t bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset,
615 u32 len, u32 value, u32 force);
616bfa_status_t bfa_diag_tsensor_query(struct bfa_diag_s *diag,
617 struct bfa_diag_results_tempsensor_s *result,
618 bfa_cb_diag_t cbfn, void *cbarg);
619bfa_status_t bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt,
620 u32 pattern, struct bfa_diag_results_fwping *result,
621 bfa_cb_diag_t cbfn, void *cbarg);
622bfa_status_t bfa_diag_sfpshow(struct bfa_diag_s *diag,
623 struct sfp_mem_s *sfpmem, u8 static_data,
624 bfa_cb_diag_t cbfn, void *cbarg);
625bfa_status_t bfa_diag_memtest(struct bfa_diag_s *diag,
626 struct bfa_diag_memtest_s *memtest, u32 pattern,
627 struct bfa_diag_memtest_result *result,
628 bfa_cb_diag_t cbfn, void *cbarg);
629bfa_status_t bfa_diag_ledtest(struct bfa_diag_s *diag,
630 struct bfa_diag_ledtest_s *ledtest);
631bfa_status_t bfa_diag_beacon_port(struct bfa_diag_s *diag,
632 bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon,
633 u32 sec);
634
635/*
636 * PHY module specific
637 */
638typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status);
639
640struct bfa_phy_s {
641 struct bfa_ioc_s *ioc; /* back pointer to ioc */
642 struct bfa_trc_mod_s *trcmod; /* trace module */
643 u8 instance; /* port instance */
644 u8 op_busy; /* operation busy flag */
645 u8 rsv[2];
646 u32 residue; /* residual length */
647 u32 offset; /* offset */
648 bfa_status_t status; /* status */
649 u8 *dbuf_kva; /* dma buf virtual address */
650 u64 dbuf_pa; /* dma buf physical address */
651 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
652 bfa_cb_phy_t cbfn; /* user callback function */
653 void *cbarg; /* user callback arg */
654 u8 *ubuf; /* user supplied buffer */
655 struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */
656 u32 addr_off; /* phy address offset */
657 struct bfa_mbox_cmd_s mb; /* mailbox */
658 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
659 struct bfa_mem_dma_s phy_dma;
660};
661
662#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
663#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
664
665bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc);
666bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
667 struct bfa_phy_attr_s *attr,
668 bfa_cb_phy_t cbfn, void *cbarg);
669bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
670 struct bfa_phy_stats_s *stats,
671 bfa_cb_phy_t cbfn, void *cbarg);
672bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
673 void *buf, u32 len, u32 offset,
674 bfa_cb_phy_t cbfn, void *cbarg);
675bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
676 void *buf, u32 len, u32 offset,
677 bfa_cb_phy_t cbfn, void *cbarg);
678
679u32 bfa_phy_meminfo(bfa_boolean_t mincfg);
680void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc,
681 void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
682void bfa_phy_memclaim(struct bfa_phy_s *phy,
683 u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
684void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
685
686/*
687 * IOC specfic macros
688 */
273#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) 689#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
274#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id) 690#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
275#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) 691#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
276#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) 692#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
693#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
694#define bfa_ioc_is_cna(__ioc) \
695 ((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) || \
696 (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL))
277#define bfa_ioc_fetch_stats(__ioc, __stats) \ 697#define bfa_ioc_fetch_stats(__ioc, __stats) \
278 (((__stats)->drv_stats) = (__ioc)->stats) 698 (((__stats)->drv_stats) = (__ioc)->stats)
279#define bfa_ioc_clr_stats(__ioc) \ 699#define bfa_ioc_clr_stats(__ioc) \
@@ -287,12 +707,9 @@ struct bfa_ioc_hwif_s {
287 707
288#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) 708#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
289#define BFA_IOC_FWIMG_MINSZ (16 * 1024) 709#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
290#define BFA_IOC_FWIMG_TYPE(__ioc) \ 710#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
291 (((__ioc)->ctdev) ? \ 711 ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \
292 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \ 712 ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
293 BFI_IMAGE_CB_FC)
294#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
295 (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
296#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) 713#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
297#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 714#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
298#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 715#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
@@ -305,7 +722,7 @@ void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
305 bfa_ioc_mbox_mcfunc_t *mcfuncs); 722 bfa_ioc_mbox_mcfunc_t *mcfuncs);
306void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc); 723void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
307void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len); 724void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
308void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg); 725bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
309void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, 726void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
310 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); 727 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
311 728
@@ -315,40 +732,49 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
315 732
316#define bfa_ioc_pll_init_asic(__ioc) \ 733#define bfa_ioc_pll_init_asic(__ioc) \
317 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ 734 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
318 (__ioc)->fcmode)) 735 (__ioc)->asic_mode))
319 736
320bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc); 737bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
321bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode); 738bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
322bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb); 739bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
323bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode); 740bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
324 741
325#define bfa_ioc_isr_mode_set(__ioc, __msix) \ 742#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
326 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) 743 if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
744 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
745} while (0)
327#define bfa_ioc_ownership_reset(__ioc) \ 746#define bfa_ioc_ownership_reset(__ioc) \
328 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) 747 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
748#define bfa_ioc_get_fcmode(__ioc) ((__ioc)->fcmode)
749#define bfa_ioc_lpu_read_stat(__ioc) do { \
750 if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \
751 ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
752} while (0)
329 753
330
331void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
332void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc); 754void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
755void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
756void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc);
757void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc);
333 758
334void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, 759void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
335 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod); 760 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
336void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); 761void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
337void bfa_ioc_detach(struct bfa_ioc_s *ioc); 762void bfa_ioc_detach(struct bfa_ioc_s *ioc);
338void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, 763void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
339 enum bfi_mclass mc); 764 enum bfi_pcifn_class clscode);
340void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa); 765void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
341void bfa_ioc_enable(struct bfa_ioc_s *ioc); 766void bfa_ioc_enable(struct bfa_ioc_s *ioc);
342void bfa_ioc_disable(struct bfa_ioc_s *ioc); 767void bfa_ioc_disable(struct bfa_ioc_s *ioc);
343bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc); 768bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
344 769
345void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, 770void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
346 u32 boot_param); 771 u32 boot_env);
347void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); 772void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
348void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); 773void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
349bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); 774bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
350bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc); 775bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
351bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); 776bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
777bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc);
352bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); 778bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
353bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); 779bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
354void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc); 780void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
@@ -372,8 +798,6 @@ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
372 int *trclen); 798 int *trclen);
373bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, 799bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
374 u32 *offset, int *buflen); 800 u32 *offset, int *buflen);
375void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
376bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
377bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg); 801bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
378void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, 802void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
379 struct bfi_ioc_image_hdr_s *fwhdr); 803 struct bfi_ioc_image_hdr_s *fwhdr);
@@ -383,6 +807,33 @@ bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
383bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc); 807bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
384 808
385/* 809/*
810 * asic block configuration related APIs
811 */
812u32 bfa_ablk_meminfo(void);
813void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa);
814void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc);
815bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk,
816 struct bfa_ablk_cfg_s *ablk_cfg,
817 bfa_ablk_cbfn_t cbfn, void *cbarg);
818bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk,
819 enum bfa_mode_s mode, int max_pf, int max_vf,
820 bfa_ablk_cbfn_t cbfn, void *cbarg);
821bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
822 enum bfa_mode_s mode, int max_pf, int max_vf,
823 bfa_ablk_cbfn_t cbfn, void *cbarg);
824bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
825 u8 port, enum bfi_pcifn_class personality, int bw,
826 bfa_ablk_cbfn_t cbfn, void *cbarg);
827bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
828 bfa_ablk_cbfn_t cbfn, void *cbarg);
829bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
830 bfa_ablk_cbfn_t cbfn, void *cbarg);
831bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
832 bfa_ablk_cbfn_t cbfn, void *cbarg);
833bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
834 bfa_ablk_cbfn_t cbfn, void *cbarg);
835
836/*
386 * bfa mfg wwn API functions 837 * bfa mfg wwn API functions
387 */ 838 */
388mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc); 839mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
@@ -391,50 +842,64 @@ mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
391/* 842/*
392 * F/W Image Size & Chunk 843 * F/W Image Size & Chunk
393 */ 844 */
394extern u32 bfi_image_ct_fc_size; 845extern u32 bfi_image_cb_size;
395extern u32 bfi_image_ct_cna_size; 846extern u32 bfi_image_ct_size;
396extern u32 bfi_image_cb_fc_size; 847extern u32 bfi_image_ct2_size;
397extern u32 *bfi_image_ct_fc; 848extern u32 *bfi_image_cb;
398extern u32 *bfi_image_ct_cna; 849extern u32 *bfi_image_ct;
399extern u32 *bfi_image_cb_fc; 850extern u32 *bfi_image_ct2;
400 851
401static inline u32 * 852static inline u32 *
402bfi_image_ct_fc_get_chunk(u32 off) 853bfi_image_cb_get_chunk(u32 off)
403{ return (u32 *)(bfi_image_ct_fc + off); } 854{
855 return (u32 *)(bfi_image_cb + off);
856}
404 857
405static inline u32 * 858static inline u32 *
406bfi_image_ct_cna_get_chunk(u32 off) 859bfi_image_ct_get_chunk(u32 off)
407{ return (u32 *)(bfi_image_ct_cna + off); } 860{
861 return (u32 *)(bfi_image_ct + off);
862}
408 863
409static inline u32 * 864static inline u32 *
410bfi_image_cb_fc_get_chunk(u32 off) 865bfi_image_ct2_get_chunk(u32 off)
411{ return (u32 *)(bfi_image_cb_fc + off); } 866{
867 return (u32 *)(bfi_image_ct2 + off);
868}
412 869
413static inline u32* 870static inline u32*
414bfa_cb_image_get_chunk(int type, u32 off) 871bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
415{ 872{
416 switch (type) { 873 switch (asic_gen) {
417 case BFI_IMAGE_CT_FC: 874 case BFI_ASIC_GEN_CB:
418 return bfi_image_ct_fc_get_chunk(off); break; 875 return bfi_image_cb_get_chunk(off);
419 case BFI_IMAGE_CT_CNA: 876 break;
420 return bfi_image_ct_cna_get_chunk(off); break; 877 case BFI_ASIC_GEN_CT:
421 case BFI_IMAGE_CB_FC: 878 return bfi_image_ct_get_chunk(off);
422 return bfi_image_cb_fc_get_chunk(off); break; 879 break;
423 default: return NULL; 880 case BFI_ASIC_GEN_CT2:
881 return bfi_image_ct2_get_chunk(off);
882 break;
883 default:
884 return NULL;
424 } 885 }
425} 886}
426 887
427static inline u32 888static inline u32
428bfa_cb_image_get_size(int type) 889bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
429{ 890{
430 switch (type) { 891 switch (asic_gen) {
431 case BFI_IMAGE_CT_FC: 892 case BFI_ASIC_GEN_CB:
432 return bfi_image_ct_fc_size; break; 893 return bfi_image_cb_size;
433 case BFI_IMAGE_CT_CNA: 894 break;
434 return bfi_image_ct_cna_size; break; 895 case BFI_ASIC_GEN_CT:
435 case BFI_IMAGE_CB_FC: 896 return bfi_image_ct_size;
436 return bfi_image_cb_fc_size; break; 897 break;
437 default: return 0; 898 case BFI_ASIC_GEN_CT2:
899 return bfi_image_ct2_size;
900 break;
901 default:
902 return 0;
438 } 903 }
439} 904}
440 905
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index 89ae4c8f95a2..30df8a284715 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -17,7 +17,7 @@
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfa_ioc.h" 19#include "bfa_ioc.h"
20#include "bfi_cbreg.h" 20#include "bfi_reg.h"
21#include "bfa_defs.h" 21#include "bfa_defs.h"
22 22
23BFA_TRC_FILE(CNA, IOC_CB); 23BFA_TRC_FILE(CNA, IOC_CB);
@@ -69,21 +69,6 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
69static bfa_boolean_t 69static bfa_boolean_t
70bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) 70bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
71{ 71{
72 struct bfi_ioc_image_hdr_s fwhdr;
73 uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate);
74
75 if (fwstate == BFI_IOC_UNINIT)
76 return BFA_TRUE;
77
78 bfa_ioc_fwver_get(ioc, &fwhdr);
79
80 if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL)
81 return BFA_TRUE;
82
83 bfa_trc(ioc, fwstate);
84 bfa_trc(ioc, fwhdr.exec);
85 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
86
87 return BFA_TRUE; 72 return BFA_TRUE;
88} 73}
89 74
@@ -98,7 +83,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
98static void 83static void
99bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc) 84bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
100{ 85{
101 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); 86 writel(~0U, ioc->ioc_regs.err_set);
102 readl(ioc->ioc_regs.err_set); 87 readl(ioc->ioc_regs.err_set);
103} 88}
104 89
@@ -152,8 +137,8 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
152 */ 137 */
153 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); 138 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
154 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); 139 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
155 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG); 140 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
156 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG); 141 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
157 142
158 /* 143 /*
159 * IOC semaphore registers and serialization 144 * IOC semaphore registers and serialization
@@ -285,18 +270,18 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
285} 270}
286 271
287bfa_status_t 272bfa_status_t
288bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode) 273bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
289{ 274{
290 u32 pll_sclk, pll_fclk; 275 u32 pll_sclk, pll_fclk;
291 276
292 pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN | 277 pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
293 __APP_PLL_212_P0_1(3U) | 278 __APP_PLL_SCLK_P0_1(3U) |
294 __APP_PLL_212_JITLMT0_1(3U) | 279 __APP_PLL_SCLK_JITLMT0_1(3U) |
295 __APP_PLL_212_CNTLMT0_1(3U); 280 __APP_PLL_SCLK_CNTLMT0_1(3U);
296 pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN | 281 pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN |
297 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | 282 __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
298 __APP_PLL_400_JITLMT0_1(3U) | 283 __APP_PLL_LCLK_JITLMT0_1(3U) |
299 __APP_PLL_400_CNTLMT0_1(3U); 284 __APP_PLL_LCLK_CNTLMT0_1(3U);
300 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); 285 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
301 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); 286 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
302 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); 287 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
@@ -305,24 +290,24 @@ bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
305 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); 290 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
306 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); 291 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
307 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); 292 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
308 writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG); 293 writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
309 writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET, 294 writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
310 rb + APP_PLL_212_CTL_REG); 295 rb + APP_PLL_SCLK_CTL_REG);
311 writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG); 296 writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
312 writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET, 297 writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
313 rb + APP_PLL_400_CTL_REG); 298 rb + APP_PLL_LCLK_CTL_REG);
314 udelay(2); 299 udelay(2);
315 writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG); 300 writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
316 writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG); 301 writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
317 writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET, 302 writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
318 rb + APP_PLL_212_CTL_REG); 303 rb + APP_PLL_SCLK_CTL_REG);
319 writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET, 304 writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
320 rb + APP_PLL_400_CTL_REG); 305 rb + APP_PLL_LCLK_CTL_REG);
321 udelay(2000); 306 udelay(2000);
322 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); 307 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
323 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); 308 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
324 writel(pll_sclk, (rb + APP_PLL_212_CTL_REG)); 309 writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
325 writel(pll_fclk, (rb + APP_PLL_400_CTL_REG)); 310 writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));
326 311
327 return BFA_STATUS_OK; 312 return BFA_STATUS_OK;
328} 313}
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 93612520f0d2..d1b8f0caaa79 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -17,7 +17,7 @@
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfa_ioc.h" 19#include "bfa_ioc.h"
20#include "bfi_ctreg.h" 20#include "bfi_reg.h"
21#include "bfa_defs.h" 21#include "bfa_defs.h"
22 22
23BFA_TRC_FILE(CNA, IOC_CT); 23BFA_TRC_FILE(CNA, IOC_CT);
@@ -36,9 +36,6 @@ BFA_TRC_FILE(CNA, IOC_CT);
36 */ 36 */
37static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); 37static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
38static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); 38static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
39static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
40static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
42static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc); 39static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); 40static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
44static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc); 41static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
@@ -48,29 +45,7 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
48static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc); 45static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
49 46
50static struct bfa_ioc_hwif_s hwif_ct; 47static struct bfa_ioc_hwif_s hwif_ct;
51 48static struct bfa_ioc_hwif_s hwif_ct2;
52/*
53 * Called from bfa_ioc_attach() to map asic specific calls.
54 */
55void
56bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
57{
58 hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
59 hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
60 hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
61 hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
62 hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
63 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
64 hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
65 hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
66 hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
67 hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
68 hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
69 hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
70 hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
71
72 ioc->ioc_hwif = &hwif_ct;
73}
74 49
75/* 50/*
76 * Return true if firmware of current driver matches the running firmware. 51 * Return true if firmware of current driver matches the running firmware.
@@ -83,15 +58,9 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
83 struct bfi_ioc_image_hdr_s fwhdr; 58 struct bfi_ioc_image_hdr_s fwhdr;
84 59
85 /* 60 /*
86 * Firmware match check is relevant only for CNA.
87 */
88 if (!ioc->cna)
89 return BFA_TRUE;
90
91 /*
92 * If bios boot (flash based) -- do not increment usage count 61 * If bios boot (flash based) -- do not increment usage count
93 */ 62 */
94 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < 63 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
95 BFA_IOC_FWIMG_MINSZ) 64 BFA_IOC_FWIMG_MINSZ)
96 return BFA_TRUE; 65 return BFA_TRUE;
97 66
@@ -103,6 +72,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
103 */ 72 */
104 if (usecnt == 0) { 73 if (usecnt == 0) {
105 writel(1, ioc->ioc_regs.ioc_usage_reg); 74 writel(1, ioc->ioc_regs.ioc_usage_reg);
75 readl(ioc->ioc_regs.ioc_usage_sem_reg);
106 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 76 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
107 writel(0, ioc->ioc_regs.ioc_fail_sync); 77 writel(0, ioc->ioc_regs.ioc_fail_sync);
108 bfa_trc(ioc, usecnt); 78 bfa_trc(ioc, usecnt);
@@ -122,6 +92,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
122 */ 92 */
123 bfa_ioc_fwver_get(ioc, &fwhdr); 93 bfa_ioc_fwver_get(ioc, &fwhdr);
124 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { 94 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
95 readl(ioc->ioc_regs.ioc_usage_sem_reg);
125 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 96 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
126 bfa_trc(ioc, usecnt); 97 bfa_trc(ioc, usecnt);
127 return BFA_FALSE; 98 return BFA_FALSE;
@@ -132,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
132 */ 103 */
133 usecnt++; 104 usecnt++;
134 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 105 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
106 readl(ioc->ioc_regs.ioc_usage_sem_reg);
135 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 107 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
136 bfa_trc(ioc, usecnt); 108 bfa_trc(ioc, usecnt);
137 return BFA_TRUE; 109 return BFA_TRUE;
@@ -143,15 +115,9 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
143 u32 usecnt; 115 u32 usecnt;
144 116
145 /* 117 /*
146 * Firmware lock is relevant only for CNA.
147 */
148 if (!ioc->cna)
149 return;
150
151 /*
152 * If bios boot (flash based) -- do not decrement usage count 118 * If bios boot (flash based) -- do not decrement usage count
153 */ 119 */
154 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < 120 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
155 BFA_IOC_FWIMG_MINSZ) 121 BFA_IOC_FWIMG_MINSZ)
156 return; 122 return;
157 123
@@ -166,6 +132,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
166 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 132 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
167 bfa_trc(ioc, usecnt); 133 bfa_trc(ioc, usecnt);
168 134
135 readl(ioc->ioc_regs.ioc_usage_sem_reg);
169 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 136 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
170} 137}
171 138
@@ -175,14 +142,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
175static void 142static void
176bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc) 143bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
177{ 144{
178 if (ioc->cna) { 145 if (bfa_ioc_is_cna(ioc)) {
179 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); 146 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
180 writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); 147 writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
181 /* Wait for halt to take effect */ 148 /* Wait for halt to take effect */
182 readl(ioc->ioc_regs.ll_halt); 149 readl(ioc->ioc_regs.ll_halt);
183 readl(ioc->ioc_regs.alt_ll_halt); 150 readl(ioc->ioc_regs.alt_ll_halt);
184 } else { 151 } else {
185 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); 152 writel(~0U, ioc->ioc_regs.err_set);
186 readl(ioc->ioc_regs.err_set); 153 readl(ioc->ioc_regs.err_set);
187 } 154 }
188} 155}
@@ -190,7 +157,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
190/* 157/*
191 * Host to LPU mailbox message addresses 158 * Host to LPU mailbox message addresses
192 */ 159 */
193static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { 160static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
194 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, 161 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
195 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, 162 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
196 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, 163 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
@@ -200,21 +167,31 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
200/* 167/*
201 * Host <-> LPU mailbox command/status registers - port 0 168 * Host <-> LPU mailbox command/status registers - port 0
202 */ 169 */
203static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { 170static struct { u32 hfn, lpu; } ct_p0reg[] = {
204 { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT }, 171 { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
205 { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT }, 172 { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
206 { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT }, 173 { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
207 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } 174 { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
208}; 175};
209 176
210/* 177/*
211 * Host <-> LPU mailbox command/status registers - port 1 178 * Host <-> LPU mailbox command/status registers - port 1
212 */ 179 */
213static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { 180static struct { u32 hfn, lpu; } ct_p1reg[] = {
214 { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT }, 181 { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
215 { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT }, 182 { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
216 { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT }, 183 { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
217 { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT } 184 { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
185};
186
187static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
188 ct2_reg[] = {
189 { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
190 CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
191 CT2_HOSTFN_LPU0_READ_STAT},
192 { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
193 CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
194 CT2_HOSTFN_LPU1_READ_STAT},
218}; 195};
219 196
220static void 197static void
@@ -225,24 +202,24 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
225 202
226 rb = bfa_ioc_bar0(ioc); 203 rb = bfa_ioc_bar0(ioc);
227 204
228 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; 205 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
229 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; 206 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
230 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; 207 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
231 208
232 if (ioc->port_id == 0) { 209 if (ioc->port_id == 0) {
233 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 210 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
234 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 211 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
235 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; 212 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
236 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; 213 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
237 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; 214 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
238 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 215 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
239 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; 216 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
240 } else { 217 } else {
241 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); 218 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
242 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 219 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
243 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; 220 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
244 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; 221 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
245 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; 222 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
246 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 223 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
247 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; 224 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
248 } 225 }
@@ -252,8 +229,8 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
252 */ 229 */
253 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); 230 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
254 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); 231 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
255 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG); 232 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
256 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG); 233 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
257 234
258 /* 235 /*
259 * IOC semaphore registers and serialization 236 * IOC semaphore registers and serialization
@@ -276,6 +253,64 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
276 ioc->ioc_regs.err_set = (rb + ERR_SET_REG); 253 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
277} 254}
278 255
256static void
257bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
258{
259 void __iomem *rb;
260 int port = bfa_ioc_portid(ioc);
261
262 rb = bfa_ioc_bar0(ioc);
263
264 ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
265 ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
266 ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
267 ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
268 ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
269 ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
270
271 if (port == 0) {
272 ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
273 ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
274 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
275 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
276 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
277 } else {
278 ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
279 ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
280 ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
281 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
282 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
283 }
284
285 /*
286 * PSS control registers
287 */
288 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
289 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
290 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
291 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
292
293 /*
294 * IOC semaphore registers and serialization
295 */
296 ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
297 ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
298 ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
299 ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
300 ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
301
302 /*
303 * sram memory access
304 */
305 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
306 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
307
308 /*
309 * err set reg : for notification of hb failure in fcmode
310 */
311 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
312}
313
279/* 314/*
280 * Initialize IOC to port mapping. 315 * Initialize IOC to port mapping.
281 */ 316 */
@@ -298,6 +333,19 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
298 bfa_trc(ioc, ioc->port_id); 333 bfa_trc(ioc, ioc->port_id);
299} 334}
300 335
336static void
337bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
338{
339 void __iomem *rb = ioc->pcidev.pci_bar_kva;
340 u32 r32;
341
342 r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
343 ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
344
345 bfa_trc(ioc, bfa_ioc_pcifn(ioc));
346 bfa_trc(ioc, ioc->port_id);
347}
348
301/* 349/*
302 * Set interrupt mode for a function: INTX or MSIX 350 * Set interrupt mode for a function: INTX or MSIX
303 */ 351 */
@@ -316,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
316 /* 364 /*
317 * If already in desired mode, do not change anything 365 * If already in desired mode, do not change anything
318 */ 366 */
319 if (!msix && mode) 367 if ((!msix && mode) || (msix && !mode))
320 return; 368 return;
321 369
322 if (msix) 370 if (msix)
@@ -331,6 +379,20 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
331 writel(r32, rb + FNC_PERS_REG); 379 writel(r32, rb + FNC_PERS_REG);
332} 380}
333 381
382bfa_boolean_t
383bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
384{
385 u32 r32;
386
387 r32 = readl(ioc->ioc_regs.lpu_read_stat);
388 if (r32) {
389 writel(1, ioc->ioc_regs.lpu_read_stat);
390 return BFA_TRUE;
391 }
392
393 return BFA_FALSE;
394}
395
334/* 396/*
335 * Cleanup hw semaphore and usecnt registers 397 * Cleanup hw semaphore and usecnt registers
336 */ 398 */
@@ -338,9 +400,10 @@ static void
338bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) 400bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
339{ 401{
340 402
341 if (ioc->cna) { 403 if (bfa_ioc_is_cna(ioc)) {
342 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 404 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
343 writel(0, ioc->ioc_regs.ioc_usage_reg); 405 writel(0, ioc->ioc_regs.ioc_usage_reg);
406 readl(ioc->ioc_regs.ioc_usage_sem_reg);
344 writel(1, ioc->ioc_regs.ioc_usage_sem_reg); 407 writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
345 } 408 }
346 409
@@ -449,32 +512,99 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
449 return BFA_FALSE; 512 return BFA_FALSE;
450} 513}
451 514
515/**
516 * Called from bfa_ioc_attach() to map asic specific calls.
517 */
518static void
519bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
520{
521 hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
522 hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
523 hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
524 hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
525 hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
526 hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
527 hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
528 hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
529 hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
530}
531
532/**
533 * Called from bfa_ioc_attach() to map asic specific calls.
534 */
535void
536bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
537{
538 bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
539
540 hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
541 hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
542 hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
543 hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
544 ioc->ioc_hwif = &hwif_ct;
545}
546
547/**
548 * Called from bfa_ioc_attach() to map asic specific calls.
549 */
550void
551bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
552{
553 bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
554
555 hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
556 hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
557 hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
558 hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
559 hwif_ct2.ioc_isr_mode_set = NULL;
560 ioc->ioc_hwif = &hwif_ct2;
561}
562
452/* 563/*
453 * Check the firmware state to know if pll_init has been completed already 564 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
454 */ 565 */
455bfa_boolean_t 566#define HOSTFN_MSIX_DEFAULT 64
456bfa_ioc_ct_pll_init_complete(void __iomem *rb) 567#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
568#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
569#define __MSIX_VT_NUMVT__MK 0x003ff800
570#define __MSIX_VT_NUMVT__SH 11
571#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
572#define __MSIX_VT_OFST_ 0x000007ff
573void
574bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
457{ 575{
458 if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) || 576 void __iomem *rb = ioc->pcidev.pci_bar_kva;
459 (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP)) 577 u32 r32;
460 return BFA_TRUE;
461 578
462 return BFA_FALSE; 579 r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
580 if (r32 & __MSIX_VT_NUMVT__MK) {
581 writel(r32 & __MSIX_VT_OFST_,
582 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
583 return;
584 }
585
586 writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
587 HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
588 rb + HOSTFN_MSIX_VT_OFST_NUMVT);
589 writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
590 rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
463} 591}
464 592
465bfa_status_t 593bfa_status_t
466bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode) 594bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
467{ 595{
468 u32 pll_sclk, pll_fclk, r32; 596 u32 pll_sclk, pll_fclk, r32;
597 bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);
598
599 pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
600 __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
601 __APP_PLL_SCLK_JITLMT0_1(3U) |
602 __APP_PLL_SCLK_CNTLMT0_1(1U);
603 pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
604 __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
605 __APP_PLL_LCLK_JITLMT0_1(3U) |
606 __APP_PLL_LCLK_CNTLMT0_1(1U);
469 607
470 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
471 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
472 __APP_PLL_312_JITLMT0_1(3U) |
473 __APP_PLL_312_CNTLMT0_1(1U);
474 pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
475 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
476 __APP_PLL_425_JITLMT0_1(3U) |
477 __APP_PLL_425_CNTLMT0_1(1U);
478 if (fcmode) { 608 if (fcmode) {
479 writel(0, (rb + OP_MODE)); 609 writel(0, (rb + OP_MODE));
480 writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 | 610 writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
@@ -491,20 +621,21 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
491 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); 621 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
492 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); 622 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
493 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); 623 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
494 writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET, 624 writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
495 rb + APP_PLL_312_CTL_REG); 625 rb + APP_PLL_SCLK_CTL_REG);
496 writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET, 626 writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
497 rb + APP_PLL_425_CTL_REG); 627 rb + APP_PLL_LCLK_CTL_REG);
498 writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE, 628 writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
499 rb + APP_PLL_312_CTL_REG); 629 __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
500 writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE, 630 writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
501 rb + APP_PLL_425_CTL_REG); 631 __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
502 readl(rb + HOSTFN0_INT_MSK); 632 readl(rb + HOSTFN0_INT_MSK);
503 udelay(2000); 633 udelay(2000);
504 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); 634 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
505 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); 635 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
506 writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG); 636 writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
507 writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG); 637 writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
638
508 if (!fcmode) { 639 if (!fcmode) {
509 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); 640 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
510 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); 641 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
@@ -524,3 +655,206 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
524 writel(0, (rb + MBIST_CTL_REG)); 655 writel(0, (rb + MBIST_CTL_REG));
525 return BFA_STATUS_OK; 656 return BFA_STATUS_OK;
526} 657}
658
659static void
660bfa_ioc_ct2_sclk_init(void __iomem *rb)
661{
662 u32 r32;
663
664 /*
665 * put s_clk PLL and PLL FSM in reset
666 */
667 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
668 r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
669 r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
670 __APP_PLL_SCLK_LOGIC_SOFT_RESET);
671 writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
672
673 /*
674 * Ignore mode and program for the max clock (which is FC16)
675 * Firmware/NFC will do the PLL init appropiately
676 */
677 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
678 r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
679 writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
680
681 /*
682 * while doing PLL init dont clock gate ethernet subsystem
683 */
684 r32 = readl((rb + CT2_CHIP_MISC_PRG));
685 writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
686
687 r32 = readl((rb + CT2_PCIE_MISC_REG));
688 writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
689
690 /*
691 * set sclk value
692 */
693 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
694 r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
695 __APP_PLL_SCLK_CLK_DIV2);
696 writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
697
698 /*
699 * poll for s_clk lock or delay 1ms
700 */
701 udelay(1000);
702}
703
704static void
705bfa_ioc_ct2_lclk_init(void __iomem *rb)
706{
707 u32 r32;
708
709 /*
710 * put l_clk PLL and PLL FSM in reset
711 */
712 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
713 r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
714 r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
715 __APP_PLL_LCLK_LOGIC_SOFT_RESET);
716 writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
717
718 /*
719 * set LPU speed (set for FC16 which will work for other modes)
720 */
721 r32 = readl((rb + CT2_CHIP_MISC_PRG));
722 writel(r32, (rb + CT2_CHIP_MISC_PRG));
723
724 /*
725 * set LPU half speed (set for FC16 which will work for other modes)
726 */
727 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
728 writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
729
730 /*
731 * set lclk for mode (set for FC16)
732 */
733 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
734 r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
735 r32 |= 0x20c1731b;
736 writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
737
738 /*
739 * poll for s_clk lock or delay 1ms
740 */
741 udelay(1000);
742}
743
744static void
745bfa_ioc_ct2_mem_init(void __iomem *rb)
746{
747 u32 r32;
748
749 r32 = readl((rb + PSS_CTL_REG));
750 r32 &= ~__PSS_LMEM_RESET;
751 writel(r32, (rb + PSS_CTL_REG));
752 udelay(1000);
753
754 writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
755 udelay(1000);
756 writel(0, (rb + CT2_MBIST_CTL_REG));
757}
758
759void
760bfa_ioc_ct2_mac_reset(void __iomem *rb)
761{
762 u32 r32;
763
764 bfa_ioc_ct2_sclk_init(rb);
765 bfa_ioc_ct2_lclk_init(rb);
766
767 /*
768 * release soft reset on s_clk & l_clk
769 */
770 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
771 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
772 (rb + CT2_APP_PLL_SCLK_CTL_REG));
773
774 /*
775 * release soft reset on s_clk & l_clk
776 */
777 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
778 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
779 (rb + CT2_APP_PLL_LCLK_CTL_REG));
780
781 /* put port0, port1 MAC & AHB in reset */
782 writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
783 rb + CT2_CSI_MAC_CONTROL_REG(0));
784 writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
785 rb + CT2_CSI_MAC_CONTROL_REG(1));
786}
787
788#define CT2_NFC_MAX_DELAY 1000
789bfa_status_t
790bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
791{
792 u32 wgn, r32;
793 int i;
794
795 /*
796 * Initialize PLL if not already done by NFC
797 */
798 wgn = readl(rb + CT2_WGN_STATUS);
799 if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
800 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
801 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
802 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
803 if (r32 & __NFC_CONTROLLER_HALTED)
804 break;
805 udelay(1000);
806 }
807 }
808
809 /*
810 * Mask the interrupts and clear any
811 * pending interrupts.
812 */
813 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
814 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
815
816 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
817 if (r32 == 1) {
818 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
819 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
820 }
821 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
822 if (r32 == 1) {
823 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
824 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
825 }
826
827 bfa_ioc_ct2_mac_reset(rb);
828 bfa_ioc_ct2_sclk_init(rb);
829 bfa_ioc_ct2_lclk_init(rb);
830
831 /*
832 * release soft reset on s_clk & l_clk
833 */
834 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
835 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
836 (rb + CT2_APP_PLL_SCLK_CTL_REG));
837
838 /*
839 * release soft reset on s_clk & l_clk
840 */
841 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
842 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
843 (rb + CT2_APP_PLL_LCLK_CTL_REG));
844
845 /*
846 * Announce flash device presence, if flash was corrupted.
847 */
848 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
849 r32 = readl((rb + PSS_GPIO_OUT_REG));
850 writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
851 r32 = readl((rb + PSS_GPIO_OE_REG));
852 writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
853 }
854
855 bfa_ioc_ct2_mem_init(rb);
856
857 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
858 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
859 return BFA_STATUS_OK;
860}
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index ab79ff6fdeea..1c6efd40a673 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -29,14 +29,21 @@
29#include "bfa_port.h" 29#include "bfa_port.h"
30 30
31struct bfa_modules_s { 31struct bfa_modules_s {
32 struct bfa_fcdiag_s fcdiag; /* fcdiag module */
32 struct bfa_fcport_s fcport; /* fc port module */ 33 struct bfa_fcport_s fcport; /* fc port module */
33 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */ 34 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
34 struct bfa_lps_mod_s lps_mod; /* fcxp module */ 35 struct bfa_lps_mod_s lps_mod; /* fcxp module */
35 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */ 36 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
36 struct bfa_rport_mod_s rport_mod; /* remote port module */ 37 struct bfa_rport_mod_s rport_mod; /* remote port module */
37 struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */ 38 struct bfa_fcp_mod_s fcp_mod; /* FCP initiator module */
38 struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */ 39 struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */
39 struct bfa_port_s port; /* Physical port module */ 40 struct bfa_port_s port; /* Physical port module */
41 struct bfa_ablk_s ablk; /* ASIC block config module */
42 struct bfa_cee_s cee; /* CEE Module */
43 struct bfa_sfp_s sfp; /* SFP module */
44 struct bfa_flash_s flash; /* flash module */
45 struct bfa_diag_s diag_mod; /* diagnostics module */
46 struct bfa_phy_s phy; /* phy module */
40}; 47};
41 48
42/* 49/*
@@ -51,17 +58,16 @@ enum {
51 BFA_TRC_HAL_IOCFC_CB = 5, 58 BFA_TRC_HAL_IOCFC_CB = 5,
52}; 59};
53 60
54
55/* 61/*
56 * Macro to define a new BFA module 62 * Macro to define a new BFA module
57 */ 63 */
58#define BFA_MODULE(__mod) \ 64#define BFA_MODULE(__mod) \
59 static void bfa_ ## __mod ## _meminfo( \ 65 static void bfa_ ## __mod ## _meminfo( \
60 struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \ 66 struct bfa_iocfc_cfg_s *cfg, \
61 u32 *dm_len); \ 67 struct bfa_meminfo_s *meminfo, \
68 struct bfa_s *bfa); \
62 static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \ 69 static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \
63 void *bfad, struct bfa_iocfc_cfg_s *cfg, \ 70 void *bfad, struct bfa_iocfc_cfg_s *cfg, \
64 struct bfa_meminfo_s *meminfo, \
65 struct bfa_pcidev_s *pcidev); \ 71 struct bfa_pcidev_s *pcidev); \
66 static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \ 72 static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
67 static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \ 73 static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \
@@ -87,11 +93,11 @@ enum {
87 * can leave entry points as NULL) 93 * can leave entry points as NULL)
88 */ 94 */
89struct bfa_module_s { 95struct bfa_module_s {
90 void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 96 void (*meminfo) (struct bfa_iocfc_cfg_s *cfg,
91 u32 *dm_len); 97 struct bfa_meminfo_s *meminfo,
98 struct bfa_s *bfa);
92 void (*attach) (struct bfa_s *bfa, void *bfad, 99 void (*attach) (struct bfa_s *bfa, void *bfad,
93 struct bfa_iocfc_cfg_s *cfg, 100 struct bfa_iocfc_cfg_s *cfg,
94 struct bfa_meminfo_s *meminfo,
95 struct bfa_pcidev_s *pcidev); 101 struct bfa_pcidev_s *pcidev);
96 void (*detach) (struct bfa_s *bfa); 102 void (*detach) (struct bfa_s *bfa);
97 void (*start) (struct bfa_s *bfa); 103 void (*start) (struct bfa_s *bfa);
@@ -109,19 +115,20 @@ struct bfa_s {
109 struct bfa_timer_mod_s timer_mod; /* timer module */ 115 struct bfa_timer_mod_s timer_mod; /* timer module */
110 struct bfa_modules_s modules; /* BFA modules */ 116 struct bfa_modules_s modules; /* BFA modules */
111 struct list_head comp_q; /* pending completions */ 117 struct list_head comp_q; /* pending completions */
112 bfa_boolean_t rme_process; /* RME processing enabled */ 118 bfa_boolean_t queue_process; /* queue processing enabled */
113 struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; 119 struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
114 bfa_boolean_t fcs; /* FCS is attached to BFA */ 120 bfa_boolean_t fcs; /* FCS is attached to BFA */
115 struct bfa_msix_s msix; 121 struct bfa_msix_s msix;
116}; 122};
117 123
118extern bfa_boolean_t bfa_auto_recover; 124extern bfa_boolean_t bfa_auto_recover;
125extern struct bfa_module_s hal_mod_fcdiag;
119extern struct bfa_module_s hal_mod_sgpg; 126extern struct bfa_module_s hal_mod_sgpg;
120extern struct bfa_module_s hal_mod_fcport; 127extern struct bfa_module_s hal_mod_fcport;
121extern struct bfa_module_s hal_mod_fcxp; 128extern struct bfa_module_s hal_mod_fcxp;
122extern struct bfa_module_s hal_mod_lps; 129extern struct bfa_module_s hal_mod_lps;
123extern struct bfa_module_s hal_mod_uf; 130extern struct bfa_module_s hal_mod_uf;
124extern struct bfa_module_s hal_mod_rport; 131extern struct bfa_module_s hal_mod_rport;
125extern struct bfa_module_s hal_mod_fcpim; 132extern struct bfa_module_s hal_mod_fcp;
126 133
127#endif /* __BFA_MODULES_H__ */ 134#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index 3f8e9d6066ec..95e4ad8759ac 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -24,8 +24,6 @@
24 24
25BFA_TRC_FILE(CNA, PORT); 25BFA_TRC_FILE(CNA, PORT);
26 26
27#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
28
29static void 27static void
30bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats) 28bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
31{ 29{
@@ -236,6 +234,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
236{ 234{
237 struct bfi_port_generic_req_s *m; 235 struct bfi_port_generic_req_s *m;
238 236
237 /* If port is PBC disabled, return error */
238 if (port->pbc_disabled) {
239 bfa_trc(port, BFA_STATUS_PBC);
240 return BFA_STATUS_PBC;
241 }
242
239 if (bfa_ioc_is_disabled(port->ioc)) { 243 if (bfa_ioc_is_disabled(port->ioc)) {
240 bfa_trc(port, BFA_STATUS_IOC_DISABLED); 244 bfa_trc(port, BFA_STATUS_IOC_DISABLED);
241 return BFA_STATUS_IOC_DISABLED; 245 return BFA_STATUS_IOC_DISABLED;
@@ -280,6 +284,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
280{ 284{
281 struct bfi_port_generic_req_s *m; 285 struct bfi_port_generic_req_s *m;
282 286
287 /* If port is PBC disabled, return error */
288 if (port->pbc_disabled) {
289 bfa_trc(port, BFA_STATUS_PBC);
290 return BFA_STATUS_PBC;
291 }
292
283 if (bfa_ioc_is_disabled(port->ioc)) { 293 if (bfa_ioc_is_disabled(port->ioc)) {
284 bfa_trc(port, BFA_STATUS_IOC_DISABLED); 294 bfa_trc(port, BFA_STATUS_IOC_DISABLED);
285 return BFA_STATUS_IOC_DISABLED; 295 return BFA_STATUS_IOC_DISABLED;
@@ -387,32 +397,43 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
387} 397}
388 398
389/* 399/*
390 * bfa_port_hbfail() 400 * bfa_port_notify()
391 * 401 *
402 * Port module IOC event handler
392 * 403 *
393 * @param[in] Pointer to the Port module data structure. 404 * @param[in] Pointer to the Port module data structure.
405 * @param[in] IOC event structure
394 * 406 *
395 * @return void 407 * @return void
396 */ 408 */
397void 409void
398bfa_port_hbfail(void *arg) 410bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
399{ 411{
400 struct bfa_port_s *port = (struct bfa_port_s *) arg; 412 struct bfa_port_s *port = (struct bfa_port_s *) arg;
401 413
402 /* Fail any pending get_stats/clear_stats requests */ 414 switch (event) {
403 if (port->stats_busy) { 415 case BFA_IOC_E_DISABLED:
404 if (port->stats_cbfn) 416 case BFA_IOC_E_FAILED:
405 port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED); 417 /* Fail any pending get_stats/clear_stats requests */
406 port->stats_cbfn = NULL; 418 if (port->stats_busy) {
407 port->stats_busy = BFA_FALSE; 419 if (port->stats_cbfn)
408 } 420 port->stats_cbfn(port->stats_cbarg,
409 421 BFA_STATUS_FAILED);
410 /* Clear any enable/disable is pending */ 422 port->stats_cbfn = NULL;
411 if (port->endis_pending) { 423 port->stats_busy = BFA_FALSE;
412 if (port->endis_cbfn) 424 }
413 port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED); 425
414 port->endis_cbfn = NULL; 426 /* Clear any enable/disable is pending */
415 port->endis_pending = BFA_FALSE; 427 if (port->endis_pending) {
428 if (port->endis_cbfn)
429 port->endis_cbfn(port->endis_cbarg,
430 BFA_STATUS_FAILED);
431 port->endis_cbfn = NULL;
432 port->endis_pending = BFA_FALSE;
433 }
434 break;
435 default:
436 break;
416 } 437 }
417} 438}
418 439
@@ -445,10 +466,12 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
445 port->endis_pending = BFA_FALSE; 466 port->endis_pending = BFA_FALSE;
446 port->stats_cbfn = NULL; 467 port->stats_cbfn = NULL;
447 port->endis_cbfn = NULL; 468 port->endis_cbfn = NULL;
469 port->pbc_disabled = BFA_FALSE;
448 470
449 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); 471 bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
450 bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); 472 bfa_q_qe_init(&port->ioc_notify);
451 list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q); 473 bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port);
474 list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q);
452 475
453 /* 476 /*
454 * initialize time stamp for stats reset 477 * initialize time stamp for stats reset
@@ -458,3 +481,368 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
458 481
459 bfa_trc(port, 0); 482 bfa_trc(port, 0);
460} 483}
484
485/*
486 * CEE module specific definitions
487 */
488
489/*
490 * bfa_cee_get_attr_isr()
491 *
492 * @brief CEE ISR for get-attributes responses from f/w
493 *
494 * @param[in] cee - Pointer to the CEE module
495 * status - Return status from the f/w
496 *
497 * @return void
498 */
499static void
500bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
501{
502 struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote;
503
504 cee->get_attr_status = status;
505 bfa_trc(cee, 0);
506 if (status == BFA_STATUS_OK) {
507 bfa_trc(cee, 0);
508 memcpy(cee->attr, cee->attr_dma.kva,
509 sizeof(struct bfa_cee_attr_s));
510 lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live);
511 lldp_cfg->enabled_system_cap =
512 be16_to_cpu(lldp_cfg->enabled_system_cap);
513 }
514 cee->get_attr_pending = BFA_FALSE;
515 if (cee->cbfn.get_attr_cbfn) {
516 bfa_trc(cee, 0);
517 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
518 }
519}
520
521/*
522 * bfa_cee_get_stats_isr()
523 *
524 * @brief CEE ISR for get-stats responses from f/w
525 *
526 * @param[in] cee - Pointer to the CEE module
527 * status - Return status from the f/w
528 *
529 * @return void
530 */
531static void
532bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
533{
534 u32 *buffer;
535 int i;
536
537 cee->get_stats_status = status;
538 bfa_trc(cee, 0);
539 if (status == BFA_STATUS_OK) {
540 bfa_trc(cee, 0);
541 memcpy(cee->stats, cee->stats_dma.kva,
542 sizeof(struct bfa_cee_stats_s));
543 /* swap the cee stats */
544 buffer = (u32 *)cee->stats;
545 for (i = 0; i < (sizeof(struct bfa_cee_stats_s) /
546 sizeof(u32)); i++)
547 buffer[i] = cpu_to_be32(buffer[i]);
548 }
549 cee->get_stats_pending = BFA_FALSE;
550 bfa_trc(cee, 0);
551 if (cee->cbfn.get_stats_cbfn) {
552 bfa_trc(cee, 0);
553 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
554 }
555}
556
557/*
558 * bfa_cee_reset_stats_isr()
559 *
560 * @brief CEE ISR for reset-stats responses from f/w
561 *
562 * @param[in] cee - Pointer to the CEE module
563 * status - Return status from the f/w
564 *
565 * @return void
566 */
567static void
568bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
569{
570 cee->reset_stats_status = status;
571 cee->reset_stats_pending = BFA_FALSE;
572 if (cee->cbfn.reset_stats_cbfn)
573 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
574}
575
576/*
577 * bfa_cee_meminfo()
578 *
579 * @brief Returns the size of the DMA memory needed by CEE module
580 *
581 * @param[in] void
582 *
583 * @return Size of DMA region
584 */
585u32
586bfa_cee_meminfo(void)
587{
588 return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) +
589 BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
590}
591
592/*
593 * bfa_cee_mem_claim()
594 *
595 * @brief Initialized CEE DMA Memory
596 *
597 * @param[in] cee CEE module pointer
598 * dma_kva Kernel Virtual Address of CEE DMA Memory
599 * dma_pa Physical Address of CEE DMA Memory
600 *
601 * @return void
602 */
603void
604bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
605{
606 cee->attr_dma.kva = dma_kva;
607 cee->attr_dma.pa = dma_pa;
608 cee->stats_dma.kva = dma_kva + BFA_ROUNDUP(
609 sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
610 cee->stats_dma.pa = dma_pa + BFA_ROUNDUP(
611 sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
612 cee->attr = (struct bfa_cee_attr_s *) dma_kva;
613 cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP(
614 sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ));
615}
616
617/*
618 * bfa_cee_get_attr()
619 *
620 * @brief
621 * Send the request to the f/w to fetch CEE attributes.
622 *
623 * @param[in] Pointer to the CEE module data structure.
624 *
625 * @return Status
626 */
627
628bfa_status_t
629bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
630 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
631{
632 struct bfi_cee_get_req_s *cmd;
633
634 WARN_ON((cee == NULL) || (cee->ioc == NULL));
635 bfa_trc(cee, 0);
636 if (!bfa_ioc_is_operational(cee->ioc)) {
637 bfa_trc(cee, 0);
638 return BFA_STATUS_IOC_FAILURE;
639 }
640 if (cee->get_attr_pending == BFA_TRUE) {
641 bfa_trc(cee, 0);
642 return BFA_STATUS_DEVBUSY;
643 }
644 cee->get_attr_pending = BFA_TRUE;
645 cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg;
646 cee->attr = attr;
647 cee->cbfn.get_attr_cbfn = cbfn;
648 cee->cbfn.get_attr_cbarg = cbarg;
649 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
650 bfa_ioc_portid(cee->ioc));
651 bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
652 bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
653
654 return BFA_STATUS_OK;
655}
656
657/*
658 * bfa_cee_get_stats()
659 *
660 * @brief
661 * Send the request to the f/w to fetch CEE statistics.
662 *
663 * @param[in] Pointer to the CEE module data structure.
664 *
665 * @return Status
666 */
667
668bfa_status_t
669bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
670 bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
671{
672 struct bfi_cee_get_req_s *cmd;
673
674 WARN_ON((cee == NULL) || (cee->ioc == NULL));
675
676 if (!bfa_ioc_is_operational(cee->ioc)) {
677 bfa_trc(cee, 0);
678 return BFA_STATUS_IOC_FAILURE;
679 }
680 if (cee->get_stats_pending == BFA_TRUE) {
681 bfa_trc(cee, 0);
682 return BFA_STATUS_DEVBUSY;
683 }
684 cee->get_stats_pending = BFA_TRUE;
685 cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg;
686 cee->stats = stats;
687 cee->cbfn.get_stats_cbfn = cbfn;
688 cee->cbfn.get_stats_cbarg = cbarg;
689 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
690 bfa_ioc_portid(cee->ioc));
691 bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
692 bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
693
694 return BFA_STATUS_OK;
695}
696
697/*
698 * bfa_cee_reset_stats()
699 *
700 * @brief Clears CEE Stats in the f/w.
701 *
702 * @param[in] Pointer to the CEE module data structure.
703 *
704 * @return Status
705 */
706
707bfa_status_t
708bfa_cee_reset_stats(struct bfa_cee_s *cee,
709 bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg)
710{
711 struct bfi_cee_reset_stats_s *cmd;
712
713 WARN_ON((cee == NULL) || (cee->ioc == NULL));
714 if (!bfa_ioc_is_operational(cee->ioc)) {
715 bfa_trc(cee, 0);
716 return BFA_STATUS_IOC_FAILURE;
717 }
718 if (cee->reset_stats_pending == BFA_TRUE) {
719 bfa_trc(cee, 0);
720 return BFA_STATUS_DEVBUSY;
721 }
722 cee->reset_stats_pending = BFA_TRUE;
723 cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg;
724 cee->cbfn.reset_stats_cbfn = cbfn;
725 cee->cbfn.reset_stats_cbarg = cbarg;
726 bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
727 bfa_ioc_portid(cee->ioc));
728 bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
729
730 return BFA_STATUS_OK;
731}
732
733/*
734 * bfa_cee_isrs()
735 *
736 * @brief Handles Mail-box interrupts for CEE module.
737 *
738 * @param[in] Pointer to the CEE module data structure.
739 *
740 * @return void
741 */
742
743void
744bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
745{
746 union bfi_cee_i2h_msg_u *msg;
747 struct bfi_cee_get_rsp_s *get_rsp;
748 struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg;
749 msg = (union bfi_cee_i2h_msg_u *) m;
750 get_rsp = (struct bfi_cee_get_rsp_s *) m;
751 bfa_trc(cee, msg->mh.msg_id);
752 switch (msg->mh.msg_id) {
753 case BFI_CEE_I2H_GET_CFG_RSP:
754 bfa_trc(cee, get_rsp->cmd_status);
755 bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
756 break;
757 case BFI_CEE_I2H_GET_STATS_RSP:
758 bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
759 break;
760 case BFI_CEE_I2H_RESET_STATS_RSP:
761 bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
762 break;
763 default:
764 WARN_ON(1);
765 }
766}
767
768/*
769 * bfa_cee_notify()
770 *
771 * @brief CEE module IOC event handler.
772 *
773 * @param[in] Pointer to the CEE module data structure.
774 * @param[in] IOC event type
775 *
776 * @return void
777 */
778
779void
780bfa_cee_notify(void *arg, enum bfa_ioc_event_e event)
781{
782 struct bfa_cee_s *cee = (struct bfa_cee_s *) arg;
783
784 bfa_trc(cee, event);
785
786 switch (event) {
787 case BFA_IOC_E_DISABLED:
788 case BFA_IOC_E_FAILED:
789 if (cee->get_attr_pending == BFA_TRUE) {
790 cee->get_attr_status = BFA_STATUS_FAILED;
791 cee->get_attr_pending = BFA_FALSE;
792 if (cee->cbfn.get_attr_cbfn) {
793 cee->cbfn.get_attr_cbfn(
794 cee->cbfn.get_attr_cbarg,
795 BFA_STATUS_FAILED);
796 }
797 }
798 if (cee->get_stats_pending == BFA_TRUE) {
799 cee->get_stats_status = BFA_STATUS_FAILED;
800 cee->get_stats_pending = BFA_FALSE;
801 if (cee->cbfn.get_stats_cbfn) {
802 cee->cbfn.get_stats_cbfn(
803 cee->cbfn.get_stats_cbarg,
804 BFA_STATUS_FAILED);
805 }
806 }
807 if (cee->reset_stats_pending == BFA_TRUE) {
808 cee->reset_stats_status = BFA_STATUS_FAILED;
809 cee->reset_stats_pending = BFA_FALSE;
810 if (cee->cbfn.reset_stats_cbfn) {
811 cee->cbfn.reset_stats_cbfn(
812 cee->cbfn.reset_stats_cbarg,
813 BFA_STATUS_FAILED);
814 }
815 }
816 break;
817
818 default:
819 break;
820 }
821}
822
823/*
824 * bfa_cee_attach()
825 *
826 * @brief CEE module-attach API
827 *
828 * @param[in] cee - Pointer to the CEE module data structure
829 * ioc - Pointer to the ioc module data structure
830 * dev - Pointer to the device driver module data structure
831 * The device driver specific mbox ISR functions have
832 * this pointer as one of the parameters.
833 *
834 * @return void
835 */
836void
837bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc,
838 void *dev)
839{
840 WARN_ON(cee == NULL);
841 cee->dev = dev;
842 cee->ioc = ioc;
843
844 bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
845 bfa_q_qe_init(&cee->ioc_notify);
846 bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
847 list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q);
848}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index c4ee9db6b470..947f897328d6 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -43,12 +43,16 @@ struct bfa_port_s {
43 bfa_port_endis_cbfn_t endis_cbfn; 43 bfa_port_endis_cbfn_t endis_cbfn;
44 void *endis_cbarg; 44 void *endis_cbarg;
45 bfa_status_t endis_status; 45 bfa_status_t endis_status;
46 struct bfa_ioc_hbfail_notify_s hbfail; 46 struct bfa_ioc_notify_s ioc_notify;
47 bfa_boolean_t pbc_disabled;
48 struct bfa_mem_dma_s port_dma;
47}; 49};
48 50
51#define BFA_MEM_PORT_DMA(__bfa) (&((__bfa)->modules.port.port_dma))
52
49void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, 53void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
50 void *dev, struct bfa_trc_mod_s *trcmod); 54 void *dev, struct bfa_trc_mod_s *trcmod);
51void bfa_port_hbfail(void *arg); 55void bfa_port_notify(void *arg, enum bfa_ioc_event_e event);
52 56
53bfa_status_t bfa_port_get_stats(struct bfa_port_s *port, 57bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
54 union bfa_port_stats_u *stats, 58 union bfa_port_stats_u *stats,
@@ -62,4 +66,58 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port,
62u32 bfa_port_meminfo(void); 66u32 bfa_port_meminfo(void);
63void bfa_port_mem_claim(struct bfa_port_s *port, 67void bfa_port_mem_claim(struct bfa_port_s *port,
64 u8 *dma_kva, u64 dma_pa); 68 u8 *dma_kva, u64 dma_pa);
69
70/*
71 * CEE declaration
72 */
73typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status);
74typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status);
75typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status);
76
77struct bfa_cee_cbfn_s {
78 bfa_cee_get_attr_cbfn_t get_attr_cbfn;
79 void *get_attr_cbarg;
80 bfa_cee_get_stats_cbfn_t get_stats_cbfn;
81 void *get_stats_cbarg;
82 bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
83 void *reset_stats_cbarg;
84};
85
86struct bfa_cee_s {
87 void *dev;
88 bfa_boolean_t get_attr_pending;
89 bfa_boolean_t get_stats_pending;
90 bfa_boolean_t reset_stats_pending;
91 bfa_status_t get_attr_status;
92 bfa_status_t get_stats_status;
93 bfa_status_t reset_stats_status;
94 struct bfa_cee_cbfn_s cbfn;
95 struct bfa_ioc_notify_s ioc_notify;
96 struct bfa_trc_mod_s *trcmod;
97 struct bfa_cee_attr_s *attr;
98 struct bfa_cee_stats_s *stats;
99 struct bfa_dma_s attr_dma;
100 struct bfa_dma_s stats_dma;
101 struct bfa_ioc_s *ioc;
102 struct bfa_mbox_cmd_s get_cfg_mb;
103 struct bfa_mbox_cmd_s get_stats_mb;
104 struct bfa_mbox_cmd_s reset_stats_mb;
105 struct bfa_mem_dma_s cee_dma;
106};
107
108#define BFA_MEM_CEE_DMA(__bfa) (&((__bfa)->modules.cee.cee_dma))
109
110u32 bfa_cee_meminfo(void);
111void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa);
112void bfa_cee_attach(struct bfa_cee_s *cee,
113 struct bfa_ioc_s *ioc, void *dev);
114bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee,
115 struct bfa_cee_attr_s *attr,
116 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
117bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee,
118 struct bfa_cee_stats_s *stats,
119 bfa_cee_get_stats_cbfn_t cbfn, void *cbarg);
120bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee,
121 bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
122
65#endif /* __BFA_PORT_H__ */ 123#endif /* __BFA_PORT_H__ */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 16d9a5f61c18..21caaefce99f 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -21,6 +21,7 @@
21#include "bfa_modules.h" 21#include "bfa_modules.h"
22 22
23BFA_TRC_FILE(HAL, FCXP); 23BFA_TRC_FILE(HAL, FCXP);
24BFA_MODULE(fcdiag);
24BFA_MODULE(fcxp); 25BFA_MODULE(fcxp);
25BFA_MODULE(sgpg); 26BFA_MODULE(sgpg);
26BFA_MODULE(lps); 27BFA_MODULE(lps);
@@ -113,11 +114,10 @@ static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
113/* 114/*
114 * forward declarations for LPS functions 115 * forward declarations for LPS functions
115 */ 116 */
116static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 117static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
117 u32 *dm_len); 118 struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
118static void bfa_lps_attach(struct bfa_s *bfa, void *bfad, 119static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
119 struct bfa_iocfc_cfg_s *cfg, 120 struct bfa_iocfc_cfg_s *cfg,
120 struct bfa_meminfo_s *meminfo,
121 struct bfa_pcidev_s *pcidev); 121 struct bfa_pcidev_s *pcidev);
122static void bfa_lps_detach(struct bfa_s *bfa); 122static void bfa_lps_detach(struct bfa_s *bfa);
123static void bfa_lps_start(struct bfa_s *bfa); 123static void bfa_lps_start(struct bfa_s *bfa);
@@ -125,6 +125,7 @@ static void bfa_lps_stop(struct bfa_s *bfa);
125static void bfa_lps_iocdisable(struct bfa_s *bfa); 125static void bfa_lps_iocdisable(struct bfa_s *bfa);
126static void bfa_lps_login_rsp(struct bfa_s *bfa, 126static void bfa_lps_login_rsp(struct bfa_s *bfa,
127 struct bfi_lps_login_rsp_s *rsp); 127 struct bfi_lps_login_rsp_s *rsp);
128static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
128static void bfa_lps_logout_rsp(struct bfa_s *bfa, 129static void bfa_lps_logout_rsp(struct bfa_s *bfa,
129 struct bfi_lps_logout_rsp_s *rsp); 130 struct bfi_lps_logout_rsp_s *rsp);
130static void bfa_lps_reqq_resume(void *lps_arg); 131static void bfa_lps_reqq_resume(void *lps_arg);
@@ -430,51 +431,17 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
430 */ 431 */
431 432
432static void 433static void
433claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) 434claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
434{
435 u8 *dm_kva = NULL;
436 u64 dm_pa;
437 u32 buf_pool_sz;
438
439 dm_kva = bfa_meminfo_dma_virt(mi);
440 dm_pa = bfa_meminfo_dma_phys(mi);
441
442 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
443
444 /*
445 * Initialize the fcxp req payload list
446 */
447 mod->req_pld_list_kva = dm_kva;
448 mod->req_pld_list_pa = dm_pa;
449 dm_kva += buf_pool_sz;
450 dm_pa += buf_pool_sz;
451 memset(mod->req_pld_list_kva, 0, buf_pool_sz);
452
453 /*
454 * Initialize the fcxp rsp payload list
455 */
456 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
457 mod->rsp_pld_list_kva = dm_kva;
458 mod->rsp_pld_list_pa = dm_pa;
459 dm_kva += buf_pool_sz;
460 dm_pa += buf_pool_sz;
461 memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
462
463 bfa_meminfo_dma_virt(mi) = dm_kva;
464 bfa_meminfo_dma_phys(mi) = dm_pa;
465}
466
467static void
468claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
469{ 435{
470 u16 i; 436 u16 i;
471 struct bfa_fcxp_s *fcxp; 437 struct bfa_fcxp_s *fcxp;
472 438
473 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi); 439 fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
474 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); 440 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
475 441
476 INIT_LIST_HEAD(&mod->fcxp_free_q); 442 INIT_LIST_HEAD(&mod->fcxp_free_q);
477 INIT_LIST_HEAD(&mod->fcxp_active_q); 443 INIT_LIST_HEAD(&mod->fcxp_active_q);
444 INIT_LIST_HEAD(&mod->fcxp_unused_q);
478 445
479 mod->fcxp_list = fcxp; 446 mod->fcxp_list = fcxp;
480 447
@@ -489,40 +456,53 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
489 fcxp = fcxp + 1; 456 fcxp = fcxp + 1;
490 } 457 }
491 458
492 bfa_meminfo_kva(mi) = (void *)fcxp; 459 bfa_mem_kva_curp(mod) = (void *)fcxp;
493} 460}
494 461
495static void 462static void
496bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 463bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
497 u32 *dm_len) 464 struct bfa_s *bfa)
498{ 465{
499 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs; 466 struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
467 struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
468 struct bfa_mem_dma_s *seg_ptr;
469 u16 nsegs, idx, per_seg_fcxp;
470 u16 num_fcxps = cfg->fwcfg.num_fcxp_reqs;
471 u32 per_fcxp_sz;
500 472
501 if (num_fcxp_reqs == 0) 473 if (num_fcxps == 0)
502 return; 474 return;
503 475
504 /*
505 * Account for req/rsp payload
506 */
507 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
508 if (cfg->drvcfg.min_cfg) 476 if (cfg->drvcfg.min_cfg)
509 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs; 477 per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
510 else 478 else
511 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs; 479 per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
512 480
513 /* 481 /* dma memory */
514 * Account for fcxp structs 482 nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
515 */ 483 per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
516 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs; 484
485 bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
486 if (num_fcxps >= per_seg_fcxp) {
487 num_fcxps -= per_seg_fcxp;
488 bfa_mem_dma_setup(minfo, seg_ptr,
489 per_seg_fcxp * per_fcxp_sz);
490 } else
491 bfa_mem_dma_setup(minfo, seg_ptr,
492 num_fcxps * per_fcxp_sz);
493 }
494
495 /* kva memory */
496 bfa_mem_kva_setup(minfo, fcxp_kva,
497 cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
517} 498}
518 499
519static void 500static void
520bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 501bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
521 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 502 struct bfa_pcidev_s *pcidev)
522{ 503{
523 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); 504 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
524 505
525 memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
526 mod->bfa = bfa; 506 mod->bfa = bfa;
527 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; 507 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
528 508
@@ -535,8 +515,7 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
535 515
536 INIT_LIST_HEAD(&mod->wait_q); 516 INIT_LIST_HEAD(&mod->wait_q);
537 517
538 claim_fcxp_req_rsp_mem(mod, meminfo); 518 claim_fcxps_mem(mod);
539 claim_fcxps_mem(mod, meminfo);
540} 519}
541 520
542static void 521static void
@@ -561,6 +540,9 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
561 struct bfa_fcxp_s *fcxp; 540 struct bfa_fcxp_s *fcxp;
562 struct list_head *qe, *qen; 541 struct list_head *qe, *qen;
563 542
543 /* Enqueue unused fcxp resources to free_q */
544 list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
545
564 list_for_each_safe(qe, qen, &mod->fcxp_active_q) { 546 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
565 fcxp = (struct bfa_fcxp_s *) qe; 547 fcxp = (struct bfa_fcxp_s *) qe;
566 if (fcxp->caller == NULL) { 548 if (fcxp->caller == NULL) {
@@ -750,23 +732,6 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
750} 732}
751 733
752static void 734static void
753hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
754{
755 union bfi_addr_u sga_zero = { {0} };
756
757 sge->sg_len = reqlen;
758 sge->flags = BFI_SGE_DATA_LAST;
759 bfa_dma_addr_set(sge[0].sga, req_pa);
760 bfa_sge_to_be(sge);
761 sge++;
762
763 sge->sga = sga_zero;
764 sge->sg_len = reqlen;
765 sge->flags = BFI_SGE_PGDLEN;
766 bfa_sge_to_be(sge);
767}
768
769static void
770hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp, 735hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
771 struct fchs_s *fchs) 736 struct fchs_s *fchs)
772{ 737{
@@ -846,7 +811,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
846 struct bfa_rport_s *rport = reqi->bfa_rport; 811 struct bfa_rport_s *rport = reqi->bfa_rport;
847 812
848 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, 813 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
849 bfa_lpuid(bfa)); 814 bfa_fn_lpu(bfa));
850 815
851 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag); 816 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
852 if (rport) { 817 if (rport) {
@@ -860,7 +825,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
860 } 825 }
861 826
862 send_req->vf_id = cpu_to_be16(reqi->vf_id); 827 send_req->vf_id = cpu_to_be16(reqi->vf_id);
863 send_req->lp_tag = reqi->lp_tag; 828 send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
864 send_req->class = reqi->class; 829 send_req->class = reqi->class;
865 send_req->rsp_timeout = rspi->rsp_timeout; 830 send_req->rsp_timeout = rspi->rsp_timeout;
866 send_req->cts = reqi->cts; 831 send_req->cts = reqi->cts;
@@ -873,18 +838,16 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
873 * setup req sgles 838 * setup req sgles
874 */ 839 */
875 if (fcxp->use_ireqbuf == 1) { 840 if (fcxp->use_ireqbuf == 1) {
876 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len, 841 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
877 BFA_FCXP_REQ_PLD_PA(fcxp)); 842 BFA_FCXP_REQ_PLD_PA(fcxp));
878 } else { 843 } else {
879 if (fcxp->nreq_sgles > 0) { 844 if (fcxp->nreq_sgles > 0) {
880 WARN_ON(fcxp->nreq_sgles != 1); 845 WARN_ON(fcxp->nreq_sgles != 1);
881 hal_fcxp_set_local_sges(send_req->req_sge, 846 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
882 reqi->req_tot_len, 847 fcxp->req_sga_cbfn(fcxp->caller, 0));
883 fcxp->req_sga_cbfn(fcxp->caller,
884 0));
885 } else { 848 } else {
886 WARN_ON(reqi->req_tot_len != 0); 849 WARN_ON(reqi->req_tot_len != 0);
887 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); 850 bfa_alen_set(&send_req->rsp_alen, 0, 0);
888 } 851 }
889 } 852 }
890 853
@@ -894,25 +857,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
894 if (fcxp->use_irspbuf == 1) { 857 if (fcxp->use_irspbuf == 1) {
895 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ); 858 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
896 859
897 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen, 860 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
898 BFA_FCXP_RSP_PLD_PA(fcxp)); 861 BFA_FCXP_RSP_PLD_PA(fcxp));
899
900 } else { 862 } else {
901 if (fcxp->nrsp_sgles > 0) { 863 if (fcxp->nrsp_sgles > 0) {
902 WARN_ON(fcxp->nrsp_sgles != 1); 864 WARN_ON(fcxp->nrsp_sgles != 1);
903 hal_fcxp_set_local_sges(send_req->rsp_sge, 865 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
904 rspi->rsp_maxlen, 866 fcxp->rsp_sga_cbfn(fcxp->caller, 0));
905 fcxp->rsp_sga_cbfn(fcxp->caller, 867
906 0));
907 } else { 868 } else {
908 WARN_ON(rspi->rsp_maxlen != 0); 869 WARN_ON(rspi->rsp_maxlen != 0);
909 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); 870 bfa_alen_set(&send_req->rsp_alen, 0, 0);
910 } 871 }
911 } 872 }
912 873
913 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs); 874 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
914 875
915 bfa_reqq_produce(bfa, BFA_REQQ_FCXP); 876 bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
916 877
917 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP)); 878 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
918 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); 879 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
@@ -978,8 +939,8 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
978 void *reqbuf; 939 void *reqbuf;
979 940
980 WARN_ON(fcxp->use_ireqbuf != 1); 941 WARN_ON(fcxp->use_ireqbuf != 1);
981 reqbuf = ((u8 *)mod->req_pld_list_kva) + 942 reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
982 fcxp->fcxp_tag * mod->req_pld_sz; 943 mod->req_pld_sz + mod->rsp_pld_sz);
983 return reqbuf; 944 return reqbuf;
984} 945}
985 946
@@ -1002,13 +963,15 @@ void *
1002bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) 963bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1003{ 964{
1004 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; 965 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1005 void *rspbuf; 966 void *fcxp_buf;
1006 967
1007 WARN_ON(fcxp->use_irspbuf != 1); 968 WARN_ON(fcxp->use_irspbuf != 1);
1008 969
1009 rspbuf = ((u8 *)mod->rsp_pld_list_kva) + 970 fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
1010 fcxp->fcxp_tag * mod->rsp_pld_sz; 971 mod->req_pld_sz + mod->rsp_pld_sz);
1011 return rspbuf; 972
973 /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
974 return ((u8 *) fcxp_buf) + mod->req_pld_sz;
1012} 975}
1013 976
1014/* 977/*
@@ -1181,6 +1144,18 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1181 return mod->rsp_pld_sz; 1144 return mod->rsp_pld_sz;
1182} 1145}
1183 1146
1147void
1148bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1149{
1150 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1151 struct list_head *qe;
1152 int i;
1153
1154 for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1155 bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
1156 list_add_tail(qe, &mod->fcxp_unused_q);
1157 }
1158}
1184 1159
1185/* 1160/*
1186 * BFA LPS state machine functions 1161 * BFA LPS state machine functions
@@ -1192,7 +1167,7 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1192static void 1167static void
1193bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) 1168bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1194{ 1169{
1195 bfa_trc(lps->bfa, lps->lp_tag); 1170 bfa_trc(lps->bfa, lps->bfa_tag);
1196 bfa_trc(lps->bfa, event); 1171 bfa_trc(lps->bfa, event);
1197 1172
1198 switch (event) { 1173 switch (event) {
@@ -1244,7 +1219,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1244static void 1219static void
1245bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) 1220bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1246{ 1221{
1247 bfa_trc(lps->bfa, lps->lp_tag); 1222 bfa_trc(lps->bfa, lps->bfa_tag);
1248 bfa_trc(lps->bfa, event); 1223 bfa_trc(lps->bfa, event);
1249 1224
1250 switch (event) { 1225 switch (event) {
@@ -1278,6 +1253,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1278 break; 1253 break;
1279 1254
1280 case BFA_LPS_SM_OFFLINE: 1255 case BFA_LPS_SM_OFFLINE:
1256 case BFA_LPS_SM_DELETE:
1281 bfa_sm_set_state(lps, bfa_lps_sm_init); 1257 bfa_sm_set_state(lps, bfa_lps_sm_init);
1282 break; 1258 break;
1283 1259
@@ -1297,7 +1273,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1297static void 1273static void
1298bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) 1274bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1299{ 1275{
1300 bfa_trc(lps->bfa, lps->lp_tag); 1276 bfa_trc(lps->bfa, lps->bfa_tag);
1301 bfa_trc(lps->bfa, event); 1277 bfa_trc(lps->bfa, event);
1302 1278
1303 switch (event) { 1279 switch (event) {
@@ -1306,6 +1282,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1306 break; 1282 break;
1307 1283
1308 case BFA_LPS_SM_OFFLINE: 1284 case BFA_LPS_SM_OFFLINE:
1285 case BFA_LPS_SM_DELETE:
1309 bfa_sm_set_state(lps, bfa_lps_sm_init); 1286 bfa_sm_set_state(lps, bfa_lps_sm_init);
1310 bfa_reqq_wcancel(&lps->wqe); 1287 bfa_reqq_wcancel(&lps->wqe);
1311 break; 1288 break;
@@ -1329,7 +1306,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1329static void 1306static void
1330bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) 1307bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1331{ 1308{
1332 bfa_trc(lps->bfa, lps->lp_tag); 1309 bfa_trc(lps->bfa, lps->bfa_tag);
1333 bfa_trc(lps->bfa, event); 1310 bfa_trc(lps->bfa, event);
1334 1311
1335 switch (event) { 1312 switch (event) {
@@ -1378,7 +1355,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1378static void 1355static void
1379bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event) 1356bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1380{ 1357{
1381 bfa_trc(lps->bfa, lps->lp_tag); 1358 bfa_trc(lps->bfa, lps->bfa_tag);
1382 bfa_trc(lps->bfa, event); 1359 bfa_trc(lps->bfa, event);
1383 1360
1384 switch (event) { 1361 switch (event) {
@@ -1420,7 +1397,7 @@ bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1420static void 1397static void
1421bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) 1398bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1422{ 1399{
1423 bfa_trc(lps->bfa, lps->lp_tag); 1400 bfa_trc(lps->bfa, lps->bfa_tag);
1424 bfa_trc(lps->bfa, event); 1401 bfa_trc(lps->bfa, event);
1425 1402
1426 switch (event) { 1403 switch (event) {
@@ -1430,6 +1407,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1430 break; 1407 break;
1431 1408
1432 case BFA_LPS_SM_OFFLINE: 1409 case BFA_LPS_SM_OFFLINE:
1410 case BFA_LPS_SM_DELETE:
1433 bfa_sm_set_state(lps, bfa_lps_sm_init); 1411 bfa_sm_set_state(lps, bfa_lps_sm_init);
1434 break; 1412 break;
1435 1413
@@ -1444,7 +1422,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1444static void 1422static void
1445bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) 1423bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1446{ 1424{
1447 bfa_trc(lps->bfa, lps->lp_tag); 1425 bfa_trc(lps->bfa, lps->bfa_tag);
1448 bfa_trc(lps->bfa, event); 1426 bfa_trc(lps->bfa, event);
1449 1427
1450 switch (event) { 1428 switch (event) {
@@ -1454,6 +1432,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1454 break; 1432 break;
1455 1433
1456 case BFA_LPS_SM_OFFLINE: 1434 case BFA_LPS_SM_OFFLINE:
1435 case BFA_LPS_SM_DELETE:
1457 bfa_sm_set_state(lps, bfa_lps_sm_init); 1436 bfa_sm_set_state(lps, bfa_lps_sm_init);
1458 bfa_reqq_wcancel(&lps->wqe); 1437 bfa_reqq_wcancel(&lps->wqe);
1459 break; 1438 break;
@@ -1473,13 +1452,17 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1473 * return memory requirement 1452 * return memory requirement
1474 */ 1453 */
1475static void 1454static void
1476bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 1455bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1477 u32 *dm_len) 1456 struct bfa_s *bfa)
1478{ 1457{
1458 struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1459
1479 if (cfg->drvcfg.min_cfg) 1460 if (cfg->drvcfg.min_cfg)
1480 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS; 1461 bfa_mem_kva_setup(minfo, lps_kva,
1462 sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1481 else 1463 else
1482 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; 1464 bfa_mem_kva_setup(minfo, lps_kva,
1465 sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1483} 1466}
1484 1467
1485/* 1468/*
@@ -1487,28 +1470,28 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1487 */ 1470 */
1488static void 1471static void
1489bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 1472bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1490 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 1473 struct bfa_pcidev_s *pcidev)
1491{ 1474{
1492 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1475 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1493 struct bfa_lps_s *lps; 1476 struct bfa_lps_s *lps;
1494 int i; 1477 int i;
1495 1478
1496 memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1497 mod->num_lps = BFA_LPS_MAX_LPORTS; 1479 mod->num_lps = BFA_LPS_MAX_LPORTS;
1498 if (cfg->drvcfg.min_cfg) 1480 if (cfg->drvcfg.min_cfg)
1499 mod->num_lps = BFA_LPS_MIN_LPORTS; 1481 mod->num_lps = BFA_LPS_MIN_LPORTS;
1500 else 1482 else
1501 mod->num_lps = BFA_LPS_MAX_LPORTS; 1483 mod->num_lps = BFA_LPS_MAX_LPORTS;
1502 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo); 1484 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1503 1485
1504 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s); 1486 bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1505 1487
1506 INIT_LIST_HEAD(&mod->lps_free_q); 1488 INIT_LIST_HEAD(&mod->lps_free_q);
1507 INIT_LIST_HEAD(&mod->lps_active_q); 1489 INIT_LIST_HEAD(&mod->lps_active_q);
1490 INIT_LIST_HEAD(&mod->lps_login_q);
1508 1491
1509 for (i = 0; i < mod->num_lps; i++, lps++) { 1492 for (i = 0; i < mod->num_lps; i++, lps++) {
1510 lps->bfa = bfa; 1493 lps->bfa = bfa;
1511 lps->lp_tag = (u8) i; 1494 lps->bfa_tag = (u8) i;
1512 lps->reqq = BFA_REQQ_LPS; 1495 lps->reqq = BFA_REQQ_LPS;
1513 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps); 1496 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1514 list_add_tail(&lps->qe, &mod->lps_free_q); 1497 list_add_tail(&lps->qe, &mod->lps_free_q);
@@ -1544,6 +1527,11 @@ bfa_lps_iocdisable(struct bfa_s *bfa)
1544 lps = (struct bfa_lps_s *) qe; 1527 lps = (struct bfa_lps_s *) qe;
1545 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); 1528 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1546 } 1529 }
1530 list_for_each_safe(qe, qen, &mod->lps_login_q) {
1531 lps = (struct bfa_lps_s *) qe;
1532 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1533 }
1534 list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
1547} 1535}
1548 1536
1549/* 1537/*
@@ -1555,12 +1543,13 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1555 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1543 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1556 struct bfa_lps_s *lps; 1544 struct bfa_lps_s *lps;
1557 1545
1558 WARN_ON(rsp->lp_tag >= mod->num_lps); 1546 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1559 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); 1547 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1560 1548
1561 lps->status = rsp->status; 1549 lps->status = rsp->status;
1562 switch (rsp->status) { 1550 switch (rsp->status) {
1563 case BFA_STATUS_OK: 1551 case BFA_STATUS_OK:
1552 lps->fw_tag = rsp->fw_tag;
1564 lps->fport = rsp->f_port; 1553 lps->fport = rsp->f_port;
1565 if (lps->fport) 1554 if (lps->fport)
1566 lps->lp_pid = rsp->lp_pid; 1555 lps->lp_pid = rsp->lp_pid;
@@ -1572,6 +1561,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1572 lps->lp_mac = rsp->lp_mac; 1561 lps->lp_mac = rsp->lp_mac;
1573 lps->brcd_switch = rsp->brcd_switch; 1562 lps->brcd_switch = rsp->brcd_switch;
1574 lps->fcf_mac = rsp->fcf_mac; 1563 lps->fcf_mac = rsp->fcf_mac;
1564 lps->pr_bbscn = rsp->bb_scn;
1575 1565
1576 break; 1566 break;
1577 1567
@@ -1586,14 +1576,46 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1586 1576
1587 break; 1577 break;
1588 1578
1579 case BFA_STATUS_VPORT_MAX:
1580 if (!rsp->ext_status)
1581 bfa_lps_no_res(lps, rsp->ext_status);
1582 break;
1583
1589 default: 1584 default:
1590 /* Nothing to do with other status */ 1585 /* Nothing to do with other status */
1591 break; 1586 break;
1592 } 1587 }
1593 1588
1589 list_del(&lps->qe);
1590 list_add_tail(&lps->qe, &mod->lps_active_q);
1594 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1591 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1595} 1592}
1596 1593
1594static void
1595bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1596{
1597 struct bfa_s *bfa = first_lps->bfa;
1598 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1599 struct list_head *qe, *qe_next;
1600 struct bfa_lps_s *lps;
1601
1602 bfa_trc(bfa, count);
1603
1604 qe = bfa_q_next(first_lps);
1605
1606 while (count && qe) {
1607 qe_next = bfa_q_next(qe);
1608 lps = (struct bfa_lps_s *)qe;
1609 bfa_trc(bfa, lps->bfa_tag);
1610 lps->status = first_lps->status;
1611 list_del(&lps->qe);
1612 list_add_tail(&lps->qe, &mod->lps_active_q);
1613 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1614 qe = qe_next;
1615 count--;
1616 }
1617}
1618
1597/* 1619/*
1598 * Firmware logout response 1620 * Firmware logout response
1599 */ 1621 */
@@ -1603,8 +1625,8 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1603 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1625 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1604 struct bfa_lps_s *lps; 1626 struct bfa_lps_s *lps;
1605 1627
1606 WARN_ON(rsp->lp_tag >= mod->num_lps); 1628 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1607 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); 1629 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1608 1630
1609 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); 1631 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1610} 1632}
@@ -1618,7 +1640,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1618 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); 1640 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1619 struct bfa_lps_s *lps; 1641 struct bfa_lps_s *lps;
1620 1642
1621 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag); 1643 lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1622 1644
1623 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); 1645 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1624} 1646}
@@ -1653,23 +1675,27 @@ bfa_lps_free(struct bfa_lps_s *lps)
1653static void 1675static void
1654bfa_lps_send_login(struct bfa_lps_s *lps) 1676bfa_lps_send_login(struct bfa_lps_s *lps)
1655{ 1677{
1678 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1656 struct bfi_lps_login_req_s *m; 1679 struct bfi_lps_login_req_s *m;
1657 1680
1658 m = bfa_reqq_next(lps->bfa, lps->reqq); 1681 m = bfa_reqq_next(lps->bfa, lps->reqq);
1659 WARN_ON(!m); 1682 WARN_ON(!m);
1660 1683
1661 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ, 1684 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1662 bfa_lpuid(lps->bfa)); 1685 bfa_fn_lpu(lps->bfa));
1663 1686
1664 m->lp_tag = lps->lp_tag; 1687 m->bfa_tag = lps->bfa_tag;
1665 m->alpa = lps->alpa; 1688 m->alpa = lps->alpa;
1666 m->pdu_size = cpu_to_be16(lps->pdusz); 1689 m->pdu_size = cpu_to_be16(lps->pdusz);
1667 m->pwwn = lps->pwwn; 1690 m->pwwn = lps->pwwn;
1668 m->nwwn = lps->nwwn; 1691 m->nwwn = lps->nwwn;
1669 m->fdisc = lps->fdisc; 1692 m->fdisc = lps->fdisc;
1670 m->auth_en = lps->auth_en; 1693 m->auth_en = lps->auth_en;
1694 m->bb_scn = lps->bb_scn;
1671 1695
1672 bfa_reqq_produce(lps->bfa, lps->reqq); 1696 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1697 list_del(&lps->qe);
1698 list_add_tail(&lps->qe, &mod->lps_login_q);
1673} 1699}
1674 1700
1675/* 1701/*
@@ -1684,11 +1710,11 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
1684 WARN_ON(!m); 1710 WARN_ON(!m);
1685 1711
1686 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ, 1712 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1687 bfa_lpuid(lps->bfa)); 1713 bfa_fn_lpu(lps->bfa));
1688 1714
1689 m->lp_tag = lps->lp_tag; 1715 m->fw_tag = lps->fw_tag;
1690 m->port_name = lps->pwwn; 1716 m->port_name = lps->pwwn;
1691 bfa_reqq_produce(lps->bfa, lps->reqq); 1717 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1692} 1718}
1693 1719
1694/* 1720/*
@@ -1703,11 +1729,11 @@ bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1703 WARN_ON(!m); 1729 WARN_ON(!m);
1704 1730
1705 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ, 1731 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1706 bfa_lpuid(lps->bfa)); 1732 bfa_fn_lpu(lps->bfa));
1707 1733
1708 m->lp_tag = lps->lp_tag; 1734 m->fw_tag = lps->fw_tag;
1709 m->lp_pid = lps->lp_pid; 1735 m->lp_pid = lps->lp_pid;
1710 bfa_reqq_produce(lps->bfa, lps->reqq); 1736 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1711} 1737}
1712 1738
1713/* 1739/*
@@ -1859,7 +1885,7 @@ bfa_lps_delete(struct bfa_lps_s *lps)
1859 */ 1885 */
1860void 1886void
1861bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, 1887bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1862 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en) 1888 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
1863{ 1889{
1864 lps->uarg = uarg; 1890 lps->uarg = uarg;
1865 lps->alpa = alpa; 1891 lps->alpa = alpa;
@@ -1868,6 +1894,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1868 lps->nwwn = nwwn; 1894 lps->nwwn = nwwn;
1869 lps->fdisc = BFA_FALSE; 1895 lps->fdisc = BFA_FALSE;
1870 lps->auth_en = auth_en; 1896 lps->auth_en = auth_en;
1897 lps->bb_scn = bb_scn;
1871 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); 1898 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1872} 1899}
1873 1900
@@ -1898,6 +1925,13 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1898 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); 1925 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1899} 1926}
1900 1927
1928u8
1929bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1930{
1931 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1932
1933 return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1934}
1901 1935
1902/* 1936/*
1903 * Return lport services tag given the pid 1937 * Return lport services tag given the pid
@@ -1911,7 +1945,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1911 1945
1912 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) { 1946 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1913 if (lps->lp_pid == pid) 1947 if (lps->lp_pid == pid)
1914 return lps->lp_tag; 1948 return lps->bfa_tag;
1915 } 1949 }
1916 1950
1917 /* Return base port tag anyway */ 1951 /* Return base port tag anyway */
@@ -1936,7 +1970,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa)
1936void 1970void
1937bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid) 1971bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
1938{ 1972{
1939 bfa_trc(lps->bfa, lps->lp_tag); 1973 bfa_trc(lps->bfa, lps->bfa_tag);
1940 bfa_trc(lps->bfa, n2n_pid); 1974 bfa_trc(lps->bfa, n2n_pid);
1941 1975
1942 lps->lp_pid = n2n_pid; 1976 lps->lp_pid = n2n_pid;
@@ -1955,15 +1989,15 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1955 msg.msg = m; 1989 msg.msg = m;
1956 1990
1957 switch (m->mhdr.msg_id) { 1991 switch (m->mhdr.msg_id) {
1958 case BFI_LPS_H2I_LOGIN_RSP: 1992 case BFI_LPS_I2H_LOGIN_RSP:
1959 bfa_lps_login_rsp(bfa, msg.login_rsp); 1993 bfa_lps_login_rsp(bfa, msg.login_rsp);
1960 break; 1994 break;
1961 1995
1962 case BFI_LPS_H2I_LOGOUT_RSP: 1996 case BFI_LPS_I2H_LOGOUT_RSP:
1963 bfa_lps_logout_rsp(bfa, msg.logout_rsp); 1997 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
1964 break; 1998 break;
1965 1999
1966 case BFI_LPS_H2I_CVL_EVENT: 2000 case BFI_LPS_I2H_CVL_EVENT:
1967 bfa_lps_rx_cvl_event(bfa, msg.cvl_event); 2001 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
1968 break; 2002 break;
1969 2003
@@ -2777,10 +2811,12 @@ bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2777 BFA_CACHELINE_SZ)) 2811 BFA_CACHELINE_SZ))
2778 2812
2779static void 2813static void
2780bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 2814bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
2781 u32 *dm_len) 2815 struct bfa_s *bfa)
2782{ 2816{
2783 *dm_len += FCPORT_STATS_DMA_SZ; 2817 struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
2818
2819 bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
2784} 2820}
2785 2821
2786static void 2822static void
@@ -2792,23 +2828,14 @@ bfa_fcport_qresume(void *cbarg)
2792} 2828}
2793 2829
2794static void 2830static void
2795bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo) 2831bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
2796{ 2832{
2797 u8 *dm_kva; 2833 struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
2798 u64 dm_pa;
2799 2834
2800 dm_kva = bfa_meminfo_dma_virt(meminfo); 2835 fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
2801 dm_pa = bfa_meminfo_dma_phys(meminfo); 2836 fcport->stats_pa = bfa_mem_dma_phys(fcport_dma);
2802 2837 fcport->stats = (union bfa_fcport_stats_u *)
2803 fcport->stats_kva = dm_kva; 2838 bfa_mem_dma_virt(fcport_dma);
2804 fcport->stats_pa = dm_pa;
2805 fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
2806
2807 dm_kva += FCPORT_STATS_DMA_SZ;
2808 dm_pa += FCPORT_STATS_DMA_SZ;
2809
2810 bfa_meminfo_dma_virt(meminfo) = dm_kva;
2811 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2812} 2839}
2813 2840
2814/* 2841/*
@@ -2816,18 +2843,17 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2816 */ 2843 */
2817static void 2844static void
2818bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 2845bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2819 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 2846 struct bfa_pcidev_s *pcidev)
2820{ 2847{
2821 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 2848 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2822 struct bfa_port_cfg_s *port_cfg = &fcport->cfg; 2849 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2823 struct bfa_fcport_ln_s *ln = &fcport->ln; 2850 struct bfa_fcport_ln_s *ln = &fcport->ln;
2824 struct timeval tv; 2851 struct timeval tv;
2825 2852
2826 memset(fcport, 0, sizeof(struct bfa_fcport_s));
2827 fcport->bfa = bfa; 2853 fcport->bfa = bfa;
2828 ln->fcport = fcport; 2854 ln->fcport = fcport;
2829 2855
2830 bfa_fcport_mem_claim(fcport, meminfo); 2856 bfa_fcport_mem_claim(fcport);
2831 2857
2832 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); 2858 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2833 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); 2859 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
@@ -2921,6 +2947,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2921{ 2947{
2922 fcport->speed = BFA_PORT_SPEED_UNKNOWN; 2948 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2923 fcport->topology = BFA_PORT_TOPOLOGY_NONE; 2949 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
2950 fcport->bbsc_op_state = BFA_FALSE;
2924} 2951}
2925 2952
2926/* 2953/*
@@ -2948,7 +2975,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
2948 } 2975 }
2949 2976
2950 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ, 2977 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
2951 bfa_lpuid(fcport->bfa)); 2978 bfa_fn_lpu(fcport->bfa));
2952 m->nwwn = fcport->nwwn; 2979 m->nwwn = fcport->nwwn;
2953 m->pwwn = fcport->pwwn; 2980 m->pwwn = fcport->pwwn;
2954 m->port_cfg = fcport->cfg; 2981 m->port_cfg = fcport->cfg;
@@ -2962,7 +2989,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
2962 /* 2989 /*
2963 * queue I/O message to firmware 2990 * queue I/O message to firmware
2964 */ 2991 */
2965 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 2992 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
2966 return BFA_TRUE; 2993 return BFA_TRUE;
2967} 2994}
2968 2995
@@ -2991,13 +3018,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
2991 } 3018 }
2992 3019
2993 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ, 3020 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
2994 bfa_lpuid(fcport->bfa)); 3021 bfa_fn_lpu(fcport->bfa));
2995 m->msgtag = fcport->msgtag; 3022 m->msgtag = fcport->msgtag;
2996 3023
2997 /* 3024 /*
2998 * queue I/O message to firmware 3025 * queue I/O message to firmware
2999 */ 3026 */
3000 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3027 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3001 3028
3002 return BFA_TRUE; 3029 return BFA_TRUE;
3003} 3030}
@@ -3029,13 +3056,14 @@ bfa_fcport_send_txcredit(void *port_cbarg)
3029 } 3056 }
3030 3057
3031 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, 3058 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
3032 bfa_lpuid(fcport->bfa)); 3059 bfa_fn_lpu(fcport->bfa));
3033 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit); 3060 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
3061 m->bb_scn = fcport->cfg.bb_scn;
3034 3062
3035 /* 3063 /*
3036 * queue I/O message to firmware 3064 * queue I/O message to firmware
3037 */ 3065 */
3038 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3066 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3039} 3067}
3040 3068
3041static void 3069static void
@@ -3139,8 +3167,8 @@ bfa_fcport_send_stats_get(void *cbarg)
3139 3167
3140 memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 3168 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3141 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, 3169 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3142 bfa_lpuid(fcport->bfa)); 3170 bfa_fn_lpu(fcport->bfa));
3143 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3171 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3144} 3172}
3145 3173
3146static void 3174static void
@@ -3201,8 +3229,8 @@ bfa_fcport_send_stats_clear(void *cbarg)
3201 3229
3202 memset(msg, 0, sizeof(struct bfi_fcport_req_s)); 3230 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3203 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, 3231 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3204 bfa_lpuid(fcport->bfa)); 3232 bfa_fn_lpu(fcport->bfa));
3205 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); 3233 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3206} 3234}
3207 3235
3208/* 3236/*
@@ -3329,6 +3357,9 @@ bfa_fcport_init(struct bfa_s *bfa)
3329 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); 3357 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3330 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); 3358 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3331 3359
3360 if (bfa_fcport_is_pbcdisabled(bfa))
3361 bfa->modules.port.pbc_disabled = BFA_TRUE;
3362
3332 WARN_ON(!fcport->cfg.maxfrsize); 3363 WARN_ON(!fcport->cfg.maxfrsize);
3333 WARN_ON(!fcport->cfg.rx_bbcredit); 3364 WARN_ON(!fcport->cfg.rx_bbcredit);
3334 WARN_ON(!fcport->speed_sup); 3365 WARN_ON(!fcport->speed_sup);
@@ -3453,6 +3484,9 @@ bfa_fcport_enable(struct bfa_s *bfa)
3453{ 3484{
3454 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3485 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3455 3486
3487 if (bfa_fcport_is_pbcdisabled(bfa))
3488 return BFA_STATUS_PBC;
3489
3456 if (bfa_ioc_is_disabled(&bfa->ioc)) 3490 if (bfa_ioc_is_disabled(&bfa->ioc))
3457 return BFA_STATUS_IOC_DISABLED; 3491 return BFA_STATUS_IOC_DISABLED;
3458 3492
@@ -3466,6 +3500,8 @@ bfa_fcport_enable(struct bfa_s *bfa)
3466bfa_status_t 3500bfa_status_t
3467bfa_fcport_disable(struct bfa_s *bfa) 3501bfa_fcport_disable(struct bfa_s *bfa)
3468{ 3502{
3503 if (bfa_fcport_is_pbcdisabled(bfa))
3504 return BFA_STATUS_PBC;
3469 3505
3470 if (bfa_ioc_is_disabled(&bfa->ioc)) 3506 if (bfa_ioc_is_disabled(&bfa->ioc))
3471 return BFA_STATUS_IOC_DISABLED; 3507 return BFA_STATUS_IOC_DISABLED;
@@ -3474,6 +3510,21 @@ bfa_fcport_disable(struct bfa_s *bfa)
3474 return BFA_STATUS_OK; 3510 return BFA_STATUS_OK;
3475} 3511}
3476 3512
3513/* If PBC is disabled on port, return error */
3514bfa_status_t
3515bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3516{
3517 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3518 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3519 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3520
3521 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3522 bfa_trc(bfa, fcport->pwwn);
3523 return BFA_STATUS_PBC;
3524 }
3525 return BFA_STATUS_OK;
3526}
3527
3477/* 3528/*
3478 * Configure port speed. 3529 * Configure port speed.
3479 */ 3530 */
@@ -3491,6 +3542,28 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3491 return BFA_STATUS_UNSUPP_SPEED; 3542 return BFA_STATUS_UNSUPP_SPEED;
3492 } 3543 }
3493 3544
3545 /* For Mezz card, port speed entered needs to be checked */
3546 if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
3547 if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3548 /* For CT2, 1G is not supported */
3549 if ((speed == BFA_PORT_SPEED_1GBPS) &&
3550 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3551 return BFA_STATUS_UNSUPP_SPEED;
3552
3553 /* Already checked for Auto Speed and Max Speed supp */
3554 if (!(speed == BFA_PORT_SPEED_1GBPS ||
3555 speed == BFA_PORT_SPEED_2GBPS ||
3556 speed == BFA_PORT_SPEED_4GBPS ||
3557 speed == BFA_PORT_SPEED_8GBPS ||
3558 speed == BFA_PORT_SPEED_16GBPS ||
3559 speed == BFA_PORT_SPEED_AUTO))
3560 return BFA_STATUS_UNSUPP_SPEED;
3561 } else {
3562 if (speed != BFA_PORT_SPEED_10GBPS)
3563 return BFA_STATUS_UNSUPP_SPEED;
3564 }
3565 }
3566
3494 fcport->cfg.speed = speed; 3567 fcport->cfg.speed = speed;
3495 3568
3496 return BFA_STATUS_OK; 3569 return BFA_STATUS_OK;
@@ -3624,11 +3697,14 @@ bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3624} 3697}
3625 3698
3626void 3699void
3627bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) 3700bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
3628{ 3701{
3629 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3702 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3630 3703
3631 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit; 3704 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3705 fcport->cfg.bb_scn = bb_scn;
3706 if (bb_scn)
3707 fcport->bbsc_op_state = BFA_TRUE;
3632 bfa_fcport_send_txcredit(fcport); 3708 bfa_fcport_send_txcredit(fcport);
3633} 3709}
3634 3710
@@ -3675,16 +3751,23 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3675 /* beacon attributes */ 3751 /* beacon attributes */
3676 attr->beacon = fcport->beacon; 3752 attr->beacon = fcport->beacon;
3677 attr->link_e2e_beacon = fcport->link_e2e_beacon; 3753 attr->link_e2e_beacon = fcport->link_e2e_beacon;
3678 attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
3679 attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
3680 3754
3681 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); 3755 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
3682 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); 3756 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
3683 attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm); 3757 attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
3684 if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) 3758 attr->bbsc_op_status = fcport->bbsc_op_state;
3685 attr->port_state = BFA_PORT_ST_IOCDIS; 3759
3686 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) 3760 /* PBC Disabled State */
3687 attr->port_state = BFA_PORT_ST_FWMISMATCH; 3761 if (bfa_fcport_is_pbcdisabled(bfa))
3762 attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
3763 else {
3764 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3765 attr->port_state = BFA_PORT_ST_IOCDIS;
3766 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3767 attr->port_state = BFA_PORT_ST_FWMISMATCH;
3768 else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
3769 attr->port_state = BFA_PORT_ST_ACQ_ADDR;
3770 }
3688 3771
3689 /* FCoE vlan */ 3772 /* FCoE vlan */
3690 attr->fcoe_vlan = fcport->fcoe_vlan; 3773 attr->fcoe_vlan = fcport->fcoe_vlan;
@@ -3766,6 +3849,18 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa)
3766} 3849}
3767 3850
3768/* 3851/*
3852 * Enable/Disable FAA feature in port config
3853 */
3854void
3855bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
3856{
3857 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3858
3859 bfa_trc(bfa, state);
3860 fcport->cfg.faa_state = state;
3861}
3862
3863/*
3769 * Get default minimum ratelim speed 3864 * Get default minimum ratelim speed
3770 */ 3865 */
3771enum bfa_port_speed 3866enum bfa_port_speed
@@ -3778,6 +3873,22 @@ bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3778 3873
3779} 3874}
3780 3875
3876void
3877bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
3878 bfa_boolean_t link_e2e_beacon)
3879{
3880 struct bfa_s *bfa = dev;
3881 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3882
3883 bfa_trc(bfa, beacon);
3884 bfa_trc(bfa, link_e2e_beacon);
3885 bfa_trc(bfa, fcport->beacon);
3886 bfa_trc(bfa, fcport->link_e2e_beacon);
3887
3888 fcport->beacon = beacon;
3889 fcport->link_e2e_beacon = link_e2e_beacon;
3890}
3891
3781bfa_boolean_t 3892bfa_boolean_t
3782bfa_fcport_is_linkup(struct bfa_s *bfa) 3893bfa_fcport_is_linkup(struct bfa_s *bfa)
3783{ 3894{
@@ -3797,6 +3908,14 @@ bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3797 return fcport->cfg.qos_enabled; 3908 return fcport->cfg.qos_enabled;
3798} 3909}
3799 3910
3911bfa_boolean_t
3912bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3913{
3914 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3915
3916 return fcport->cfg.trunked;
3917}
3918
3800/* 3919/*
3801 * Rport State machine functions 3920 * Rport State machine functions
3802 */ 3921 */
@@ -4286,18 +4405,22 @@ bfa_rport_qresume(void *cbarg)
4286} 4405}
4287 4406
4288static void 4407static void
4289bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 4408bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4290 u32 *dm_len) 4409 struct bfa_s *bfa)
4291{ 4410{
4411 struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4412
4292 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN) 4413 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4293 cfg->fwcfg.num_rports = BFA_RPORT_MIN; 4414 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4294 4415
4295 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s); 4416 /* kva memory */
4417 bfa_mem_kva_setup(minfo, rport_kva,
4418 cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
4296} 4419}
4297 4420
4298static void 4421static void
4299bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 4422bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4300 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 4423 struct bfa_pcidev_s *pcidev)
4301{ 4424{
4302 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); 4425 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4303 struct bfa_rport_s *rp; 4426 struct bfa_rport_s *rp;
@@ -4305,8 +4428,9 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4305 4428
4306 INIT_LIST_HEAD(&mod->rp_free_q); 4429 INIT_LIST_HEAD(&mod->rp_free_q);
4307 INIT_LIST_HEAD(&mod->rp_active_q); 4430 INIT_LIST_HEAD(&mod->rp_active_q);
4431 INIT_LIST_HEAD(&mod->rp_unused_q);
4308 4432
4309 rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo); 4433 rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
4310 mod->rps_list = rp; 4434 mod->rps_list = rp;
4311 mod->num_rports = cfg->fwcfg.num_rports; 4435 mod->num_rports = cfg->fwcfg.num_rports;
4312 4436
@@ -4331,7 +4455,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4331 /* 4455 /*
4332 * consume memory 4456 * consume memory
4333 */ 4457 */
4334 bfa_meminfo_kva(meminfo) = (u8 *) rp; 4458 bfa_mem_kva_curp(mod) = (u8 *) rp;
4335} 4459}
4336 4460
4337static void 4461static void
@@ -4356,6 +4480,9 @@ bfa_rport_iocdisable(struct bfa_s *bfa)
4356 struct bfa_rport_s *rport; 4480 struct bfa_rport_s *rport;
4357 struct list_head *qe, *qen; 4481 struct list_head *qe, *qen;
4358 4482
4483 /* Enqueue unused rport resources to free_q */
4484 list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4485
4359 list_for_each_safe(qe, qen, &mod->rp_active_q) { 4486 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4360 rport = (struct bfa_rport_s *) qe; 4487 rport = (struct bfa_rport_s *) qe;
4361 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL); 4488 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
@@ -4399,11 +4526,11 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4399 } 4526 }
4400 4527
4401 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ, 4528 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4402 bfa_lpuid(rp->bfa)); 4529 bfa_fn_lpu(rp->bfa));
4403 m->bfa_handle = rp->rport_tag; 4530 m->bfa_handle = rp->rport_tag;
4404 m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz); 4531 m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4405 m->pid = rp->rport_info.pid; 4532 m->pid = rp->rport_info.pid;
4406 m->lp_tag = rp->rport_info.lp_tag; 4533 m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
4407 m->local_pid = rp->rport_info.local_pid; 4534 m->local_pid = rp->rport_info.local_pid;
4408 m->fc_class = rp->rport_info.fc_class; 4535 m->fc_class = rp->rport_info.fc_class;
4409 m->vf_en = rp->rport_info.vf_en; 4536 m->vf_en = rp->rport_info.vf_en;
@@ -4413,7 +4540,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4413 /* 4540 /*
4414 * queue I/O message to firmware 4541 * queue I/O message to firmware
4415 */ 4542 */
4416 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); 4543 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4417 return BFA_TRUE; 4544 return BFA_TRUE;
4418} 4545}
4419 4546
@@ -4432,13 +4559,13 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4432 } 4559 }
4433 4560
4434 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ, 4561 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4435 bfa_lpuid(rp->bfa)); 4562 bfa_fn_lpu(rp->bfa));
4436 m->fw_handle = rp->fw_handle; 4563 m->fw_handle = rp->fw_handle;
4437 4564
4438 /* 4565 /*
4439 * queue I/O message to firmware 4566 * queue I/O message to firmware
4440 */ 4567 */
4441 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); 4568 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4442 return BFA_TRUE; 4569 return BFA_TRUE;
4443} 4570}
4444 4571
@@ -4457,14 +4584,14 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4457 } 4584 }
4458 4585
4459 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ, 4586 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4460 bfa_lpuid(rp->bfa)); 4587 bfa_fn_lpu(rp->bfa));
4461 m->fw_handle = rp->fw_handle; 4588 m->fw_handle = rp->fw_handle;
4462 m->speed = (u8)rp->rport_info.speed; 4589 m->speed = (u8)rp->rport_info.speed;
4463 4590
4464 /* 4591 /*
4465 * queue I/O message to firmware 4592 * queue I/O message to firmware
4466 */ 4593 */
4467 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); 4594 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4468 return BFA_TRUE; 4595 return BFA_TRUE;
4469} 4596}
4470 4597
@@ -4514,7 +4641,18 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4514 } 4641 }
4515} 4642}
4516 4643
4644void
4645bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
4646{
4647 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4648 struct list_head *qe;
4649 int i;
4517 4650
4651 for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
4652 bfa_q_deq_tail(&mod->rp_free_q, &qe);
4653 list_add_tail(qe, &mod->rp_unused_q);
4654 }
4655}
4518 4656
4519/* 4657/*
4520 * bfa_rport_api 4658 * bfa_rport_api
@@ -4577,26 +4715,51 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4577 * Compute and return memory needed by FCP(im) module. 4715 * Compute and return memory needed by FCP(im) module.
4578 */ 4716 */
4579static void 4717static void
4580bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, 4718bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4581 u32 *dm_len) 4719 struct bfa_s *bfa)
4582{ 4720{
4721 struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
4722 struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
4723 struct bfa_mem_dma_s *seg_ptr;
4724 u16 nsegs, idx, per_seg_sgpg, num_sgpg;
4725 u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
4726
4583 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN) 4727 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4584 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; 4728 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4729 else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
4730 cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
4585 4731
4586 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s); 4732 num_sgpg = cfg->drvcfg.num_sgpgs;
4587 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
4588}
4589 4733
4734 nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4735 per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
4736
4737 bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
4738 if (num_sgpg >= per_seg_sgpg) {
4739 num_sgpg -= per_seg_sgpg;
4740 bfa_mem_dma_setup(minfo, seg_ptr,
4741 per_seg_sgpg * sgpg_sz);
4742 } else
4743 bfa_mem_dma_setup(minfo, seg_ptr,
4744 num_sgpg * sgpg_sz);
4745 }
4746
4747 /* kva memory */
4748 bfa_mem_kva_setup(minfo, sgpg_kva,
4749 cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
4750}
4590 4751
4591static void 4752static void
4592bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 4753bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4593 struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev) 4754 struct bfa_pcidev_s *pcidev)
4594{ 4755{
4595 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); 4756 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4596 int i;
4597 struct bfa_sgpg_s *hsgpg; 4757 struct bfa_sgpg_s *hsgpg;
4598 struct bfi_sgpg_s *sgpg; 4758 struct bfi_sgpg_s *sgpg;
4599 u64 align_len; 4759 u64 align_len;
4760 struct bfa_mem_dma_s *seg_ptr;
4761 u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
4762 u16 i, idx, nsegs, per_seg_sgpg, num_sgpg;
4600 4763
4601 union { 4764 union {
4602 u64 pa; 4765 u64 pa;
@@ -4608,39 +4771,45 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4608 4771
4609 bfa_trc(bfa, cfg->drvcfg.num_sgpgs); 4772 bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
4610 4773
4611 mod->num_sgpgs = cfg->drvcfg.num_sgpgs; 4774 mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
4612 mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo); 4775
4613 align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa); 4776 num_sgpg = cfg->drvcfg.num_sgpgs;
4614 mod->sgpg_arr_pa += align_len; 4777 nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4615 mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) + 4778
4616 align_len); 4779 /* dma/kva mem claim */
4617 mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) + 4780 hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
4618 align_len); 4781
4619 4782 bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
4620 hsgpg = mod->hsgpg_arr; 4783
4621 sgpg = mod->sgpg_arr; 4784 if (!bfa_mem_dma_virt(seg_ptr))
4622 sgpg_pa.pa = mod->sgpg_arr_pa; 4785 break;
4623 mod->free_sgpgs = mod->num_sgpgs; 4786
4624 4787 align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
4625 WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)); 4788 bfa_mem_dma_phys(seg_ptr);
4626 4789
4627 for (i = 0; i < mod->num_sgpgs; i++) { 4790 sgpg = (struct bfi_sgpg_s *)
4628 memset(hsgpg, 0, sizeof(*hsgpg)); 4791 (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
4629 memset(sgpg, 0, sizeof(*sgpg)); 4792 sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
4630 4793 WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
4631 hsgpg->sgpg = sgpg; 4794
4632 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); 4795 per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
4633 hsgpg->sgpg_pa = sgpg_pa_tmp.addr; 4796
4634 list_add_tail(&hsgpg->qe, &mod->sgpg_q); 4797 for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
4635 4798 memset(hsgpg, 0, sizeof(*hsgpg));
4636 hsgpg++; 4799 memset(sgpg, 0, sizeof(*sgpg));
4637 sgpg++; 4800
4638 sgpg_pa.pa += sizeof(struct bfi_sgpg_s); 4801 hsgpg->sgpg = sgpg;
4802 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
4803 hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
4804 list_add_tail(&hsgpg->qe, &mod->sgpg_q);
4805
4806 sgpg++;
4807 hsgpg++;
4808 sgpg_pa.pa += sgpg_sz;
4809 }
4639 } 4810 }
4640 4811
4641 bfa_meminfo_kva(minfo) = (u8 *) hsgpg; 4812 bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
4642 bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
4643 bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
4644} 4813}
4645 4814
4646static void 4815static void
@@ -4782,31 +4951,13 @@ __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4782} 4951}
4783 4952
4784static void 4953static void
4785claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) 4954claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
4786{
4787 u32 uf_pb_tot_sz;
4788
4789 ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
4790 ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
4791 uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
4792 BFA_DMA_ALIGN_SZ);
4793
4794 bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
4795 bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
4796
4797 memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
4798}
4799
4800static void
4801claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4802{ 4955{
4803 struct bfi_uf_buf_post_s *uf_bp_msg; 4956 struct bfi_uf_buf_post_s *uf_bp_msg;
4804 struct bfi_sge_s *sge;
4805 union bfi_addr_u sga_zero = { {0} };
4806 u16 i; 4957 u16 i;
4807 u16 buf_len; 4958 u16 buf_len;
4808 4959
4809 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi); 4960 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
4810 uf_bp_msg = ufm->uf_buf_posts; 4961 uf_bp_msg = ufm->uf_buf_posts;
4811 4962
4812 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; 4963 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
@@ -4817,28 +4968,18 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4817 buf_len = sizeof(struct bfa_uf_buf_s); 4968 buf_len = sizeof(struct bfa_uf_buf_s);
4818 uf_bp_msg->buf_len = cpu_to_be16(buf_len); 4969 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
4819 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, 4970 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
4820 bfa_lpuid(ufm->bfa)); 4971 bfa_fn_lpu(ufm->bfa));
4821 4972 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
4822 sge = uf_bp_msg->sge;
4823 sge[0].sg_len = buf_len;
4824 sge[0].flags = BFI_SGE_DATA_LAST;
4825 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
4826 bfa_sge_to_be(sge);
4827
4828 sge[1].sg_len = buf_len;
4829 sge[1].flags = BFI_SGE_PGDLEN;
4830 sge[1].sga = sga_zero;
4831 bfa_sge_to_be(&sge[1]);
4832 } 4973 }
4833 4974
4834 /* 4975 /*
4835 * advance pointer beyond consumed memory 4976 * advance pointer beyond consumed memory
4836 */ 4977 */
4837 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; 4978 bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
4838} 4979}
4839 4980
4840static void 4981static void
4841claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) 4982claim_ufs(struct bfa_uf_mod_s *ufm)
4842{ 4983{
4843 u16 i; 4984 u16 i;
4844 struct bfa_uf_s *uf; 4985 struct bfa_uf_s *uf;
@@ -4846,7 +4987,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4846 /* 4987 /*
4847 * Claim block of memory for UF list 4988 * Claim block of memory for UF list
4848 */ 4989 */
4849 ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi); 4990 ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
4850 4991
4851 /* 4992 /*
4852 * Initialize UFs and queue it in UF free queue 4993 * Initialize UFs and queue it in UF free queue
@@ -4855,8 +4996,8 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4855 memset(uf, 0, sizeof(struct bfa_uf_s)); 4996 memset(uf, 0, sizeof(struct bfa_uf_s));
4856 uf->bfa = ufm->bfa; 4997 uf->bfa = ufm->bfa;
4857 uf->uf_tag = i; 4998 uf->uf_tag = i;
4858 uf->pb_len = sizeof(struct bfa_uf_buf_s); 4999 uf->pb_len = BFA_PER_UF_DMA_SZ;
4859 uf->buf_kva = (void *)&ufm->uf_pbs_kva[i]; 5000 uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
4860 uf->buf_pa = ufm_pbs_pa(ufm, i); 5001 uf->buf_pa = ufm_pbs_pa(ufm, i);
4861 list_add_tail(&uf->qe, &ufm->uf_free_q); 5002 list_add_tail(&uf->qe, &ufm->uf_free_q);
4862 } 5003 }
@@ -4864,48 +5005,57 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4864 /* 5005 /*
4865 * advance memory pointer 5006 * advance memory pointer
4866 */ 5007 */
4867 bfa_meminfo_kva(mi) = (u8 *) uf; 5008 bfa_mem_kva_curp(ufm) = (u8 *) uf;
4868} 5009}
4869 5010
4870static void 5011static void
4871uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) 5012uf_mem_claim(struct bfa_uf_mod_s *ufm)
4872{ 5013{
4873 claim_uf_pbs(ufm, mi); 5014 claim_ufs(ufm);
4874 claim_ufs(ufm, mi); 5015 claim_uf_post_msgs(ufm);
4875 claim_uf_post_msgs(ufm, mi);
4876} 5016}
4877 5017
4878static void 5018static void
4879bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len) 5019bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5020 struct bfa_s *bfa)
4880{ 5021{
4881 u32 num_ufs = cfg->fwcfg.num_uf_bufs; 5022 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4882 5023 struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
4883 /* 5024 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
4884 * dma-able memory for UF posted bufs 5025 struct bfa_mem_dma_s *seg_ptr;
4885 */ 5026 u16 nsegs, idx, per_seg_uf = 0;
4886 *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs), 5027
4887 BFA_DMA_ALIGN_SZ); 5028 nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5029 per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
5030
5031 bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5032 if (num_ufs >= per_seg_uf) {
5033 num_ufs -= per_seg_uf;
5034 bfa_mem_dma_setup(minfo, seg_ptr,
5035 per_seg_uf * BFA_PER_UF_DMA_SZ);
5036 } else
5037 bfa_mem_dma_setup(minfo, seg_ptr,
5038 num_ufs * BFA_PER_UF_DMA_SZ);
5039 }
4888 5040
4889 /* 5041 /* kva memory */
4890 * kernel Virtual memory for UFs and UF buf post msg copies 5042 bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
4891 */ 5043 (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
4892 *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
4893 *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
4894} 5044}
4895 5045
4896static void 5046static void
4897bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 5047bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4898 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 5048 struct bfa_pcidev_s *pcidev)
4899{ 5049{
4900 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); 5050 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4901 5051
4902 memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
4903 ufm->bfa = bfa; 5052 ufm->bfa = bfa;
4904 ufm->num_ufs = cfg->fwcfg.num_uf_bufs; 5053 ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
4905 INIT_LIST_HEAD(&ufm->uf_free_q); 5054 INIT_LIST_HEAD(&ufm->uf_free_q);
4906 INIT_LIST_HEAD(&ufm->uf_posted_q); 5055 INIT_LIST_HEAD(&ufm->uf_posted_q);
5056 INIT_LIST_HEAD(&ufm->uf_unused_q);
4907 5057
4908 uf_mem_claim(ufm, meminfo); 5058 uf_mem_claim(ufm);
4909} 5059}
4910 5060
4911static void 5061static void
@@ -4939,7 +5089,7 @@ bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
4939 5089
4940 memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], 5090 memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
4941 sizeof(struct bfi_uf_buf_post_s)); 5091 sizeof(struct bfi_uf_buf_post_s));
4942 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP); 5092 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
4943 5093
4944 bfa_trc(ufm->bfa, uf->uf_tag); 5094 bfa_trc(ufm->bfa, uf->uf_tag);
4945 5095
@@ -4963,11 +5113,15 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
4963{ 5113{
4964 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); 5114 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4965 u16 uf_tag = m->buf_tag; 5115 u16 uf_tag = m->buf_tag;
4966 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
4967 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag]; 5116 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
4968 u8 *buf = &uf_buf->d[0]; 5117 struct bfa_uf_buf_s *uf_buf;
5118 uint8_t *buf;
4969 struct fchs_s *fchs; 5119 struct fchs_s *fchs;
4970 5120
5121 uf_buf = (struct bfa_uf_buf_s *)
5122 bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5123 buf = &uf_buf->d[0];
5124
4971 m->frm_len = be16_to_cpu(m->frm_len); 5125 m->frm_len = be16_to_cpu(m->frm_len);
4972 m->xfr_len = be16_to_cpu(m->xfr_len); 5126 m->xfr_len = be16_to_cpu(m->xfr_len);
4973 5127
@@ -5008,6 +5162,9 @@ bfa_uf_iocdisable(struct bfa_s *bfa)
5008 struct bfa_uf_s *uf; 5162 struct bfa_uf_s *uf;
5009 struct list_head *qe, *qen; 5163 struct list_head *qe, *qen;
5010 5164
5165 /* Enqueue unused uf resources to free_q */
5166 list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5167
5011 list_for_each_safe(qe, qen, &ufm->uf_posted_q) { 5168 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5012 uf = (struct bfa_uf_s *) qe; 5169 uf = (struct bfa_uf_s *) qe;
5013 list_del(&uf->qe); 5170 list_del(&uf->qe);
@@ -5072,4 +5229,415 @@ bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5072 } 5229 }
5073} 5230}
5074 5231
5232void
5233bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5234{
5235 struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa);
5236 struct list_head *qe;
5237 int i;
5238
5239 for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5240 bfa_q_deq_tail(&mod->uf_free_q, &qe);
5241 list_add_tail(qe, &mod->uf_unused_q);
5242 }
5243}
5244
5245/*
5246 * BFA fcdiag module
5247 */
5248#define BFA_DIAG_QTEST_TOV 1000 /* msec */
5249
5250/*
5251 * Set port status to busy
5252 */
5253static void
5254bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5255{
5256 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5257
5258 if (fcdiag->lb.lock)
5259 fcport->diag_busy = BFA_TRUE;
5260 else
5261 fcport->diag_busy = BFA_FALSE;
5262}
5263
5264static void
5265bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5266 struct bfa_s *bfa)
5267{
5268}
5269
5270static void
5271bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5272 struct bfa_pcidev_s *pcidev)
5273{
5274 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5275 fcdiag->bfa = bfa;
5276 fcdiag->trcmod = bfa->trcmod;
5277 /* The common DIAG attach bfa_diag_attach() will do all memory claim */
5278}
5279
5280static void
5281bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5282{
5283 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5284 bfa_trc(fcdiag, fcdiag->lb.lock);
5285 if (fcdiag->lb.lock) {
5286 fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
5287 fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5288 fcdiag->lb.lock = 0;
5289 bfa_fcdiag_set_busy_status(fcdiag);
5290 }
5291}
5292
5293static void
5294bfa_fcdiag_detach(struct bfa_s *bfa)
5295{
5296}
5297
5298static void
5299bfa_fcdiag_start(struct bfa_s *bfa)
5300{
5301}
5302
5303static void
5304bfa_fcdiag_stop(struct bfa_s *bfa)
5305{
5306}
5307
5308static void
5309bfa_fcdiag_queuetest_timeout(void *cbarg)
5310{
5311 struct bfa_fcdiag_s *fcdiag = cbarg;
5312 struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5313
5314 bfa_trc(fcdiag, fcdiag->qtest.all);
5315 bfa_trc(fcdiag, fcdiag->qtest.count);
5316
5317 fcdiag->qtest.timer_active = 0;
5318
5319 res->status = BFA_STATUS_ETIMER;
5320 res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5321 if (fcdiag->qtest.all)
5322 res->queue = fcdiag->qtest.all;
5323
5324 bfa_trc(fcdiag, BFA_STATUS_ETIMER);
5325 fcdiag->qtest.status = BFA_STATUS_ETIMER;
5326 fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5327 fcdiag->qtest.lock = 0;
5328}
5329
5330static bfa_status_t
5331bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5332{
5333 u32 i;
5334 struct bfi_diag_qtest_req_s *req;
5335
5336 req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5337 if (!req)
5338 return BFA_STATUS_DEVBUSY;
5339
5340 /* build host command */
5341 bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5342 bfa_fn_lpu(fcdiag->bfa));
5343
5344 for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5345 req->data[i] = QTEST_PAT_DEFAULT;
5346
5347 bfa_trc(fcdiag, fcdiag->qtest.queue);
5348 /* ring door bell */
5349 bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5350 return BFA_STATUS_OK;
5351}
5352
5353static void
5354bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
5355 bfi_diag_qtest_rsp_t *rsp)
5356{
5357 struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5358 bfa_status_t status = BFA_STATUS_OK;
5359 int i;
5360
5361 /* Check timer, should still be active */
5362 if (!fcdiag->qtest.timer_active) {
5363 bfa_trc(fcdiag, fcdiag->qtest.timer_active);
5364 return;
5365 }
5366
5367 /* update count */
5368 fcdiag->qtest.count--;
5369
5370 /* Check result */
5371 for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
5372 if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
5373 res->status = BFA_STATUS_DATACORRUPTED;
5374 break;
5375 }
5376 }
5377
5378 if (res->status == BFA_STATUS_OK) {
5379 if (fcdiag->qtest.count > 0) {
5380 status = bfa_fcdiag_queuetest_send(fcdiag);
5381 if (status == BFA_STATUS_OK)
5382 return;
5383 else
5384 res->status = status;
5385 } else if (fcdiag->qtest.all > 0 &&
5386 fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
5387 fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5388 fcdiag->qtest.queue++;
5389 status = bfa_fcdiag_queuetest_send(fcdiag);
5390 if (status == BFA_STATUS_OK)
5391 return;
5392 else
5393 res->status = status;
5394 }
5395 }
5396
5397 /* Stop timer when we comp all queue */
5398 if (fcdiag->qtest.timer_active) {
5399 bfa_timer_stop(&fcdiag->qtest.timer);
5400 fcdiag->qtest.timer_active = 0;
5401 }
5402 res->queue = fcdiag->qtest.queue;
5403 res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5404 bfa_trc(fcdiag, res->count);
5405 bfa_trc(fcdiag, res->status);
5406 fcdiag->qtest.status = res->status;
5407 fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5408 fcdiag->qtest.lock = 0;
5409}
5410
5411static void
5412bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
5413 struct bfi_diag_lb_rsp_s *rsp)
5414{
5415 struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
5416
5417 res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm);
5418 res->numosffrm = be32_to_cpu(rsp->res.numosffrm);
5419 res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm);
5420 res->badfrminf = be32_to_cpu(rsp->res.badfrminf);
5421 res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum);
5422 res->status = rsp->res.status;
5423 fcdiag->lb.status = rsp->res.status;
5424 bfa_trc(fcdiag, fcdiag->lb.status);
5425 fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5426 fcdiag->lb.lock = 0;
5427 bfa_fcdiag_set_busy_status(fcdiag);
5428}
5429
5430static bfa_status_t
5431bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
5432 struct bfa_diag_loopback_s *loopback)
5433{
5434 struct bfi_diag_lb_req_s *lb_req;
5435
5436 lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
5437 if (!lb_req)
5438 return BFA_STATUS_DEVBUSY;
5439
5440 /* build host command */
5441 bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
5442 bfa_fn_lpu(fcdiag->bfa));
5443
5444 lb_req->lb_mode = loopback->lb_mode;
5445 lb_req->speed = loopback->speed;
5446 lb_req->loopcnt = loopback->loopcnt;
5447 lb_req->pattern = loopback->pattern;
5448
5449 /* ring door bell */
5450 bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
5451
5452 bfa_trc(fcdiag, loopback->lb_mode);
5453 bfa_trc(fcdiag, loopback->speed);
5454 bfa_trc(fcdiag, loopback->loopcnt);
5455 bfa_trc(fcdiag, loopback->pattern);
5456 return BFA_STATUS_OK;
5457}
5458
5459/*
5460 * cpe/rme intr handler
5461 */
5462void
5463bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5464{
5465 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5466
5467 switch (msg->mhdr.msg_id) {
5468 case BFI_DIAG_I2H_LOOPBACK:
5469 bfa_fcdiag_loopback_comp(fcdiag,
5470 (struct bfi_diag_lb_rsp_s *) msg);
5471 break;
5472 case BFI_DIAG_I2H_QTEST:
5473 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5474 break;
5475 default:
5476 bfa_trc(fcdiag, msg->mhdr.msg_id);
5477 WARN_ON(1);
5478 }
5479}
5480
5481/*
5482 * Loopback test
5483 *
5484 * @param[in] *bfa - bfa data struct
5485 * @param[in] opmode - port operation mode
5486 * @param[in] speed - port speed
5487 * @param[in] lpcnt - loop count
5488 * @param[in] pat - pattern to build packet
5489 * @param[in] *result - pt to bfa_diag_loopback_result_t data struct
5490 * @param[in] cbfn - callback function
5491 * @param[in] cbarg - callback functioin arg
5492 *
5493 * @param[out]
5494 */
5495bfa_status_t
5496bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5497 enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5498 struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5499 void *cbarg)
5500{
5501 struct bfa_diag_loopback_s loopback;
5502 struct bfa_port_attr_s attr;
5503 bfa_status_t status;
5504 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5505
5506 if (!bfa_iocfc_is_operational(bfa))
5507 return BFA_STATUS_IOC_NON_OP;
5508
5509 /* if port is PBC disabled, return error */
5510 if (bfa_fcport_is_pbcdisabled(bfa)) {
5511 bfa_trc(fcdiag, BFA_STATUS_PBC);
5512 return BFA_STATUS_PBC;
5513 }
5075 5514
5515 if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5516 bfa_trc(fcdiag, opmode);
5517 return BFA_STATUS_PORT_NOT_DISABLED;
5518 }
5519
5520 /* Check if the speed is supported */
5521 bfa_fcport_get_attr(bfa, &attr);
5522 bfa_trc(fcdiag, attr.speed_supported);
5523 if (speed > attr.speed_supported)
5524 return BFA_STATUS_UNSUPP_SPEED;
5525
5526 /* For Mezz card, port speed entered needs to be checked */
5527 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5528 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5529 if ((speed == BFA_PORT_SPEED_1GBPS) &&
5530 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
5531 return BFA_STATUS_UNSUPP_SPEED;
5532 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5533 speed == BFA_PORT_SPEED_2GBPS ||
5534 speed == BFA_PORT_SPEED_4GBPS ||
5535 speed == BFA_PORT_SPEED_8GBPS ||
5536 speed == BFA_PORT_SPEED_16GBPS ||
5537 speed == BFA_PORT_SPEED_AUTO))
5538 return BFA_STATUS_UNSUPP_SPEED;
5539 } else {
5540 if (speed != BFA_PORT_SPEED_10GBPS)
5541 return BFA_STATUS_UNSUPP_SPEED;
5542 }
5543 }
5544
5545 /* check to see if there is another destructive diag cmd running */
5546 if (fcdiag->lb.lock) {
5547 bfa_trc(fcdiag, fcdiag->lb.lock);
5548 return BFA_STATUS_DEVBUSY;
5549 }
5550
5551 fcdiag->lb.lock = 1;
5552 loopback.lb_mode = opmode;
5553 loopback.speed = speed;
5554 loopback.loopcnt = lpcnt;
5555 loopback.pattern = pat;
5556 fcdiag->lb.result = result;
5557 fcdiag->lb.cbfn = cbfn;
5558 fcdiag->lb.cbarg = cbarg;
5559 memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
5560 bfa_fcdiag_set_busy_status(fcdiag);
5561
5562 /* Send msg to fw */
5563 status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
5564 return status;
5565}
5566
5567/*
5568 * DIAG queue test command
5569 *
5570 * @param[in] *bfa - bfa data struct
5571 * @param[in] force - 1: don't do ioc op checking
5572 * @param[in] queue - queue no. to test
5573 * @param[in] *result - pt to bfa_diag_qtest_result_t data struct
5574 * @param[in] cbfn - callback function
5575 * @param[in] *cbarg - callback functioin arg
5576 *
5577 * @param[out]
5578 */
5579bfa_status_t
5580bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
5581 struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
5582 void *cbarg)
5583{
5584 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5585 bfa_status_t status;
5586 bfa_trc(fcdiag, force);
5587 bfa_trc(fcdiag, queue);
5588
5589 if (!force && !bfa_iocfc_is_operational(bfa))
5590 return BFA_STATUS_IOC_NON_OP;
5591
5592 /* check to see if there is another destructive diag cmd running */
5593 if (fcdiag->qtest.lock) {
5594 bfa_trc(fcdiag, fcdiag->qtest.lock);
5595 return BFA_STATUS_DEVBUSY;
5596 }
5597
5598 /* Initialization */
5599 fcdiag->qtest.lock = 1;
5600 fcdiag->qtest.cbfn = cbfn;
5601 fcdiag->qtest.cbarg = cbarg;
5602 fcdiag->qtest.result = result;
5603 fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5604
5605 /* Init test results */
5606 fcdiag->qtest.result->status = BFA_STATUS_OK;
5607 fcdiag->qtest.result->count = 0;
5608
5609 /* send */
5610 if (queue < BFI_IOC_MAX_CQS) {
5611 fcdiag->qtest.result->queue = (u8)queue;
5612 fcdiag->qtest.queue = (u8)queue;
5613 fcdiag->qtest.all = 0;
5614 } else {
5615 fcdiag->qtest.result->queue = 0;
5616 fcdiag->qtest.queue = 0;
5617 fcdiag->qtest.all = 1;
5618 }
5619 status = bfa_fcdiag_queuetest_send(fcdiag);
5620
5621 /* Start a timer */
5622 if (status == BFA_STATUS_OK) {
5623 bfa_timer_start(bfa, &fcdiag->qtest.timer,
5624 bfa_fcdiag_queuetest_timeout, fcdiag,
5625 BFA_DIAG_QTEST_TOV);
5626 fcdiag->qtest.timer_active = 1;
5627 }
5628 return status;
5629}
5630
5631/*
5632 * DIAG PLB is running
5633 *
5634 * @param[in] *bfa - bfa data struct
5635 *
5636 * @param[out]
5637 */
5638bfa_status_t
5639bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
5640{
5641 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5642 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
5643}
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 5902a45c080f..fbe513a671b5 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -26,6 +26,7 @@
26 * Scatter-gather DMA related defines 26 * Scatter-gather DMA related defines
27 */ 27 */
28#define BFA_SGPG_MIN (16) 28#define BFA_SGPG_MIN (16)
29#define BFA_SGPG_MAX (8192)
29 30
30/* 31/*
31 * Alignment macro for SG page allocation 32 * Alignment macro for SG page allocation
@@ -54,17 +55,21 @@ struct bfa_sgpg_s {
54 */ 55 */
55#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1) 56#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
56 57
58/* Max SGPG dma segs required */
59#define BFA_SGPG_DMA_SEGS \
60 BFI_MEM_DMA_NSEGS(BFA_SGPG_MAX, (uint32_t)sizeof(struct bfi_sgpg_s))
61
57struct bfa_sgpg_mod_s { 62struct bfa_sgpg_mod_s {
58 struct bfa_s *bfa; 63 struct bfa_s *bfa;
59 int num_sgpgs; /* number of SG pages */ 64 int num_sgpgs; /* number of SG pages */
60 int free_sgpgs; /* number of free SG pages */ 65 int free_sgpgs; /* number of free SG pages */
61 struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */
62 struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */
63 u64 sgpg_arr_pa; /* SG page array DMA addr */
64 struct list_head sgpg_q; /* queue of free SG pages */ 66 struct list_head sgpg_q; /* queue of free SG pages */
65 struct list_head sgpg_wait_q; /* wait queue for SG pages */ 67 struct list_head sgpg_wait_q; /* wait queue for SG pages */
68 struct bfa_mem_dma_s dma_seg[BFA_SGPG_DMA_SEGS];
69 struct bfa_mem_kva_s kva_seg;
66}; 70};
67#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod) 71#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
72#define BFA_MEM_SGPG_KVA(__bfa) (&(BFA_SGPG_MOD(__bfa)->kva_seg))
68 73
69bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, 74bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
70 int nsgpgs); 75 int nsgpgs);
@@ -79,26 +84,32 @@ void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
79 * FCXP related defines 84 * FCXP related defines
80 */ 85 */
81#define BFA_FCXP_MIN (1) 86#define BFA_FCXP_MIN (1)
87#define BFA_FCXP_MAX (256)
82#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256) 88#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
83#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256) 89#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
84 90
91/* Max FCXP dma segs required */
92#define BFA_FCXP_DMA_SEGS \
93 BFI_MEM_DMA_NSEGS(BFA_FCXP_MAX, \
94 (u32)BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ)
95
85struct bfa_fcxp_mod_s { 96struct bfa_fcxp_mod_s {
86 struct bfa_s *bfa; /* backpointer to BFA */ 97 struct bfa_s *bfa; /* backpointer to BFA */
87 struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */ 98 struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
88 u16 num_fcxps; /* max num FCXP requests */ 99 u16 num_fcxps; /* max num FCXP requests */
89 struct list_head fcxp_free_q; /* free FCXPs */ 100 struct list_head fcxp_free_q; /* free FCXPs */
90 struct list_head fcxp_active_q; /* active FCXPs */ 101 struct list_head fcxp_active_q; /* active FCXPs */
91 void *req_pld_list_kva; /* list of FCXP req pld */
92 u64 req_pld_list_pa; /* list of FCXP req pld */
93 void *rsp_pld_list_kva; /* list of FCXP resp pld */
94 u64 rsp_pld_list_pa; /* list of FCXP resp pld */
95 struct list_head wait_q; /* wait queue for free fcxp */ 102 struct list_head wait_q; /* wait queue for free fcxp */
103 struct list_head fcxp_unused_q; /* unused fcxps */
96 u32 req_pld_sz; 104 u32 req_pld_sz;
97 u32 rsp_pld_sz; 105 u32 rsp_pld_sz;
106 struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
107 struct bfa_mem_kva_s kva_seg;
98}; 108};
99 109
100#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod) 110#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
101#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag]) 111#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
112#define BFA_MEM_FCXP_KVA(__bfa) (&(BFA_FCXP_MOD(__bfa)->kva_seg))
102 113
103typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp, 114typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
104 void *cb_arg, bfa_status_t req_status, 115 void *cb_arg, bfa_status_t req_status,
@@ -206,13 +217,15 @@ struct bfa_fcxp_wqe_s {
206#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs)) 217#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
207#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp)) 218#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
208 219
209#define BFA_FCXP_REQ_PLD_PA(_fcxp) \ 220#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
210 ((_fcxp)->fcxp_mod->req_pld_list_pa + \ 221 bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \
211 ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag)) 222 (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz)
212 223
213#define BFA_FCXP_RSP_PLD_PA(_fcxp) \ 224/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
214 ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \ 225#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
215 ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag)) 226 (bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \
227 (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) + \
228 (_fcxp)->fcxp_mod->req_pld_sz)
216 229
217void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 230void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
218 231
@@ -238,10 +251,13 @@ struct bfa_rport_mod_s {
238 struct bfa_rport_s *rps_list; /* list of rports */ 251 struct bfa_rport_s *rps_list; /* list of rports */
239 struct list_head rp_free_q; /* free bfa_rports */ 252 struct list_head rp_free_q; /* free bfa_rports */
240 struct list_head rp_active_q; /* free bfa_rports */ 253 struct list_head rp_active_q; /* free bfa_rports */
254 struct list_head rp_unused_q; /* unused bfa rports */
241 u16 num_rports; /* number of rports */ 255 u16 num_rports; /* number of rports */
256 struct bfa_mem_kva_s kva_seg;
242}; 257};
243 258
244#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod) 259#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
260#define BFA_MEM_RPORT_KVA(__bfa) (&(BFA_RPORT_MOD(__bfa)->kva_seg))
245 261
246/* 262/*
247 * Convert rport tag to RPORT 263 * Convert rport tag to RPORT
@@ -254,6 +270,7 @@ struct bfa_rport_mod_s {
254 * protected functions 270 * protected functions
255 */ 271 */
256void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 272void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
273void bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw);
257 274
258/* 275/*
259 * BFA rport information. 276 * BFA rport information.
@@ -298,7 +315,7 @@ struct bfa_rport_s {
298 */ 315 */
299 316
300#define BFA_UF_MIN (4) 317#define BFA_UF_MIN (4)
301 318#define BFA_UF_MAX (256)
302 319
303struct bfa_uf_s { 320struct bfa_uf_s {
304 struct list_head qe; /* queue element */ 321 struct list_head qe; /* queue element */
@@ -326,36 +343,41 @@ struct bfa_uf_s {
326 */ 343 */
327typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf); 344typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
328 345
346#define BFA_UF_BUFSZ (2 * 1024 + 256)
347
348struct bfa_uf_buf_s {
349 u8 d[BFA_UF_BUFSZ];
350};
351
352#define BFA_PER_UF_DMA_SZ \
353 (u32)BFA_ROUNDUP(sizeof(struct bfa_uf_buf_s), BFA_DMA_ALIGN_SZ)
354
355/* Max UF dma segs required */
356#define BFA_UF_DMA_SEGS BFI_MEM_DMA_NSEGS(BFA_UF_MAX, BFA_PER_UF_DMA_SZ)
357
329struct bfa_uf_mod_s { 358struct bfa_uf_mod_s {
330 struct bfa_s *bfa; /* back pointer to BFA */ 359 struct bfa_s *bfa; /* back pointer to BFA */
331 struct bfa_uf_s *uf_list; /* array of UFs */ 360 struct bfa_uf_s *uf_list; /* array of UFs */
332 u16 num_ufs; /* num unsolicited rx frames */ 361 u16 num_ufs; /* num unsolicited rx frames */
333 struct list_head uf_free_q; /* free UFs */ 362 struct list_head uf_free_q; /* free UFs */
334 struct list_head uf_posted_q; /* UFs posted to IOC */ 363 struct list_head uf_posted_q; /* UFs posted to IOC */
335 struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */ 364 struct list_head uf_unused_q; /* unused UF's */
336 u64 uf_pbs_pa; /* phy addr for UF bufs */
337 struct bfi_uf_buf_post_s *uf_buf_posts; 365 struct bfi_uf_buf_post_s *uf_buf_posts;
338 /* pre-built UF post msgs */ 366 /* pre-built UF post msgs */
339 bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */ 367 bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */
340 void *cbarg; /* uf receive handler arg */ 368 void *cbarg; /* uf receive handler arg */
369 struct bfa_mem_dma_s dma_seg[BFA_UF_DMA_SEGS];
370 struct bfa_mem_kva_s kva_seg;
341}; 371};
342 372
343#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod) 373#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
374#define BFA_MEM_UF_KVA(__bfa) (&(BFA_UF_MOD(__bfa)->kva_seg))
344 375
345#define ufm_pbs_pa(_ufmod, _uftag) \ 376#define ufm_pbs_pa(_ufmod, _uftag) \
346 ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag)) 377 bfa_mem_get_dmabuf_pa(_ufmod, _uftag, BFA_PER_UF_DMA_SZ)
347 378
348void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 379void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
349 380void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
350#define BFA_UF_BUFSZ (2 * 1024 + 256)
351
352/*
353 * @todo private
354 */
355struct bfa_uf_buf_s {
356 u8 d[BFA_UF_BUFSZ];
357};
358
359 381
360/* 382/*
361 * LPS - bfa lport login/logout service interface 383 * LPS - bfa lport login/logout service interface
@@ -364,7 +386,8 @@ struct bfa_lps_s {
364 struct list_head qe; /* queue element */ 386 struct list_head qe; /* queue element */
365 struct bfa_s *bfa; /* parent bfa instance */ 387 struct bfa_s *bfa; /* parent bfa instance */
366 bfa_sm_t sm; /* finite state machine */ 388 bfa_sm_t sm; /* finite state machine */
367 u8 lp_tag; /* lport tag */ 389 u8 bfa_tag; /* lport tag */
390 u8 fw_tag; /* lport fw tag */
368 u8 reqq; /* lport request queue */ 391 u8 reqq; /* lport request queue */
369 u8 alpa; /* ALPA for loop topologies */ 392 u8 alpa; /* ALPA for loop topologies */
370 u32 lp_pid; /* lport port ID */ 393 u32 lp_pid; /* lport port ID */
@@ -377,6 +400,8 @@ struct bfa_lps_s {
377 bfa_status_t status; /* login status */ 400 bfa_status_t status; /* login status */
378 u16 pdusz; /* max receive PDU size */ 401 u16 pdusz; /* max receive PDU size */
379 u16 pr_bbcred; /* BB_CREDIT from peer */ 402 u16 pr_bbcred; /* BB_CREDIT from peer */
403 u8 pr_bbscn; /* BB_SCN from peer */
404 u8 bb_scn; /* local BB_SCN */
380 u8 lsrjt_rsn; /* LSRJT reason */ 405 u8 lsrjt_rsn; /* LSRJT reason */
381 u8 lsrjt_expl; /* LSRJT explanation */ 406 u8 lsrjt_expl; /* LSRJT explanation */
382 wwn_t pwwn; /* port wwn of lport */ 407 wwn_t pwwn; /* port wwn of lport */
@@ -395,12 +420,15 @@ struct bfa_lps_s {
395struct bfa_lps_mod_s { 420struct bfa_lps_mod_s {
396 struct list_head lps_free_q; 421 struct list_head lps_free_q;
397 struct list_head lps_active_q; 422 struct list_head lps_active_q;
423 struct list_head lps_login_q;
398 struct bfa_lps_s *lps_arr; 424 struct bfa_lps_s *lps_arr;
399 int num_lps; 425 int num_lps;
426 struct bfa_mem_kva_s kva_seg;
400}; 427};
401 428
402#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod) 429#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
403#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag]) 430#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
431#define BFA_MEM_LPS_KVA(__bfa) (&(BFA_LPS_MOD(__bfa)->kva_seg))
404 432
405/* 433/*
406 * external functions 434 * external functions
@@ -477,11 +505,14 @@ struct bfa_fcport_s {
477 bfa_boolean_t diag_busy; /* diag busy status */ 505 bfa_boolean_t diag_busy; /* diag busy status */
478 bfa_boolean_t beacon; /* port beacon status */ 506 bfa_boolean_t beacon; /* port beacon status */
479 bfa_boolean_t link_e2e_beacon; /* link beacon status */ 507 bfa_boolean_t link_e2e_beacon; /* link beacon status */
508 bfa_boolean_t bbsc_op_state; /* Cred recov Oper State */
480 struct bfa_fcport_trunk_s trunk; 509 struct bfa_fcport_trunk_s trunk;
481 u16 fcoe_vlan; 510 u16 fcoe_vlan;
511 struct bfa_mem_dma_s fcport_dma;
482}; 512};
483 513
484#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) 514#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
515#define BFA_MEM_FCPORT_DMA(__bfa) (&(BFA_FCPORT_MOD(__bfa)->fcport_dma))
485 516
486/* 517/*
487 * protected functions 518 * protected functions
@@ -515,8 +546,10 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
515bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); 546bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
516enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); 547enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
517 548
518void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); 549void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn);
519bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); 550bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
551void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
552 bfa_boolean_t link_e2e_beacon);
520bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); 553bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
521bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, 554bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
522 union bfa_fcport_stats_u *stats, 555 union bfa_fcport_stats_u *stats,
@@ -524,6 +557,9 @@ bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
524bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, 557bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
525 void *cbarg); 558 void *cbarg);
526bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); 559bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
560bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
561bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
562void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
527 563
528/* 564/*
529 * bfa rport API functions 565 * bfa rport API functions
@@ -577,6 +613,7 @@ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
577bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp); 613bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
578u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp); 614u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
579u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa); 615u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
616void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw);
580 617
581static inline void * 618static inline void *
582bfa_uf_get_frmbuf(struct bfa_uf_s *uf) 619bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
@@ -606,11 +643,12 @@ struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
606void bfa_lps_delete(struct bfa_lps_s *lps); 643void bfa_lps_delete(struct bfa_lps_s *lps);
607void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, 644void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
608 u16 pdusz, wwn_t pwwn, wwn_t nwwn, 645 u16 pdusz, wwn_t pwwn, wwn_t nwwn,
609 bfa_boolean_t auth_en); 646 bfa_boolean_t auth_en, u8 bb_scn);
610void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, 647void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
611 wwn_t pwwn, wwn_t nwwn); 648 wwn_t pwwn, wwn_t nwwn);
612void bfa_lps_fdisclogo(struct bfa_lps_s *lps); 649void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
613void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid); 650void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
651u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
614u32 bfa_lps_get_base_pid(struct bfa_s *bfa); 652u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
615u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid); 653u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
616void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); 654void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
@@ -618,4 +656,57 @@ void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
618void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); 656void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
619void bfa_cb_lps_cvl_event(void *bfad, void *uarg); 657void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
620 658
659/* FAA specific APIs */
660bfa_status_t bfa_faa_enable(struct bfa_s *bfa,
661 bfa_cb_iocfc_t cbfn, void *cbarg);
662bfa_status_t bfa_faa_disable(struct bfa_s *bfa,
663 bfa_cb_iocfc_t cbfn, void *cbarg);
664bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
665 bfa_cb_iocfc_t cbfn, void *cbarg);
666
667/*
668 * FC DIAG data structure
669 */
670struct bfa_fcdiag_qtest_s {
671 struct bfa_diag_qtest_result_s *result;
672 bfa_cb_diag_t cbfn;
673 void *cbarg;
674 struct bfa_timer_s timer;
675 u32 status;
676 u32 count;
677 u8 lock;
678 u8 queue;
679 u8 all;
680 u8 timer_active;
681};
682
683struct bfa_fcdiag_lb_s {
684 bfa_cb_diag_t cbfn;
685 void *cbarg;
686 void *result;
687 bfa_boolean_t lock;
688 u32 status;
689};
690
691struct bfa_fcdiag_s {
692 struct bfa_s *bfa; /* Back pointer to BFA */
693 struct bfa_trc_mod_s *trcmod;
694 struct bfa_fcdiag_lb_s lb;
695 struct bfa_fcdiag_qtest_s qtest;
696};
697
698#define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag)
699
700void bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg);
701
702bfa_status_t bfa_fcdiag_loopback(struct bfa_s *bfa,
703 enum bfa_port_opmode opmode,
704 enum bfa_port_speed speed, u32 lpcnt, u32 pat,
705 struct bfa_diag_loopback_result_s *result,
706 bfa_cb_diag_t cbfn, void *cbarg);
707bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
708 u32 queue, struct bfa_diag_qtest_result_s *result,
709 bfa_cb_diag_t cbfn, void *cbarg);
710bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
711
621#endif /* __BFA_SVC_H__ */ 712#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 59b5e9b61d71..beb30a748ea5 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -56,14 +56,15 @@ int fdmi_enable = BFA_TRUE;
56int pcie_max_read_reqsz; 56int pcie_max_read_reqsz;
57int bfa_debugfs_enable = 1; 57int bfa_debugfs_enable = 1;
58int msix_disable_cb = 0, msix_disable_ct = 0; 58int msix_disable_cb = 0, msix_disable_ct = 0;
59int max_xfer_size = BFAD_MAX_SECTORS >> 1;
59 60
60/* Firmware releated */ 61/* Firmware releated */
61u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; 62u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
62u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc; 63u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
63 64
64#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin" 65#define BFAD_FW_FILE_CB "cbfw.bin"
65#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin" 66#define BFAD_FW_FILE_CT "ctfw.bin"
66#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin" 67#define BFAD_FW_FILE_CT2 "ct2fw.bin"
67 68
68static u32 *bfad_load_fwimg(struct pci_dev *pdev); 69static u32 *bfad_load_fwimg(struct pci_dev *pdev);
69static void bfad_free_fwimg(void); 70static void bfad_free_fwimg(void);
@@ -71,18 +72,18 @@ static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
71 u32 *bfi_image_size, char *fw_name); 72 u32 *bfi_image_size, char *fw_name);
72 73
73static const char *msix_name_ct[] = { 74static const char *msix_name_ct[] = {
75 "ctrl",
74 "cpe0", "cpe1", "cpe2", "cpe3", 76 "cpe0", "cpe1", "cpe2", "cpe3",
75 "rme0", "rme1", "rme2", "rme3", 77 "rme0", "rme1", "rme2", "rme3" };
76 "ctrl" };
77 78
78static const char *msix_name_cb[] = { 79static const char *msix_name_cb[] = {
79 "cpe0", "cpe1", "cpe2", "cpe3", 80 "cpe0", "cpe1", "cpe2", "cpe3",
80 "rme0", "rme1", "rme2", "rme3", 81 "rme0", "rme1", "rme2", "rme3",
81 "eemc", "elpu0", "elpu1", "epss", "mlpu" }; 82 "eemc", "elpu0", "elpu1", "epss", "mlpu" };
82 83
83MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC); 84MODULE_FIRMWARE(BFAD_FW_FILE_CB);
84MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA); 85MODULE_FIRMWARE(BFAD_FW_FILE_CT);
85MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC); 86MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
86 87
87module_param(os_name, charp, S_IRUGO | S_IWUSR); 88module_param(os_name, charp, S_IRUGO | S_IWUSR);
88MODULE_PARM_DESC(os_name, "OS name of the hba host machine"); 89MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
@@ -144,6 +145,9 @@ MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
144module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR); 145module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
145MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1," 146MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
146 " Range[false:0|true:1]"); 147 " Range[false:0|true:1]");
148module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
149MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
150 " Range[64k|128k|256k|512k|1024k|2048k]");
147 151
148static void 152static void
149bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event); 153bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
@@ -527,28 +531,26 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
527void 531void
528bfad_hal_mem_release(struct bfad_s *bfad) 532bfad_hal_mem_release(struct bfad_s *bfad)
529{ 533{
530 int i;
531 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; 534 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
532 struct bfa_mem_elem_s *meminfo_elem; 535 struct bfa_mem_dma_s *dma_info, *dma_elem;
533 536 struct bfa_mem_kva_s *kva_info, *kva_elem;
534 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { 537 struct list_head *dm_qe, *km_qe;
535 meminfo_elem = &hal_meminfo->meminfo[i]; 538
536 if (meminfo_elem->kva != NULL) { 539 dma_info = &hal_meminfo->dma_info;
537 switch (meminfo_elem->mem_type) { 540 kva_info = &hal_meminfo->kva_info;
538 case BFA_MEM_TYPE_KVA: 541
539 vfree(meminfo_elem->kva); 542 /* Iterate through the KVA meminfo queue */
540 break; 543 list_for_each(km_qe, &kva_info->qe) {
541 case BFA_MEM_TYPE_DMA: 544 kva_elem = (struct bfa_mem_kva_s *) km_qe;
542 dma_free_coherent(&bfad->pcidev->dev, 545 vfree(kva_elem->kva);
543 meminfo_elem->mem_len, 546 }
544 meminfo_elem->kva, 547
545 (dma_addr_t) meminfo_elem->dma); 548 /* Iterate through the DMA meminfo queue */
546 break; 549 list_for_each(dm_qe, &dma_info->qe) {
547 default: 550 dma_elem = (struct bfa_mem_dma_s *) dm_qe;
548 WARN_ON(1); 551 dma_free_coherent(&bfad->pcidev->dev,
549 break; 552 dma_elem->mem_len, dma_elem->kva,
550 } 553 (dma_addr_t) dma_elem->dma);
551 }
552 } 554 }
553 555
554 memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s)); 556 memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
@@ -563,15 +565,15 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
563 bfa_cfg->fwcfg.num_ioim_reqs = num_ios; 565 bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
564 if (num_tms > 0) 566 if (num_tms > 0)
565 bfa_cfg->fwcfg.num_tskim_reqs = num_tms; 567 bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
566 if (num_fcxps > 0) 568 if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
567 bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps; 569 bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
568 if (num_ufbufs > 0) 570 if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
569 bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs; 571 bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
570 if (reqq_size > 0) 572 if (reqq_size > 0)
571 bfa_cfg->drvcfg.num_reqq_elems = reqq_size; 573 bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
572 if (rspq_size > 0) 574 if (rspq_size > 0)
573 bfa_cfg->drvcfg.num_rspq_elems = rspq_size; 575 bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
574 if (num_sgpgs > 0) 576 if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
575 bfa_cfg->drvcfg.num_sgpgs = num_sgpgs; 577 bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
576 578
577 /* 579 /*
@@ -591,85 +593,46 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
591bfa_status_t 593bfa_status_t
592bfad_hal_mem_alloc(struct bfad_s *bfad) 594bfad_hal_mem_alloc(struct bfad_s *bfad)
593{ 595{
594 int i;
595 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; 596 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
596 struct bfa_mem_elem_s *meminfo_elem; 597 struct bfa_mem_dma_s *dma_info, *dma_elem;
597 dma_addr_t phys_addr; 598 struct bfa_mem_kva_s *kva_info, *kva_elem;
598 void *kva; 599 struct list_head *dm_qe, *km_qe;
599 bfa_status_t rc = BFA_STATUS_OK; 600 bfa_status_t rc = BFA_STATUS_OK;
600 int retry_count = 0; 601 dma_addr_t phys_addr;
601 int reset_value = 1;
602 int min_num_sgpgs = 512;
603 602
604 bfa_cfg_get_default(&bfad->ioc_cfg); 603 bfa_cfg_get_default(&bfad->ioc_cfg);
605
606retry:
607 bfad_update_hal_cfg(&bfad->ioc_cfg); 604 bfad_update_hal_cfg(&bfad->ioc_cfg);
608 bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs; 605 bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
609 bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo); 606 bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
610 607
611 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { 608 dma_info = &hal_meminfo->dma_info;
612 meminfo_elem = &hal_meminfo->meminfo[i]; 609 kva_info = &hal_meminfo->kva_info;
613 switch (meminfo_elem->mem_type) { 610
614 case BFA_MEM_TYPE_KVA: 611 /* Iterate through the KVA meminfo queue */
615 kva = vmalloc(meminfo_elem->mem_len); 612 list_for_each(km_qe, &kva_info->qe) {
616 if (kva == NULL) { 613 kva_elem = (struct bfa_mem_kva_s *) km_qe;
617 bfad_hal_mem_release(bfad); 614 kva_elem->kva = vmalloc(kva_elem->mem_len);
618 rc = BFA_STATUS_ENOMEM; 615 if (kva_elem->kva == NULL) {
619 goto ext; 616 bfad_hal_mem_release(bfad);
620 } 617 rc = BFA_STATUS_ENOMEM;
621 memset(kva, 0, meminfo_elem->mem_len); 618 goto ext;
622 meminfo_elem->kva = kva; 619 }
623 break; 620 memset(kva_elem->kva, 0, kva_elem->mem_len);
624 case BFA_MEM_TYPE_DMA: 621 }
625 kva = dma_alloc_coherent(&bfad->pcidev->dev,
626 meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
627 if (kva == NULL) {
628 bfad_hal_mem_release(bfad);
629 /*
630 * If we cannot allocate with default
631 * num_sgpages try with half the value.
632 */
633 if (num_sgpgs > min_num_sgpgs) {
634 printk(KERN_INFO
635 "bfad[%d]: memory allocation failed"
636 " with num_sgpgs: %d\n",
637 bfad->inst_no, num_sgpgs);
638 nextLowerInt(&num_sgpgs);
639 printk(KERN_INFO
640 "bfad[%d]: trying to allocate memory"
641 " with num_sgpgs: %d\n",
642 bfad->inst_no, num_sgpgs);
643 retry_count++;
644 goto retry;
645 } else {
646 if (num_sgpgs_parm > 0)
647 num_sgpgs = num_sgpgs_parm;
648 else {
649 reset_value =
650 (1 << retry_count);
651 num_sgpgs *= reset_value;
652 }
653 rc = BFA_STATUS_ENOMEM;
654 goto ext;
655 }
656 }
657
658 if (num_sgpgs_parm > 0)
659 num_sgpgs = num_sgpgs_parm;
660 else {
661 reset_value = (1 << retry_count);
662 num_sgpgs *= reset_value;
663 }
664
665 memset(kva, 0, meminfo_elem->mem_len);
666 meminfo_elem->kva = kva;
667 meminfo_elem->dma = phys_addr;
668 break;
669 default:
670 break;
671 622
623 /* Iterate through the DMA meminfo queue */
624 list_for_each(dm_qe, &dma_info->qe) {
625 dma_elem = (struct bfa_mem_dma_s *) dm_qe;
626 dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
627 dma_elem->mem_len,
628 &phys_addr, GFP_KERNEL);
629 if (dma_elem->kva == NULL) {
630 bfad_hal_mem_release(bfad);
631 rc = BFA_STATUS_ENOMEM;
632 goto ext;
672 } 633 }
634 dma_elem->dma = phys_addr;
635 memset(dma_elem->kva, 0, dma_elem->mem_len);
673 } 636 }
674ext: 637ext:
675 return rc; 638 return rc;
@@ -780,13 +743,17 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
780 pci_set_master(pdev); 743 pci_set_master(pdev);
781 744
782 745
783 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 746 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
784 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 747 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
748 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
749 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
785 printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev); 750 printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
786 goto out_release_region; 751 goto out_release_region;
787 } 752 }
753 }
788 754
789 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 755 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
756 bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
790 757
791 if (bfad->pci_bar0_kva == NULL) { 758 if (bfad->pci_bar0_kva == NULL) {
792 printk(KERN_ERR "Fail to map bar0\n"); 759 printk(KERN_ERR "Fail to map bar0\n");
@@ -797,6 +764,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
797 bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn); 764 bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
798 bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva; 765 bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
799 bfad->hal_pcidev.device_id = pdev->device; 766 bfad->hal_pcidev.device_id = pdev->device;
767 bfad->hal_pcidev.ssid = pdev->subsystem_device;
800 bfad->pci_name = pci_name(pdev); 768 bfad->pci_name = pci_name(pdev);
801 769
802 bfad->pci_attr.vendor_id = pdev->vendor; 770 bfad->pci_attr.vendor_id = pdev->vendor;
@@ -868,6 +836,7 @@ void
868bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) 836bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
869{ 837{
870 pci_iounmap(pdev, bfad->pci_bar0_kva); 838 pci_iounmap(pdev, bfad->pci_bar0_kva);
839 pci_iounmap(pdev, bfad->pci_bar2_kva);
871 pci_release_regions(pdev); 840 pci_release_regions(pdev);
872 pci_disable_device(pdev); 841 pci_disable_device(pdev);
873 pci_set_drvdata(pdev, NULL); 842 pci_set_drvdata(pdev, NULL);
@@ -908,12 +877,29 @@ bfad_drv_init(struct bfad_s *bfad)
908 bfad->bfa_fcs.trcmod = bfad->trcmod; 877 bfad->bfa_fcs.trcmod = bfad->trcmod;
909 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); 878 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
910 bfad->bfa_fcs.fdmi_enabled = fdmi_enable; 879 bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
880 bfa_fcs_init(&bfad->bfa_fcs);
911 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 881 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
912 882
913 bfad->bfad_flags |= BFAD_DRV_INIT_DONE; 883 bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
914 884
885 /* configure base port */
886 rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
887 if (rc != BFA_STATUS_OK)
888 goto out_cfg_pport_fail;
889
915 return BFA_STATUS_OK; 890 return BFA_STATUS_OK;
916 891
892out_cfg_pport_fail:
893 /* fcs exit - on cfg pport failure */
894 spin_lock_irqsave(&bfad->bfad_lock, flags);
895 init_completion(&bfad->comp);
896 bfad->pport.flags |= BFAD_PORT_DELETE;
897 bfa_fcs_exit(&bfad->bfa_fcs);
898 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
899 wait_for_completion(&bfad->comp);
900 /* bfa detach - free hal memory */
901 bfa_detach(&bfad->bfa);
902 bfad_hal_mem_release(bfad);
917out_hal_mem_alloc_failure: 903out_hal_mem_alloc_failure:
918 return BFA_STATUS_FAILED; 904 return BFA_STATUS_FAILED;
919} 905}
@@ -945,6 +931,7 @@ bfad_drv_start(struct bfad_s *bfad)
945 931
946 spin_lock_irqsave(&bfad->bfad_lock, flags); 932 spin_lock_irqsave(&bfad->bfad_lock, flags);
947 bfa_iocfc_start(&bfad->bfa); 933 bfa_iocfc_start(&bfad->bfa);
934 bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
948 bfa_fcs_fabric_modstart(&bfad->bfa_fcs); 935 bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
949 bfad->bfad_flags |= BFAD_HAL_START_DONE; 936 bfad->bfad_flags |= BFAD_HAL_START_DONE;
950 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 937 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -1032,6 +1019,12 @@ bfad_start_ops(struct bfad_s *bfad) {
1032 struct bfad_vport_s *vport, *vport_new; 1019 struct bfad_vport_s *vport, *vport_new;
1033 struct bfa_fcs_driver_info_s driver_info; 1020 struct bfa_fcs_driver_info_s driver_info;
1034 1021
1022 /* Limit min/max. xfer size to [64k-32MB] */
1023 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
1024 max_xfer_size = BFAD_MIN_SECTORS >> 1;
1025 if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
1026 max_xfer_size = BFAD_MAX_SECTORS >> 1;
1027
1035 /* Fill the driver_info info to fcs*/ 1028 /* Fill the driver_info info to fcs*/
1036 memset(&driver_info, 0, sizeof(driver_info)); 1029 memset(&driver_info, 0, sizeof(driver_info));
1037 strncpy(driver_info.version, BFAD_DRIVER_VERSION, 1030 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
@@ -1049,19 +1042,19 @@ bfad_start_ops(struct bfad_s *bfad) {
1049 strncpy(driver_info.os_device_name, bfad->pci_name, 1042 strncpy(driver_info.os_device_name, bfad->pci_name,
1050 sizeof(driver_info.os_device_name - 1)); 1043 sizeof(driver_info.os_device_name - 1));
1051 1044
1052 /* FCS INIT */ 1045 /* FCS driver info init */
1053 spin_lock_irqsave(&bfad->bfad_lock, flags); 1046 spin_lock_irqsave(&bfad->bfad_lock, flags);
1054 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); 1047 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
1055 bfa_fcs_init(&bfad->bfa_fcs);
1056 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1048 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1057 1049
1058 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); 1050 /*
1059 if (retval != BFA_STATUS_OK) { 1051 * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
1060 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) 1052 * with values learned during bfa_init firmware GETATTR REQ.
1061 bfa_sm_set_state(bfad, bfad_sm_failed); 1053 */
1062 bfad_stop(bfad); 1054 bfa_fcs_update_cfg(&bfad->bfa_fcs);
1063 return BFA_STATUS_FAILED; 1055
1064 } 1056 /* Setup fc host fixed attribute if the lk supports */
1057 bfad_fc_host_init(bfad->pport.im_port);
1065 1058
1066 /* BFAD level FC4 IM specific resource allocation */ 1059 /* BFAD level FC4 IM specific resource allocation */
1067 retval = bfad_im_probe(bfad); 1060 retval = bfad_im_probe(bfad);
@@ -1233,8 +1226,8 @@ bfad_install_msix_handler(struct bfad_s *bfad)
1233 for (i = 0; i < bfad->nvec; i++) { 1226 for (i = 0; i < bfad->nvec; i++) {
1234 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s", 1227 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
1235 bfad->pci_name, 1228 bfad->pci_name,
1236 ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ? 1229 ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
1237 msix_name_ct[i] : msix_name_cb[i])); 1230 msix_name_cb[i] : msix_name_ct[i]));
1238 1231
1239 error = request_irq(bfad->msix_tab[i].msix.vector, 1232 error = request_irq(bfad->msix_tab[i].msix.vector,
1240 (irq_handler_t) bfad_msix, 0, 1233 (irq_handler_t) bfad_msix, 0,
@@ -1248,6 +1241,9 @@ bfad_install_msix_handler(struct bfad_s *bfad)
1248 free_irq(bfad->msix_tab[j].msix.vector, 1241 free_irq(bfad->msix_tab[j].msix.vector,
1249 &bfad->msix_tab[j]); 1242 &bfad->msix_tab[j]);
1250 1243
1244 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1245 pci_disable_msix(bfad->pcidev);
1246
1251 return 1; 1247 return 1;
1252 } 1248 }
1253 } 1249 }
@@ -1265,6 +1261,7 @@ bfad_setup_intr(struct bfad_s *bfad)
1265 u32 mask = 0, i, num_bit = 0, max_bit = 0; 1261 u32 mask = 0, i, num_bit = 0, max_bit = 0;
1266 struct msix_entry msix_entries[MAX_MSIX_ENTRY]; 1262 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
1267 struct pci_dev *pdev = bfad->pcidev; 1263 struct pci_dev *pdev = bfad->pcidev;
1264 u16 reg;
1268 1265
1269 /* Call BFA to get the msix map for this PCI function. */ 1266 /* Call BFA to get the msix map for this PCI function. */
1270 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); 1267 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
@@ -1272,8 +1269,8 @@ bfad_setup_intr(struct bfad_s *bfad)
1272 /* Set up the msix entry table */ 1269 /* Set up the msix entry table */
1273 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); 1270 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
1274 1271
1275 if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) || 1272 if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
1276 (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) { 1273 (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
1277 1274
1278 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); 1275 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
1279 if (error) { 1276 if (error) {
@@ -1294,6 +1291,13 @@ bfad_setup_intr(struct bfad_s *bfad)
1294 goto line_based; 1291 goto line_based;
1295 } 1292 }
1296 1293
1294 /* Disable INTX in MSI-X mode */
1295 pci_read_config_word(pdev, PCI_COMMAND, &reg);
1296
1297 if (!(reg & PCI_COMMAND_INTX_DISABLE))
1298 pci_write_config_word(pdev, PCI_COMMAND,
1299 reg | PCI_COMMAND_INTX_DISABLE);
1300
1297 /* Save the vectors */ 1301 /* Save the vectors */
1298 for (i = 0; i < bfad->nvec; i++) { 1302 for (i = 0; i < bfad->nvec; i++) {
1299 bfa_trc(bfad, msix_entries[i].vector); 1303 bfa_trc(bfad, msix_entries[i].vector);
@@ -1315,6 +1319,7 @@ line_based:
1315 /* Enable interrupt handler failed */ 1319 /* Enable interrupt handler failed */
1316 return 1; 1320 return 1;
1317 } 1321 }
1322 bfad->bfad_flags |= BFAD_INTX_ON;
1318 1323
1319 return error; 1324 return error;
1320} 1325}
@@ -1331,7 +1336,7 @@ bfad_remove_intr(struct bfad_s *bfad)
1331 1336
1332 pci_disable_msix(bfad->pcidev); 1337 pci_disable_msix(bfad->pcidev);
1333 bfad->bfad_flags &= ~BFAD_MSIX_ON; 1338 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1334 } else { 1339 } else if (bfad->bfad_flags & BFAD_INTX_ON) {
1335 free_irq(bfad->pcidev->irq, bfad); 1340 free_irq(bfad->pcidev->irq, bfad);
1336 } 1341 }
1337} 1342}
@@ -1501,6 +1506,14 @@ struct pci_device_id bfad_id_table[] = {
1501 .class = (PCI_CLASS_SERIAL_FIBER << 8), 1506 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1502 .class_mask = ~0, 1507 .class_mask = ~0,
1503 }, 1508 },
1509 {
1510 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1511 .device = BFA_PCI_DEVICE_ID_CT2,
1512 .subvendor = PCI_ANY_ID,
1513 .subdevice = PCI_ANY_ID,
1514 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1515 .class_mask = ~0,
1516 },
1504 1517
1505 {0, 0}, 1518 {0, 0},
1506}; 1519};
@@ -1594,33 +1607,33 @@ out:
1594static u32 * 1607static u32 *
1595bfad_load_fwimg(struct pci_dev *pdev) 1608bfad_load_fwimg(struct pci_dev *pdev)
1596{ 1609{
1597 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) { 1610 if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
1598 if (bfi_image_ct_fc_size == 0) 1611 if (bfi_image_ct2_size == 0)
1599 bfad_read_firmware(pdev, &bfi_image_ct_fc, 1612 bfad_read_firmware(pdev, &bfi_image_ct2,
1600 &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC); 1613 &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
1601 return bfi_image_ct_fc; 1614 return bfi_image_ct2;
1602 } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) { 1615 } else if (bfa_asic_id_ct(pdev->device)) {
1603 if (bfi_image_ct_cna_size == 0) 1616 if (bfi_image_ct_size == 0)
1604 bfad_read_firmware(pdev, &bfi_image_ct_cna, 1617 bfad_read_firmware(pdev, &bfi_image_ct,
1605 &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA); 1618 &bfi_image_ct_size, BFAD_FW_FILE_CT);
1606 return bfi_image_ct_cna; 1619 return bfi_image_ct;
1607 } else { 1620 } else {
1608 if (bfi_image_cb_fc_size == 0) 1621 if (bfi_image_cb_size == 0)
1609 bfad_read_firmware(pdev, &bfi_image_cb_fc, 1622 bfad_read_firmware(pdev, &bfi_image_cb,
1610 &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC); 1623 &bfi_image_cb_size, BFAD_FW_FILE_CB);
1611 return bfi_image_cb_fc; 1624 return bfi_image_cb;
1612 } 1625 }
1613} 1626}
1614 1627
1615static void 1628static void
1616bfad_free_fwimg(void) 1629bfad_free_fwimg(void)
1617{ 1630{
1618 if (bfi_image_ct_fc_size && bfi_image_ct_fc) 1631 if (bfi_image_ct2_size && bfi_image_ct2)
1619 vfree(bfi_image_ct_fc); 1632 vfree(bfi_image_ct2);
1620 if (bfi_image_ct_cna_size && bfi_image_ct_cna) 1633 if (bfi_image_ct_size && bfi_image_ct)
1621 vfree(bfi_image_ct_cna); 1634 vfree(bfi_image_ct);
1622 if (bfi_image_cb_fc_size && bfi_image_cb_fc) 1635 if (bfi_image_cb_size && bfi_image_cb)
1623 vfree(bfi_image_cb_fc); 1636 vfree(bfi_image_cb);
1624} 1637}
1625 1638
1626module_init(bfad_init); 1639module_init(bfad_init);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index a94ea4235433..9d95844ab463 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -218,6 +218,9 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
218 case BFA_PORT_SPEED_10GBPS: 218 case BFA_PORT_SPEED_10GBPS:
219 fc_host_speed(shost) = FC_PORTSPEED_10GBIT; 219 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
220 break; 220 break;
221 case BFA_PORT_SPEED_16GBPS:
222 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
223 break;
221 case BFA_PORT_SPEED_8GBPS: 224 case BFA_PORT_SPEED_8GBPS:
222 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 225 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
223 break; 226 break;
@@ -580,6 +583,8 @@ struct fc_function_template bfad_im_fc_function_template = {
580 .vport_create = bfad_im_vport_create, 583 .vport_create = bfad_im_vport_create,
581 .vport_delete = bfad_im_vport_delete, 584 .vport_delete = bfad_im_vport_delete,
582 .vport_disable = bfad_im_vport_disable, 585 .vport_disable = bfad_im_vport_disable,
586 .bsg_request = bfad_im_bsg_request,
587 .bsg_timeout = bfad_im_bsg_timeout,
583}; 588};
584 589
585struct fc_function_template bfad_im_vport_fc_function_template = { 590struct fc_function_template bfad_im_vport_fc_function_template = {
@@ -674,8 +679,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
674 struct bfad_s *bfad = im_port->bfad; 679 struct bfad_s *bfad = im_port->bfad;
675 char model[BFA_ADAPTER_MODEL_NAME_LEN]; 680 char model[BFA_ADAPTER_MODEL_NAME_LEN];
676 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; 681 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
682 int nports = 0;
677 683
678 bfa_get_adapter_model(&bfad->bfa, model); 684 bfa_get_adapter_model(&bfad->bfa, model);
685 nports = bfa_get_nports(&bfad->bfa);
679 if (!strcmp(model, "Brocade-425")) 686 if (!strcmp(model, "Brocade-425"))
680 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 687 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
681 "Brocade 4Gbps PCIe dual port FC HBA"); 688 "Brocade 4Gbps PCIe dual port FC HBA");
@@ -684,10 +691,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
684 "Brocade 8Gbps PCIe dual port FC HBA"); 691 "Brocade 8Gbps PCIe dual port FC HBA");
685 else if (!strcmp(model, "Brocade-42B")) 692 else if (!strcmp(model, "Brocade-42B"))
686 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 693 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
687 "HP 4Gbps PCIe dual port FC HBA"); 694 "Brocade 4Gbps PCIe dual port FC HBA for HP");
688 else if (!strcmp(model, "Brocade-82B")) 695 else if (!strcmp(model, "Brocade-82B"))
689 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 696 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
690 "HP 8Gbps PCIe dual port FC HBA"); 697 "Brocade 8Gbps PCIe dual port FC HBA for HP");
691 else if (!strcmp(model, "Brocade-1010")) 698 else if (!strcmp(model, "Brocade-1010"))
692 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 699 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
693 "Brocade 10Gbps single port CNA"); 700 "Brocade 10Gbps single port CNA");
@@ -696,7 +703,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
696 "Brocade 10Gbps dual port CNA"); 703 "Brocade 10Gbps dual port CNA");
697 else if (!strcmp(model, "Brocade-1007")) 704 else if (!strcmp(model, "Brocade-1007"))
698 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 705 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
699 "Brocade 10Gbps CNA"); 706 "Brocade 10Gbps CNA for IBM Blade Center");
700 else if (!strcmp(model, "Brocade-415")) 707 else if (!strcmp(model, "Brocade-415"))
701 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 708 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
702 "Brocade 4Gbps PCIe single port FC HBA"); 709 "Brocade 4Gbps PCIe single port FC HBA");
@@ -705,17 +712,45 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
705 "Brocade 8Gbps PCIe single port FC HBA"); 712 "Brocade 8Gbps PCIe single port FC HBA");
706 else if (!strcmp(model, "Brocade-41B")) 713 else if (!strcmp(model, "Brocade-41B"))
707 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 714 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
708 "HP 4Gbps PCIe single port FC HBA"); 715 "Brocade 4Gbps PCIe single port FC HBA for HP");
709 else if (!strcmp(model, "Brocade-81B")) 716 else if (!strcmp(model, "Brocade-81B"))
710 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 717 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
711 "HP 8Gbps PCIe single port FC HBA"); 718 "Brocade 8Gbps PCIe single port FC HBA for HP");
712 else if (!strcmp(model, "Brocade-804")) 719 else if (!strcmp(model, "Brocade-804"))
713 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 720 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
714 "HP Bladesystem C-class 8Gbps FC HBA"); 721 "Brocade 8Gbps FC HBA for HP Bladesystem C-class");
715 else if (!strcmp(model, "Brocade-902")) 722 else if (!strcmp(model, "Brocade-902") ||
723 !strcmp(model, "Brocade-1741"))
716 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 724 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
717 "Brocade 10Gbps CNA"); 725 "Brocade 10Gbps CNA for Dell M-Series Blade Servers");
718 else 726 else if (strstr(model, "Brocade-1560")) {
727 if (nports == 1)
728 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
729 "Brocade 16Gbps PCIe single port FC HBA");
730 else
731 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
732 "Brocade 16Gbps PCIe dual port FC HBA");
733 } else if (strstr(model, "Brocade-1710")) {
734 if (nports == 1)
735 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
736 "Brocade 10Gbps single port CNA");
737 else
738 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
739 "Brocade 10Gbps dual port CNA");
740 } else if (strstr(model, "Brocade-1860")) {
741 if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
742 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
743 "Brocade 10Gbps single port CNA");
744 else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
745 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
746 "Brocade 16Gbps PCIe single port FC HBA");
747 else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc))
748 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
749 "Brocade 10Gbps dual port CNA");
750 else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
751 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
752 "Brocade 16Gbps PCIe dual port FC HBA");
753 } else
719 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, 754 snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
720 "Invalid Model"); 755 "Invalid Model");
721 756
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
new file mode 100644
index 000000000000..89f863ed2334
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -0,0 +1,2163 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <linux/uaccess.h>
19#include "bfad_drv.h"
20#include "bfad_im.h"
21#include "bfad_bsg.h"
22
23BFA_TRC_FILE(LDRV, BSG);
24
/*
 * BSG handler: enable the IOC.
 *
 * Acts only when the IOC is currently disabled; otherwise reports
 * BFA_STATUS_IOC_FAILURE via iocmd->status.  Blocks until the enable
 * completion (bfad->enable_comp) is signalled by the completion path.
 * Always returns 0; the user-visible result is iocmd->status.
 */
int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int	rc = 0;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_IOC_FAILURE;
		return rc;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	/* Sleep outside the spinlock until the enable completes. */
	wait_for_completion(&bfad->enable_comp);

	return rc;
}
48
/*
 * BSG handler: disable the IOC.
 *
 * Uses bfad->disable_active as a single-flight guard so only one
 * disable runs at a time; blocks until bfad->disable_comp is
 * signalled by the completion path.
 *
 * NOTE(review): returns positive EBUSY on contention while the normal
 * path returns 0 — kernel convention is usually a negative errno;
 * confirm how the bsg_request caller interprets this value.
 */
int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int	rc = 0;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* Another disable is already in flight. */
	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return EBUSY;
	}

	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Sleep outside the spinlock until the disable completes. */
	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;

	return rc;
}
73
/*
 * BSG handler: gather basic IOC/port identity info (WWNs, MACs, serial
 * number, SCSI host number, driver names and PCI hardware paths) into
 * the user command buffer.  Port attributes are read under bfad_lock;
 * the string copies from bfad-owned buffers are done unlocked.
 */
static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int	i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s	*im_port;
	struct bfa_port_attr_s	pattr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	/*
	 * Trim the trailing ".<function>" component off the PCI name.
	 * NOTE(review): assumes pci_name always contains a '.'; the scan
	 * would walk off the front of the buffer otherwise — confirm.
	 */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	i = strlen(iocmd->adapter_hwpath) - 1;
	while (iocmd->adapter_hwpath[i] != '.')
		i--;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
110
/*
 * BSG handler: return IOC attributes, overlaying driver-supplied
 * driver/BIOS version strings and the cached PCI attributes from
 * bfad->pci_attr.  Only the bfa_ioc_get_attr() call runs under
 * bfad_lock; the remaining copies touch only this command buffer.
 */
static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	/*
	 * NOTE(review): strncpy does not guarantee NUL termination if
	 * BFAD_DRIVER_VERSION is >= BFA_VERSION_LEN — confirm the length.
	 */
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	/* (the fresh chip_rev is saved into the cached pci_attr, then the
	 * whole cached pci_attr is copied back into the reply) */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
139
140int
141bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
142{
143 struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
144
145 bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
146 iocmd->status = BFA_STATUS_OK;
147 return 0;
148}
149
150int
151bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
152 unsigned int payload_len)
153{
154 struct bfa_bsg_ioc_fwstats_s *iocmd =
155 (struct bfa_bsg_ioc_fwstats_s *)cmd;
156 void *iocmd_bufptr;
157 unsigned long flags;
158
159 if (bfad_chk_iocmd_sz(payload_len,
160 sizeof(struct bfa_bsg_ioc_fwstats_s),
161 sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
162 iocmd->status = BFA_STATUS_VERSION_FAIL;
163 goto out;
164 }
165
166 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
167 spin_lock_irqsave(&bfad->bfad_lock, flags);
168 iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
169 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
170
171 if (iocmd->status != BFA_STATUS_OK) {
172 bfa_trc(bfad, iocmd->status);
173 goto out;
174 }
175out:
176 bfa_trc(bfad, 0x6666);
177 return 0;
178}
179
180int
181bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
182{
183 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
184
185 iocmd->status = BFA_STATUS_OK;
186 bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
187
188 return 0;
189}
190
/*
 * BSG handler: apply new interrupt-coalescing/attribute settings to the
 * IOCFC under bfad_lock; the hardware call's status is reported back in
 * iocmd->status.
 */
int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
203
/*
 * BSG handler: enable the physical port.  The enable is issued under
 * bfad_lock with an on-stack completion; on successful submission the
 * handler blocks until the firmware completes and reports the final
 * status in iocmd->status.
 */
int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	/* Submission failed: report immediately, nothing to wait for. */
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}
224
/*
 * BSG handler: disable the physical port.  Mirror image of
 * bfad_iocmd_port_enable(): submit under bfad_lock, then wait on the
 * on-stack completion for the final firmware status.
 */
int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Submission failed: report immediately, nothing to wait for. */
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}
246
/*
 * BSG handler: return physical port attributes, merged with a few
 * fields (pid, port_type, loopback, authfail, symbolic name) taken
 * from the base logical port.  The pid is only meaningful when a
 * topology has been established.
 */
static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s	port_attr;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* No topology yet means the port has no assigned FC address. */
	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
274
/*
 * BSG handler: fetch port statistics into the variable-length area
 * following the bfa_bsg_port_stats_s header.  Payload size is checked
 * first; the async fetch is submitted under bfad_lock and awaited on
 * an on-stack completion.
 */
int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	void	*iocmd_bufptr;
	unsigned long	flags;

	/* Reject payloads too small to hold the stats union. */
	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	/* Stats land directly after the fixed-size command header. */
	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
				iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
308
/*
 * BSG handler: look up a logical port by (vf_id, pwwn) and return its
 * attributes.  An unknown port yields BFA_STATUS_UNKNOWN_LWWN in
 * iocmd->status.
 */
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
331
/*
 * BSG handler: look up a logical port by (vf_id, pwwn) and return its
 * statistics.  An unknown port yields BFA_STATUS_UNKNOWN_LWWN in
 * iocmd->status.
 */
int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
355
/*
 * BSG handler: look up a logical port by (vf_id, pwwn) and return the
 * accumulated FCP-IM I/O statistics for its lp_tag.  An unknown port
 * yields BFA_STATUS_UNKNOWN_LWWN in iocmd->status.
 */
int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
			fcs_port->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
380
/*
 * BSG handler: return the remote-port WWN list of a logical port into
 * the variable-length area (nrports wwn_t entries) following the
 * command header.  iocmd->nrports is an in/out count.
 *
 * NOTE(review): returns positive EINVAL for a zero count while other
 * paths return 0 with a status code — kernel convention is usually a
 * negative errno; confirm how the bsg_request caller interprets this.
 */
int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long	flags;
	void	*iocmd_bufptr;

	if (iocmd->nrports == 0)
		return EINVAL;

	/* Payload must hold nrports WWNs after the fixed header. */
	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc(bfad, 0);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
				&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
420
/*
 * BSG handler: look up a remote port by (vf_id, pwwn, rpwwn) and
 * return its attributes.  Unknown local port yields
 * BFA_STATUS_UNKNOWN_LWWN, unknown remote port BFA_STATUS_UNKNOWN_RWWN.
 */
int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
453
/*
 * BSG handler: translate a remote port (vf_id, pwwn, rpwwn) into its
 * SCSI address (host/bus/target/lun).  Requires the itnim to have an
 * attached driver itnim with an im_port; otherwise the command fails
 * with BFA_STATUS_UNKNOWN_RWWN.  bus and lun are always reported as 0.
 */
static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	struct bfa_fcs_itnim_s	*fcs_itnim;
	struct bfad_itnim_s	*drv_itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	/* Need a registered SCSI host to report an address against. */
	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
502
/*
 * BSG handler: return FCS remote-port statistics merged with the
 * corresponding HAL rport statistics for a remote port identified by
 * (vf_id, pwwn, rpwwn).
 */
int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	/* FCS-level stats first, then overlay the HAL rport stats. */
	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	memcpy((void *)&iocmd->stats.hal_stats,
	       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
	       sizeof(struct bfa_rport_hal_stats_s));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
541
542static int
543bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
544 unsigned int payload_len)
545{
546 struct bfa_bsg_fabric_get_lports_s *iocmd =
547 (struct bfa_bsg_fabric_get_lports_s *)cmd;
548 bfa_fcs_vf_t *fcs_vf;
549 uint32_t nports = iocmd->nports;
550 unsigned long flags;
551 void *iocmd_bufptr;
552
553 if (nports == 0) {
554 iocmd->status = BFA_STATUS_EINVAL;
555 goto out;
556 }
557
558 if (bfad_chk_iocmd_sz(payload_len,
559 sizeof(struct bfa_bsg_fabric_get_lports_s),
560 sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
561 iocmd->status = BFA_STATUS_VERSION_FAIL;
562 goto out;
563 }
564
565 iocmd_bufptr = (char *)iocmd +
566 sizeof(struct bfa_bsg_fabric_get_lports_s);
567
568 spin_lock_irqsave(&bfad->bfad_lock, flags);
569 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
570 if (fcs_vf == NULL) {
571 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
572 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
573 goto out;
574 }
575 bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
576 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
577
578 iocmd->nports = nports;
579 iocmd->status = BFA_STATUS_OK;
580out:
581 return 0;
582}
583
/*
 * BSG handler: accumulate FCP-IM module statistics by summing the
 * per-itnim I/O stats over the itnim queue, all under bfad_lock.
 */
int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstats_s *iocmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
605
/*
 * BSG handler: copy the FCP-IM statistics accumulated for deleted
 * itnims into the user command buffer, under bfad_lock.
 */
int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
622
/*
 * BSG handler: look up a logical port by (vf_id, lpwwn) and return the
 * itnim attributes for rpwwn.  An unknown local port yields
 * BFA_STATUS_UNKNOWN_LWWN; otherwise the attr-get result is reported.
 */
static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s	*fcs_port;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
641
/*
 * BSG handler: return the HAL-level I/O statistics of the itnim
 * identified by (vf_id, lpwwn, rpwwn).  Unknown local port yields
 * BFA_STATUS_UNKNOWN_LWWN, unknown itnim BFA_STATUS_UNKNOWN_RWWN.
 */
static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			/* Copy the HAL itnim's stats, not the FCS ones. */
			memcpy((void *)&iocmd->iostats, (void *)
			       &(bfa_fcs_itnim_get_halitn(itnim)->stats),
			       sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
671
/*
 * BSG handler: return the FCS-level itnim statistics for the itnim
 * identified by (vf_id, lpwwn, rpwwn).  Unknown local port yields
 * BFA_STATUS_UNKNOWN_LWWN, unknown itnim BFA_STATUS_UNKNOWN_RWWN.
 */
static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_itnstats_s *iocmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
					&iocmd->itnstats);
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
700
/*
 * BSG handler: enable the FC port; the synchronous hardware-call
 * status is reported back in iocmd->status.
 */
int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
713
/*
 * BSG handler: disable the FC port; the synchronous hardware-call
 * status is reported back in iocmd->status.
 */
int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
726
/*
 * BSG handler: query the ASIC-block PCI-function configuration.  The
 * query is submitted under bfad_lock with an on-stack completion; on
 * successful submission the handler waits for the firmware result.
 */
int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
748
/*
 * BSG handler: create a PCI function on the given port with the given
 * class and bandwidth.  Submitted under bfad_lock with an on-stack
 * completion; on successful submission the handler waits for the
 * firmware result (which also fills in iocmd->pcifn_id).
 */
int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
771
/*
 * BSG handler: delete the PCI function identified by iocmd->pcifn_id.
 * Submitted under bfad_lock with an on-stack completion; on successful
 * submission the handler waits for the firmware result.
 */
int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
793
794int
795bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
796{
797 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
798 struct bfad_hal_comp fcomp;
799 unsigned long flags;
800
801 init_completion(&fcomp.comp);
802 spin_lock_irqsave(&bfad->bfad_lock, flags);
803 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
804 iocmd->pcifn_id, iocmd->bandwidth,
805 bfad_hcb_comp, &fcomp);
806 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
807 bfa_trc(bfad, iocmd->status);
808 if (iocmd->status != BFA_STATUS_OK)
809 goto out;
810
811 wait_for_completion(&fcomp.comp);
812 iocmd->status = fcomp.status;
813 bfa_trc(bfad, iocmd->status);
814out:
815 return 0;
816}
817
818int
819bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
820{
821 struct bfa_bsg_adapter_cfg_mode_s *iocmd =
822 (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
823 struct bfad_hal_comp fcomp;
824 unsigned long flags = 0;
825
826 init_completion(&fcomp.comp);
827 spin_lock_irqsave(&bfad->bfad_lock, flags);
828 iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
829 iocmd->cfg.mode, iocmd->cfg.max_pf,
830 iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
831 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
832 if (iocmd->status != BFA_STATUS_OK)
833 goto out;
834
835 wait_for_completion(&fcomp.comp);
836 iocmd->status = fcomp.status;
837out:
838 return 0;
839}
840
841int
842bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
843{
844 struct bfa_bsg_port_cfg_mode_s *iocmd =
845 (struct bfa_bsg_port_cfg_mode_s *)cmd;
846 struct bfad_hal_comp fcomp;
847 unsigned long flags = 0;
848
849 init_completion(&fcomp.comp);
850 spin_lock_irqsave(&bfad->bfad_lock, flags);
851 iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
852 iocmd->instance, iocmd->cfg.mode,
853 iocmd->cfg.max_pf, iocmd->cfg.max_vf,
854 bfad_hcb_comp, &fcomp);
855 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
856 if (iocmd->status != BFA_STATUS_OK)
857 goto out;
858
859 wait_for_completion(&fcomp.comp);
860 iocmd->status = fcomp.status;
861out:
862 return 0;
863}
864
865int
866bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
867{
868 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
869 struct bfad_hal_comp fcomp;
870 unsigned long flags;
871
872 init_completion(&fcomp.comp);
873 spin_lock_irqsave(&bfad->bfad_lock, flags);
874 if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
875 iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
876 bfad_hcb_comp, &fcomp);
877 else
878 iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
879 bfad_hcb_comp, &fcomp);
880 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
881
882 if (iocmd->status != BFA_STATUS_OK)
883 goto out;
884
885 wait_for_completion(&fcomp.comp);
886 iocmd->status = fcomp.status;
887out:
888 return 0;
889}
890
891int
892bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
893{
894 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
895 unsigned long flags;
896 struct bfad_hal_comp fcomp;
897
898 init_completion(&fcomp.comp);
899 iocmd->status = BFA_STATUS_OK;
900 spin_lock_irqsave(&bfad->bfad_lock, flags);
901 iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
902 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
903
904 if (iocmd->status != BFA_STATUS_OK)
905 goto out;
906
907 wait_for_completion(&fcomp.comp);
908 iocmd->status = fcomp.status;
909out:
910 return 0;
911}
912
913int
914bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
915{
916 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
917 unsigned long flags;
918 struct bfad_hal_comp fcomp;
919
920 init_completion(&fcomp.comp);
921 iocmd->status = BFA_STATUS_OK;
922 spin_lock_irqsave(&bfad->bfad_lock, flags);
923 iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
924 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
925
926 if (iocmd->status != BFA_STATUS_OK)
927 goto out;
928
929 wait_for_completion(&fcomp.comp);
930 iocmd->status = fcomp.status;
931out:
932 return 0;
933}
934
935int
936bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
937{
938 struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
939 struct bfad_hal_comp fcomp;
940 unsigned long flags;
941
942 init_completion(&fcomp.comp);
943 iocmd->status = BFA_STATUS_OK;
944 spin_lock_irqsave(&bfad->bfad_lock, flags);
945 iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
946 bfad_hcb_comp, &fcomp);
947 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
948
949 if (iocmd->status != BFA_STATUS_OK)
950 goto out;
951
952 wait_for_completion(&fcomp.comp);
953 iocmd->status = fcomp.status;
954out:
955 return 0;
956}
957
958int
959bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
960{
961 struct bfa_bsg_cee_attr_s *iocmd =
962 (struct bfa_bsg_cee_attr_s *)cmd;
963 void *iocmd_bufptr;
964 struct bfad_hal_comp cee_comp;
965 unsigned long flags;
966
967 if (bfad_chk_iocmd_sz(payload_len,
968 sizeof(struct bfa_bsg_cee_attr_s),
969 sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
970 iocmd->status = BFA_STATUS_VERSION_FAIL;
971 return 0;
972 }
973
974 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
975
976 cee_comp.status = 0;
977 init_completion(&cee_comp.comp);
978 mutex_lock(&bfad_mutex);
979 spin_lock_irqsave(&bfad->bfad_lock, flags);
980 iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
981 bfad_hcb_comp, &cee_comp);
982 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
983 if (iocmd->status != BFA_STATUS_OK) {
984 mutex_unlock(&bfad_mutex);
985 bfa_trc(bfad, 0x5555);
986 goto out;
987 }
988 wait_for_completion(&cee_comp.comp);
989 mutex_unlock(&bfad_mutex);
990out:
991 return 0;
992}
993
994int
995bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
996 unsigned int payload_len)
997{
998 struct bfa_bsg_cee_stats_s *iocmd =
999 (struct bfa_bsg_cee_stats_s *)cmd;
1000 void *iocmd_bufptr;
1001 struct bfad_hal_comp cee_comp;
1002 unsigned long flags;
1003
1004 if (bfad_chk_iocmd_sz(payload_len,
1005 sizeof(struct bfa_bsg_cee_stats_s),
1006 sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
1007 iocmd->status = BFA_STATUS_VERSION_FAIL;
1008 return 0;
1009 }
1010
1011 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
1012
1013 cee_comp.status = 0;
1014 init_completion(&cee_comp.comp);
1015 mutex_lock(&bfad_mutex);
1016 spin_lock_irqsave(&bfad->bfad_lock, flags);
1017 iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
1018 bfad_hcb_comp, &cee_comp);
1019 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1020 if (iocmd->status != BFA_STATUS_OK) {
1021 mutex_unlock(&bfad_mutex);
1022 bfa_trc(bfad, 0x5555);
1023 goto out;
1024 }
1025 wait_for_completion(&cee_comp.comp);
1026 mutex_unlock(&bfad_mutex);
1027out:
1028 return 0;
1029}
1030
1031int
1032bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
1033{
1034 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1035 unsigned long flags;
1036
1037 spin_lock_irqsave(&bfad->bfad_lock, flags);
1038 iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
1039 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1040 if (iocmd->status != BFA_STATUS_OK)
1041 bfa_trc(bfad, 0x5555);
1042 return 0;
1043}
1044
1045int
1046bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
1047{
1048 struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
1049 struct bfad_hal_comp fcomp;
1050 unsigned long flags;
1051
1052 init_completion(&fcomp.comp);
1053 spin_lock_irqsave(&bfad->bfad_lock, flags);
1054 iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
1055 bfad_hcb_comp, &fcomp);
1056 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1057 bfa_trc(bfad, iocmd->status);
1058 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1059 goto out;
1060
1061 wait_for_completion(&fcomp.comp);
1062 iocmd->status = fcomp.status;
1063out:
1064 return 0;
1065}
1066
1067int
1068bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
1069{
1070 struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
1071 struct bfad_hal_comp fcomp;
1072 unsigned long flags;
1073
1074 init_completion(&fcomp.comp);
1075 spin_lock_irqsave(&bfad->bfad_lock, flags);
1076 iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
1077 bfad_hcb_comp, &fcomp);
1078 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1079 bfa_trc(bfad, iocmd->status);
1080 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1081 goto out;
1082 wait_for_completion(&fcomp.comp);
1083 iocmd->status = fcomp.status;
1084out:
1085 return 0;
1086}
1087
1088int
1089bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
1090{
1091 struct bfa_bsg_flash_attr_s *iocmd =
1092 (struct bfa_bsg_flash_attr_s *)cmd;
1093 struct bfad_hal_comp fcomp;
1094 unsigned long flags;
1095
1096 init_completion(&fcomp.comp);
1097 spin_lock_irqsave(&bfad->bfad_lock, flags);
1098 iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
1099 bfad_hcb_comp, &fcomp);
1100 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1101 if (iocmd->status != BFA_STATUS_OK)
1102 goto out;
1103 wait_for_completion(&fcomp.comp);
1104 iocmd->status = fcomp.status;
1105out:
1106 return 0;
1107}
1108
1109int
1110bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
1111{
1112 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1113 struct bfad_hal_comp fcomp;
1114 unsigned long flags;
1115
1116 init_completion(&fcomp.comp);
1117 spin_lock_irqsave(&bfad->bfad_lock, flags);
1118 iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1119 iocmd->instance, bfad_hcb_comp, &fcomp);
1120 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1121 if (iocmd->status != BFA_STATUS_OK)
1122 goto out;
1123 wait_for_completion(&fcomp.comp);
1124 iocmd->status = fcomp.status;
1125out:
1126 return 0;
1127}
1128
1129int
1130bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
1131 unsigned int payload_len)
1132{
1133 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1134 void *iocmd_bufptr;
1135 struct bfad_hal_comp fcomp;
1136 unsigned long flags;
1137
1138 if (bfad_chk_iocmd_sz(payload_len,
1139 sizeof(struct bfa_bsg_flash_s),
1140 iocmd->bufsz) != BFA_STATUS_OK) {
1141 iocmd->status = BFA_STATUS_VERSION_FAIL;
1142 return 0;
1143 }
1144
1145 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1146
1147 init_completion(&fcomp.comp);
1148 spin_lock_irqsave(&bfad->bfad_lock, flags);
1149 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
1150 iocmd->type, iocmd->instance, iocmd_bufptr,
1151 iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
1152 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1153 if (iocmd->status != BFA_STATUS_OK)
1154 goto out;
1155 wait_for_completion(&fcomp.comp);
1156 iocmd->status = fcomp.status;
1157out:
1158 return 0;
1159}
1160
1161int
1162bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
1163 unsigned int payload_len)
1164{
1165 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1166 struct bfad_hal_comp fcomp;
1167 void *iocmd_bufptr;
1168 unsigned long flags;
1169
1170 if (bfad_chk_iocmd_sz(payload_len,
1171 sizeof(struct bfa_bsg_flash_s),
1172 iocmd->bufsz) != BFA_STATUS_OK) {
1173 iocmd->status = BFA_STATUS_VERSION_FAIL;
1174 return 0;
1175 }
1176
1177 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1178
1179 init_completion(&fcomp.comp);
1180 spin_lock_irqsave(&bfad->bfad_lock, flags);
1181 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1182 iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
1183 bfad_hcb_comp, &fcomp);
1184 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1185 if (iocmd->status != BFA_STATUS_OK)
1186 goto out;
1187 wait_for_completion(&fcomp.comp);
1188 iocmd->status = fcomp.status;
1189out:
1190 return 0;
1191}
1192
1193int
1194bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
1195{
1196 struct bfa_bsg_diag_get_temp_s *iocmd =
1197 (struct bfa_bsg_diag_get_temp_s *)cmd;
1198 struct bfad_hal_comp fcomp;
1199 unsigned long flags;
1200
1201 init_completion(&fcomp.comp);
1202 spin_lock_irqsave(&bfad->bfad_lock, flags);
1203 iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
1204 &iocmd->result, bfad_hcb_comp, &fcomp);
1205 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1206 bfa_trc(bfad, iocmd->status);
1207 if (iocmd->status != BFA_STATUS_OK)
1208 goto out;
1209 wait_for_completion(&fcomp.comp);
1210 iocmd->status = fcomp.status;
1211out:
1212 return 0;
1213}
1214
1215int
1216bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
1217{
1218 struct bfa_bsg_diag_memtest_s *iocmd =
1219 (struct bfa_bsg_diag_memtest_s *)cmd;
1220 struct bfad_hal_comp fcomp;
1221 unsigned long flags;
1222
1223 init_completion(&fcomp.comp);
1224 spin_lock_irqsave(&bfad->bfad_lock, flags);
1225 iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
1226 &iocmd->memtest, iocmd->pat,
1227 &iocmd->result, bfad_hcb_comp, &fcomp);
1228 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1229 bfa_trc(bfad, iocmd->status);
1230 if (iocmd->status != BFA_STATUS_OK)
1231 goto out;
1232 wait_for_completion(&fcomp.comp);
1233 iocmd->status = fcomp.status;
1234out:
1235 return 0;
1236}
1237
1238int
1239bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
1240{
1241 struct bfa_bsg_diag_loopback_s *iocmd =
1242 (struct bfa_bsg_diag_loopback_s *)cmd;
1243 struct bfad_hal_comp fcomp;
1244 unsigned long flags;
1245
1246 init_completion(&fcomp.comp);
1247 spin_lock_irqsave(&bfad->bfad_lock, flags);
1248 iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
1249 iocmd->speed, iocmd->lpcnt, iocmd->pat,
1250 &iocmd->result, bfad_hcb_comp, &fcomp);
1251 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1252 bfa_trc(bfad, iocmd->status);
1253 if (iocmd->status != BFA_STATUS_OK)
1254 goto out;
1255 wait_for_completion(&fcomp.comp);
1256 iocmd->status = fcomp.status;
1257out:
1258 return 0;
1259}
1260
1261int
1262bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
1263{
1264 struct bfa_bsg_diag_fwping_s *iocmd =
1265 (struct bfa_bsg_diag_fwping_s *)cmd;
1266 struct bfad_hal_comp fcomp;
1267 unsigned long flags;
1268
1269 init_completion(&fcomp.comp);
1270 spin_lock_irqsave(&bfad->bfad_lock, flags);
1271 iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
1272 iocmd->pattern, &iocmd->result,
1273 bfad_hcb_comp, &fcomp);
1274 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1275 bfa_trc(bfad, iocmd->status);
1276 if (iocmd->status != BFA_STATUS_OK)
1277 goto out;
1278 bfa_trc(bfad, 0x77771);
1279 wait_for_completion(&fcomp.comp);
1280 iocmd->status = fcomp.status;
1281out:
1282 return 0;
1283}
1284
1285int
1286bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
1287{
1288 struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
1289 struct bfad_hal_comp fcomp;
1290 unsigned long flags;
1291
1292 init_completion(&fcomp.comp);
1293 spin_lock_irqsave(&bfad->bfad_lock, flags);
1294 iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
1295 iocmd->queue, &iocmd->result,
1296 bfad_hcb_comp, &fcomp);
1297 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1298 if (iocmd->status != BFA_STATUS_OK)
1299 goto out;
1300 wait_for_completion(&fcomp.comp);
1301 iocmd->status = fcomp.status;
1302out:
1303 return 0;
1304}
1305
1306int
1307bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
1308{
1309 struct bfa_bsg_sfp_show_s *iocmd =
1310 (struct bfa_bsg_sfp_show_s *)cmd;
1311 struct bfad_hal_comp fcomp;
1312 unsigned long flags;
1313
1314 init_completion(&fcomp.comp);
1315 spin_lock_irqsave(&bfad->bfad_lock, flags);
1316 iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
1317 bfad_hcb_comp, &fcomp);
1318 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1319 bfa_trc(bfad, iocmd->status);
1320 if (iocmd->status != BFA_STATUS_OK)
1321 goto out;
1322 wait_for_completion(&fcomp.comp);
1323 iocmd->status = fcomp.status;
1324 bfa_trc(bfad, iocmd->status);
1325out:
1326 return 0;
1327}
1328
1329int
1330bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
1331{
1332 struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
1333 unsigned long flags;
1334
1335 spin_lock_irqsave(&bfad->bfad_lock, flags);
1336 iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
1337 &iocmd->ledtest);
1338 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1339 return 0;
1340}
1341
1342int
1343bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
1344{
1345 struct bfa_bsg_diag_beacon_s *iocmd =
1346 (struct bfa_bsg_diag_beacon_s *)cmd;
1347 unsigned long flags;
1348
1349 spin_lock_irqsave(&bfad->bfad_lock, flags);
1350 iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
1351 iocmd->beacon, iocmd->link_e2e_beacon,
1352 iocmd->second);
1353 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1354 return 0;
1355}
1356
1357int
1358bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1359{
1360 struct bfa_bsg_diag_lb_stat_s *iocmd =
1361 (struct bfa_bsg_diag_lb_stat_s *)cmd;
1362 unsigned long flags;
1363
1364 spin_lock_irqsave(&bfad->bfad_lock, flags);
1365 iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
1366 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1367 bfa_trc(bfad, iocmd->status);
1368
1369 return 0;
1370}
1371
1372int
1373bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1374{
1375 struct bfa_bsg_phy_attr_s *iocmd =
1376 (struct bfa_bsg_phy_attr_s *)cmd;
1377 struct bfad_hal_comp fcomp;
1378 unsigned long flags;
1379
1380 init_completion(&fcomp.comp);
1381 spin_lock_irqsave(&bfad->bfad_lock, flags);
1382 iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
1383 &iocmd->attr, bfad_hcb_comp, &fcomp);
1384 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1385 if (iocmd->status != BFA_STATUS_OK)
1386 goto out;
1387 wait_for_completion(&fcomp.comp);
1388 iocmd->status = fcomp.status;
1389out:
1390 return 0;
1391}
1392
1393int
1394bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
1395{
1396 struct bfa_bsg_phy_stats_s *iocmd =
1397 (struct bfa_bsg_phy_stats_s *)cmd;
1398 struct bfad_hal_comp fcomp;
1399 unsigned long flags;
1400
1401 init_completion(&fcomp.comp);
1402 spin_lock_irqsave(&bfad->bfad_lock, flags);
1403 iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
1404 &iocmd->stats, bfad_hcb_comp, &fcomp);
1405 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1406 if (iocmd->status != BFA_STATUS_OK)
1407 goto out;
1408 wait_for_completion(&fcomp.comp);
1409 iocmd->status = fcomp.status;
1410out:
1411 return 0;
1412}
1413
1414int
1415bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1416{
1417 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1418 struct bfad_hal_comp fcomp;
1419 void *iocmd_bufptr;
1420 unsigned long flags;
1421
1422 if (bfad_chk_iocmd_sz(payload_len,
1423 sizeof(struct bfa_bsg_phy_s),
1424 iocmd->bufsz) != BFA_STATUS_OK) {
1425 iocmd->status = BFA_STATUS_VERSION_FAIL;
1426 return 0;
1427 }
1428
1429 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1430 init_completion(&fcomp.comp);
1431 spin_lock_irqsave(&bfad->bfad_lock, flags);
1432 iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
1433 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1434 0, bfad_hcb_comp, &fcomp);
1435 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1436 if (iocmd->status != BFA_STATUS_OK)
1437 goto out;
1438 wait_for_completion(&fcomp.comp);
1439 iocmd->status = fcomp.status;
1440 if (iocmd->status != BFA_STATUS_OK)
1441 goto out;
1442out:
1443 return 0;
1444}
1445
1446int
1447bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
1448{
1449 struct bfa_bsg_vhba_attr_s *iocmd =
1450 (struct bfa_bsg_vhba_attr_s *)cmd;
1451 struct bfa_vhba_attr_s *attr = &iocmd->attr;
1452 unsigned long flags;
1453
1454 spin_lock_irqsave(&bfad->bfad_lock, flags);
1455 attr->pwwn = bfad->bfa.ioc.attr->pwwn;
1456 attr->nwwn = bfad->bfa.ioc.attr->nwwn;
1457 attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
1458 attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
1459 attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
1460 iocmd->status = BFA_STATUS_OK;
1461 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1462 return 0;
1463}
1464
1465int
1466bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1467{
1468 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1469 void *iocmd_bufptr;
1470 struct bfad_hal_comp fcomp;
1471 unsigned long flags;
1472
1473 if (bfad_chk_iocmd_sz(payload_len,
1474 sizeof(struct bfa_bsg_phy_s),
1475 iocmd->bufsz) != BFA_STATUS_OK) {
1476 iocmd->status = BFA_STATUS_VERSION_FAIL;
1477 return 0;
1478 }
1479
1480 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1481 init_completion(&fcomp.comp);
1482 spin_lock_irqsave(&bfad->bfad_lock, flags);
1483 iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
1484 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1485 0, bfad_hcb_comp, &fcomp);
1486 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1487 if (iocmd->status != BFA_STATUS_OK)
1488 goto out;
1489 wait_for_completion(&fcomp.comp);
1490 iocmd->status = fcomp.status;
1491out:
1492 return 0;
1493}
1494
1495int
1496bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
1497{
1498 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
1499 void *iocmd_bufptr;
1500
1501 if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
1502 bfa_trc(bfad, sizeof(struct bfa_plog_s));
1503 iocmd->status = BFA_STATUS_EINVAL;
1504 goto out;
1505 }
1506
1507 iocmd->status = BFA_STATUS_OK;
1508 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
1509 memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
1510out:
1511 return 0;
1512}
1513
/*
 * Central dispatcher for BSG vendor-specific IOCMDs.
 *
 * @bfad:        driver instance the command is addressed to
 * @cmd:         IOCMD_* opcode taken from the vendor command word
 * @iocmd:       linearized command buffer; each helper casts it to its
 *               bfa_bsg_*_s struct and writes the command status into it
 * @payload_len: total length of @iocmd, forwarded to the helpers that
 *               carry variable-size trailing data so they can validate it
 *
 * Returns 0 when a helper handled the command (helpers report per-command
 * failure in iocmd->status, not via the return value), or -EINVAL for an
 * unknown opcode.  rc is kept positive inside the switch and negated on
 * return.
 */
static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		unsigned int payload_len)
{
	int rc = EINVAL;

	switch (cmd) {
	case IOCMD_IOC_ENABLE:
		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
		break;
	case IOCMD_IOC_DISABLE:
		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_STATS:
		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_FWSTATS:
		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
		break;
	case IOCMD_IOCFC_GET_ATTR:
		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOCFC_SET_INTR:
		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
		break;
	case IOCMD_PORT_ENABLE:
		rc = bfad_iocmd_port_enable(bfad, iocmd);
		break;
	case IOCMD_PORT_DISABLE:
		rc = bfad_iocmd_port_disable(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_STATS:
		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_STATS:
		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_IOSTATS:
		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_RPORTS:
		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RPORT_GET_ATTR:
		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_STATS:
		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_FCPIM_MODSTATS:
		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_DEL_ITN_STATS:
		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_IOSTATS:
		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ITNSTATS:
		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_ENABLE:
		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
		break;
	case IOCMD_FCPORT_DISABLE:
		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	/* Both option-ROM opcodes share one handler; it re-inspects cmd. */
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	case IOCMD_FAA_ENABLE:
		rc = bfad_iocmd_faa_enable(bfad, iocmd);
		break;
	case IOCMD_FAA_DISABLE:
		rc = bfad_iocmd_faa_disable(bfad, iocmd);
		break;
	case IOCMD_FAA_QUERY:
		rc = bfad_iocmd_faa_query(bfad, iocmd);
		break;
	case IOCMD_CEE_GET_ATTR:
		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_GET_STATS:
		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_RESET_STATS:
		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
		break;
	case IOCMD_SFP_MEDIA:
		rc = bfad_iocmd_sfp_media(bfad, iocmd);
		break;
	case IOCMD_SFP_SPEED:
		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
		break;
	case IOCMD_FLASH_GET_ATTR:
		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
		break;
	case IOCMD_FLASH_ERASE_PART:
		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	case IOCMD_VHBA_QUERY:
		rc = bfad_iocmd_vhba_query(bfad, iocmd);
		break;
	case IOCMD_DEBUG_PORTLOG:
		rc = bfad_iocmd_porglog_get(bfad, iocmd);
		break;
	default:
		rc = EINVAL;
		break;
	}
	return -rc;
}
1711
/*
 * BSG entry point for vendor-specific requests.
 *
 * Linearizes the request scatter/gather list into a temporary kernel
 * buffer, dispatches the embedded IOCMD through bfad_iocmd_handler(),
 * copies the (in-place modified) buffer back into the reply
 * scatter/gather list, and completes the BSG job.
 *
 * Returns the dispatch result: 0 on success, a negative errno on
 * failure (also stored in job->reply->result either way).
 */
static int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	void *payload_kbuf;
	int rc = -EINVAL;

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	/* BFA_STATUS_OK is 0; the handler returns 0 or a negative errno. */
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	/* Failure path: report the errno with an empty reply payload. */
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}
1765
1766/* FC passthru call backs */
1767u64
1768bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
1769{
1770 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1771 struct bfa_sge_s *sge;
1772 u64 addr;
1773
1774 sge = drv_fcxp->req_sge + sgeid;
1775 addr = (u64)(size_t) sge->sg_addr;
1776 return addr;
1777}
1778
1779u32
1780bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
1781{
1782 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1783 struct bfa_sge_s *sge;
1784
1785 sge = drv_fcxp->req_sge + sgeid;
1786 return sge->sg_len;
1787}
1788
1789u64
1790bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
1791{
1792 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1793 struct bfa_sge_s *sge;
1794 u64 addr;
1795
1796 sge = drv_fcxp->rsp_sge + sgeid;
1797 addr = (u64)(size_t) sge->sg_addr;
1798 return addr;
1799}
1800
1801u32
1802bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
1803{
1804 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1805 struct bfa_sge_s *sge;
1806
1807 sge = drv_fcxp->rsp_sge + sgeid;
1808 return sge->sg_len;
1809}
1810
1811void
1812bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
1813 bfa_status_t req_status, u32 rsp_len, u32 resid_len,
1814 struct fchs_s *rsp_fchs)
1815{
1816 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1817
1818 drv_fcxp->req_status = req_status;
1819 drv_fcxp->rsp_len = rsp_len;
1820
1821 /* bfa_fcxp will be automatically freed by BFA */
1822 drv_fcxp->bfa_fcxp = NULL;
1823 complete(&drv_fcxp->comp);
1824}
1825
1826struct bfad_buf_info *
1827bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
1828 uint32_t payload_len, uint32_t *num_sgles)
1829{
1830 struct bfad_buf_info *buf_base, *buf_info;
1831 struct bfa_sge_s *sg_table;
1832 int sge_num = 1;
1833
1834 buf_base = kzalloc((sizeof(struct bfad_buf_info) +
1835 sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
1836 if (!buf_base)
1837 return NULL;
1838
1839 sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
1840 (sizeof(struct bfad_buf_info) * sge_num));
1841
1842 /* Allocate dma coherent memory */
1843 buf_info = buf_base;
1844 buf_info->size = payload_len;
1845 buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
1846 &buf_info->phys, GFP_KERNEL);
1847 if (!buf_info->virt)
1848 goto out_free_mem;
1849
1850 /* copy the linear bsg buffer to buf_info */
1851 memset(buf_info->virt, 0, buf_info->size);
1852 memcpy(buf_info->virt, payload_kbuf, buf_info->size);
1853
1854 /*
1855 * Setup SG table
1856 */
1857 sg_table->sg_len = buf_info->size;
1858 sg_table->sg_addr = (void *)(size_t) buf_info->phys;
1859
1860 *num_sgles = sge_num;
1861
1862 return buf_base;
1863
1864out_free_mem:
1865 kfree(buf_base);
1866 return NULL;
1867}
1868
1869void
1870bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
1871 uint32_t num_sgles)
1872{
1873 int i;
1874 struct bfad_buf_info *buf_info = buf_base;
1875
1876 if (buf_base) {
1877 for (i = 0; i < num_sgles; buf_info++, i++) {
1878 if (buf_info->virt != NULL)
1879 dma_free_coherent(&bfad->pcidev->dev,
1880 buf_info->size, buf_info->virt,
1881 buf_info->phys);
1882 }
1883 kfree(buf_base);
1884 }
1885}
1886
1887int
1888bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
1889 bfa_bsg_fcpt_t *bsg_fcpt)
1890{
1891 struct bfa_fcxp_s *hal_fcxp;
1892 struct bfad_s *bfad = drv_fcxp->port->bfad;
1893 unsigned long flags;
1894 uint8_t lp_tag;
1895
1896 spin_lock_irqsave(&bfad->bfad_lock, flags);
1897
1898 /* Allocate bfa_fcxp structure */
1899 hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
1900 drv_fcxp->num_req_sgles,
1901 drv_fcxp->num_rsp_sgles,
1902 bfad_fcxp_get_req_sgaddr_cb,
1903 bfad_fcxp_get_req_sglen_cb,
1904 bfad_fcxp_get_rsp_sgaddr_cb,
1905 bfad_fcxp_get_rsp_sglen_cb);
1906 if (!hal_fcxp) {
1907 bfa_trc(bfad, 0);
1908 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1909 return BFA_STATUS_ENOMEM;
1910 }
1911
1912 drv_fcxp->bfa_fcxp = hal_fcxp;
1913
1914 lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
1915
1916 bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
1917 bsg_fcpt->cts, bsg_fcpt->cos,
1918 job->request_payload.payload_len,
1919 &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
1920 job->reply_payload.payload_len, bsg_fcpt->tsecs);
1921
1922 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1923
1924 return BFA_STATUS_OK;
1925}
1926
1927int
1928bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
1929{
1930 struct bfa_bsg_data *bsg_data;
1931 struct bfad_im_port_s *im_port =
1932 (struct bfad_im_port_s *) job->shost->hostdata[0];
1933 struct bfad_s *bfad = im_port->bfad;
1934 bfa_bsg_fcpt_t *bsg_fcpt;
1935 struct bfad_fcxp *drv_fcxp;
1936 struct bfa_fcs_lport_s *fcs_port;
1937 struct bfa_fcs_rport_s *fcs_rport;
1938 uint32_t command_type = job->request->msgcode;
1939 unsigned long flags;
1940 struct bfad_buf_info *rsp_buf_info;
1941 void *req_kbuf = NULL, *rsp_kbuf = NULL;
1942 int rc = -EINVAL;
1943
1944 job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */
1945 job->reply->reply_payload_rcv_len = 0;
1946
1947 /* Get the payload passed in from userspace */
1948 bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
1949 sizeof(struct fc_bsg_request));
1950 if (bsg_data == NULL)
1951 goto out;
1952
1953 /*
1954 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
1955 * buffer of size bsg_data->payload_len
1956 */
1957 bsg_fcpt = (struct bfa_bsg_fcpt_s *)
1958 kzalloc(bsg_data->payload_len, GFP_KERNEL);
1959 if (!bsg_fcpt)
1960 goto out;
1961
1962 if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
1963 bsg_data->payload_len)) {
1964 kfree(bsg_fcpt);
1965 goto out;
1966 }
1967
1968 drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
1969 if (drv_fcxp == NULL) {
1970 rc = -ENOMEM;
1971 goto out;
1972 }
1973
1974 spin_lock_irqsave(&bfad->bfad_lock, flags);
1975 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
1976 bsg_fcpt->lpwwn);
1977 if (fcs_port == NULL) {
1978 bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
1979 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1980 goto out_free_mem;
1981 }
1982
1983 /* Check if the port is online before sending FC Passthru cmd */
1984 if (!bfa_fcs_lport_is_online(fcs_port)) {
1985 bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
1986 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1987 goto out_free_mem;
1988 }
1989
1990 drv_fcxp->port = fcs_port->bfad_port;
1991
1992 if (drv_fcxp->port->bfad == 0)
1993 drv_fcxp->port->bfad = bfad;
1994
1995 /* Fetch the bfa_rport - if nexus needed */
1996 if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
1997 command_type == FC_BSG_HST_CT) {
1998 /* BSG HST commands: no nexus needed */
1999 drv_fcxp->bfa_rport = NULL;
2000
2001 } else if (command_type == FC_BSG_RPT_ELS ||
2002 command_type == FC_BSG_RPT_CT) {
2003 /* BSG RPT commands: nexus needed */
2004 fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
2005 bsg_fcpt->dpwwn);
2006 if (fcs_rport == NULL) {
2007 bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
2008 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2009 goto out_free_mem;
2010 }
2011
2012 drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
2013
2014 } else { /* Unknown BSG msgcode; return -EINVAL */
2015 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2016 goto out_free_mem;
2017 }
2018
2019 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2020
2021 /* allocate memory for req / rsp buffers */
2022 req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
2023 if (!req_kbuf) {
2024 printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
2025 bfad->pci_name);
2026 rc = -ENOMEM;
2027 goto out_free_mem;
2028 }
2029
2030 rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
2031 if (!rsp_kbuf) {
2032 printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
2033 bfad->pci_name);
2034 rc = -ENOMEM;
2035 goto out_free_mem;
2036 }
2037
2038 /* map req sg - copy the sg_list passed in to the linear buffer */
2039 sg_copy_to_buffer(job->request_payload.sg_list,
2040 job->request_payload.sg_cnt, req_kbuf,
2041 job->request_payload.payload_len);
2042
2043 drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
2044 job->request_payload.payload_len,
2045 &drv_fcxp->num_req_sgles);
2046 if (!drv_fcxp->reqbuf_info) {
2047 printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
2048 bfad->pci_name);
2049 rc = -ENOMEM;
2050 goto out_free_mem;
2051 }
2052
2053 drv_fcxp->req_sge = (struct bfa_sge_s *)
2054 (((uint8_t *)drv_fcxp->reqbuf_info) +
2055 (sizeof(struct bfad_buf_info) *
2056 drv_fcxp->num_req_sgles));
2057
2058 /* map rsp sg */
2059 drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
2060 job->reply_payload.payload_len,
2061 &drv_fcxp->num_rsp_sgles);
2062 if (!drv_fcxp->rspbuf_info) {
2063 printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
2064 bfad->pci_name);
2065 rc = -ENOMEM;
2066 goto out_free_mem;
2067 }
2068
2069 rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
2070 drv_fcxp->rsp_sge = (struct bfa_sge_s *)
2071 (((uint8_t *)drv_fcxp->rspbuf_info) +
2072 (sizeof(struct bfad_buf_info) *
2073 drv_fcxp->num_rsp_sgles));
2074
2075 /* fcxp send */
2076 init_completion(&drv_fcxp->comp);
2077 rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
2078 if (rc == BFA_STATUS_OK) {
2079 wait_for_completion(&drv_fcxp->comp);
2080 bsg_fcpt->status = drv_fcxp->req_status;
2081 } else {
2082 bsg_fcpt->status = rc;
2083 goto out_free_mem;
2084 }
2085
2086 /* fill the job->reply data */
2087 if (drv_fcxp->req_status == BFA_STATUS_OK) {
2088 job->reply_len = drv_fcxp->rsp_len;
2089 job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
2090 job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2091 } else {
2092 job->reply->reply_payload_rcv_len =
2093 sizeof(struct fc_bsg_ctels_reply);
2094 job->reply_len = sizeof(uint32_t);
2095 job->reply->reply_data.ctels_reply.status =
2096 FC_CTELS_STATUS_REJECT;
2097 }
2098
2099 /* Copy the response data to the reply_payload sg list */
2100 sg_copy_from_buffer(job->reply_payload.sg_list,
2101 job->reply_payload.sg_cnt,
2102 (uint8_t *)rsp_buf_info->virt,
2103 job->reply_payload.payload_len);
2104
2105out_free_mem:
2106 bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
2107 drv_fcxp->num_rsp_sgles);
2108 bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
2109 drv_fcxp->num_req_sgles);
2110 kfree(req_kbuf);
2111 kfree(rsp_kbuf);
2112
2113 /* Need a copy to user op */
2114 if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
2115 bsg_data->payload_len))
2116 rc = -EIO;
2117
2118 kfree(bsg_fcpt);
2119 kfree(drv_fcxp);
2120out:
2121 job->reply->result = rc;
2122
2123 if (rc == BFA_STATUS_OK)
2124 job->job_done(job);
2125
2126 return rc;
2127}
2128
2129int
2130bfad_im_bsg_request(struct fc_bsg_job *job)
2131{
2132 uint32_t rc = BFA_STATUS_OK;
2133
2134 switch (job->request->msgcode) {
2135 case FC_BSG_HST_VENDOR:
2136 /* Process BSG HST Vendor requests */
2137 rc = bfad_im_bsg_vendor_request(job);
2138 break;
2139 case FC_BSG_HST_ELS_NOLOGIN:
2140 case FC_BSG_RPT_ELS:
2141 case FC_BSG_HST_CT:
2142 case FC_BSG_RPT_CT:
2143 /* Process BSG ELS/CT commands */
2144 rc = bfad_im_bsg_els_ct_request(job);
2145 break;
2146 default:
2147 job->reply->result = rc = -EINVAL;
2148 job->reply->reply_payload_rcv_len = 0;
2149 break;
2150 }
2151
2152 return rc;
2153}
2154
int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/*
	 * BSG timeout handler. Do not complete the job here; returning
	 * -EAGAIN re-arms the bsg job timeout. ELS/CT passthru requests
	 * already carry their own timer to bound the request.
	 */
	return -EAGAIN;
}
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
new file mode 100644
index 000000000000..99b0e8a70c89
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -0,0 +1,509 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17#ifndef BFAD_BSG_H
18#define BFAD_BSG_H
19
20#include "bfa_defs.h"
21#include "bfa_defs_fcs.h"
22
23/* Definitions of vendor unique structures and command codes passed in
24 * using FC_BSG_HST_VENDOR message code.
25 */
26enum {
27 IOCMD_IOC_ENABLE = 0x1,
28 IOCMD_IOC_DISABLE,
29 IOCMD_IOC_GET_ATTR,
30 IOCMD_IOC_GET_INFO,
31 IOCMD_IOC_GET_STATS,
32 IOCMD_IOC_GET_FWSTATS,
33 IOCMD_IOCFC_GET_ATTR,
34 IOCMD_IOCFC_SET_INTR,
35 IOCMD_PORT_ENABLE,
36 IOCMD_PORT_DISABLE,
37 IOCMD_PORT_GET_ATTR,
38 IOCMD_PORT_GET_STATS,
39 IOCMD_LPORT_GET_ATTR,
40 IOCMD_LPORT_GET_RPORTS,
41 IOCMD_LPORT_GET_STATS,
42 IOCMD_LPORT_GET_IOSTATS,
43 IOCMD_RPORT_GET_ATTR,
44 IOCMD_RPORT_GET_ADDR,
45 IOCMD_RPORT_GET_STATS,
46 IOCMD_FABRIC_GET_LPORTS,
47 IOCMD_FCPIM_MODSTATS,
48 IOCMD_FCPIM_DEL_ITN_STATS,
49 IOCMD_ITNIM_GET_ATTR,
50 IOCMD_ITNIM_GET_IOSTATS,
51 IOCMD_ITNIM_GET_ITNSTATS,
52 IOCMD_IOC_PCIFN_CFG,
53 IOCMD_FCPORT_ENABLE,
54 IOCMD_FCPORT_DISABLE,
55 IOCMD_PCIFN_CREATE,
56 IOCMD_PCIFN_DELETE,
57 IOCMD_PCIFN_BW,
58 IOCMD_ADAPTER_CFG_MODE,
59 IOCMD_PORT_CFG_MODE,
60 IOCMD_FLASH_ENABLE_OPTROM,
61 IOCMD_FLASH_DISABLE_OPTROM,
62 IOCMD_FAA_ENABLE,
63 IOCMD_FAA_DISABLE,
64 IOCMD_FAA_QUERY,
65 IOCMD_CEE_GET_ATTR,
66 IOCMD_CEE_GET_STATS,
67 IOCMD_CEE_RESET_STATS,
68 IOCMD_SFP_MEDIA,
69 IOCMD_SFP_SPEED,
70 IOCMD_FLASH_GET_ATTR,
71 IOCMD_FLASH_ERASE_PART,
72 IOCMD_FLASH_UPDATE_PART,
73 IOCMD_FLASH_READ_PART,
74 IOCMD_DIAG_TEMP,
75 IOCMD_DIAG_MEMTEST,
76 IOCMD_DIAG_LOOPBACK,
77 IOCMD_DIAG_FWPING,
78 IOCMD_DIAG_QUEUETEST,
79 IOCMD_DIAG_SFP,
80 IOCMD_DIAG_LED,
81 IOCMD_DIAG_BEACON_LPORT,
82 IOCMD_DIAG_LB_STAT,
83 IOCMD_PHY_GET_ATTR,
84 IOCMD_PHY_GET_STATS,
85 IOCMD_PHY_UPDATE_FW,
86 IOCMD_PHY_READ_FW,
87 IOCMD_VHBA_QUERY,
88 IOCMD_DEBUG_PORTLOG,
89};
90
91struct bfa_bsg_gen_s {
92 bfa_status_t status;
93 u16 bfad_num;
94 u16 rsvd;
95};
96
97struct bfa_bsg_ioc_info_s {
98 bfa_status_t status;
99 u16 bfad_num;
100 u16 rsvd;
101 char serialnum[64];
102 char hwpath[BFA_STRING_32];
103 char adapter_hwpath[BFA_STRING_32];
104 char guid[BFA_ADAPTER_SYM_NAME_LEN*2];
105 char name[BFA_ADAPTER_SYM_NAME_LEN];
106 char port_name[BFA_ADAPTER_SYM_NAME_LEN];
107 char eth_name[BFA_ADAPTER_SYM_NAME_LEN];
108 wwn_t pwwn;
109 wwn_t nwwn;
110 wwn_t factorypwwn;
111 wwn_t factorynwwn;
112 mac_t mac;
113 mac_t factory_mac; /* Factory mac address */
114 mac_t current_mac; /* Currently assigned mac address */
115 enum bfa_ioc_type_e ioc_type;
116 u16 pvid; /* Port vlan id */
117 u16 rsvd1;
118 u32 host;
119 u32 bandwidth; /* For PF support */
120 u32 rsvd2;
121};
122
123struct bfa_bsg_ioc_attr_s {
124 bfa_status_t status;
125 u16 bfad_num;
126 u16 rsvd;
127 struct bfa_ioc_attr_s ioc_attr;
128};
129
130struct bfa_bsg_ioc_stats_s {
131 bfa_status_t status;
132 u16 bfad_num;
133 u16 rsvd;
134 struct bfa_ioc_stats_s ioc_stats;
135};
136
137struct bfa_bsg_ioc_fwstats_s {
138 bfa_status_t status;
139 u16 bfad_num;
140 u16 rsvd;
141 u32 buf_size;
142 u32 rsvd1;
143 u64 buf_ptr;
144};
145
146struct bfa_bsg_iocfc_attr_s {
147 bfa_status_t status;
148 u16 bfad_num;
149 u16 rsvd;
150 struct bfa_iocfc_attr_s iocfc_attr;
151};
152
153struct bfa_bsg_iocfc_intr_s {
154 bfa_status_t status;
155 u16 bfad_num;
156 u16 rsvd;
157 struct bfa_iocfc_intr_attr_s attr;
158};
159
160struct bfa_bsg_port_attr_s {
161 bfa_status_t status;
162 u16 bfad_num;
163 u16 rsvd;
164 struct bfa_port_attr_s attr;
165};
166
167struct bfa_bsg_port_stats_s {
168 bfa_status_t status;
169 u16 bfad_num;
170 u16 rsvd;
171 u32 buf_size;
172 u32 rsvd1;
173 u64 buf_ptr;
174};
175
176struct bfa_bsg_lport_attr_s {
177 bfa_status_t status;
178 u16 bfad_num;
179 u16 vf_id;
180 wwn_t pwwn;
181 struct bfa_lport_attr_s port_attr;
182};
183
184struct bfa_bsg_lport_stats_s {
185 bfa_status_t status;
186 u16 bfad_num;
187 u16 vf_id;
188 wwn_t pwwn;
189 struct bfa_lport_stats_s port_stats;
190};
191
192struct bfa_bsg_lport_iostats_s {
193 bfa_status_t status;
194 u16 bfad_num;
195 u16 vf_id;
196 wwn_t pwwn;
197 struct bfa_itnim_iostats_s iostats;
198};
199
200struct bfa_bsg_lport_get_rports_s {
201 bfa_status_t status;
202 u16 bfad_num;
203 u16 vf_id;
204 wwn_t pwwn;
205 u64 rbuf_ptr;
206 u32 nrports;
207 u32 rsvd;
208};
209
210struct bfa_bsg_rport_attr_s {
211 bfa_status_t status;
212 u16 bfad_num;
213 u16 vf_id;
214 wwn_t pwwn;
215 wwn_t rpwwn;
216 struct bfa_rport_attr_s attr;
217};
218
219struct bfa_bsg_rport_stats_s {
220 bfa_status_t status;
221 u16 bfad_num;
222 u16 vf_id;
223 wwn_t pwwn;
224 wwn_t rpwwn;
225 struct bfa_rport_stats_s stats;
226};
227
228struct bfa_bsg_rport_scsi_addr_s {
229 bfa_status_t status;
230 u16 bfad_num;
231 u16 vf_id;
232 wwn_t pwwn;
233 wwn_t rpwwn;
234 u32 host;
235 u32 bus;
236 u32 target;
237 u32 lun;
238};
239
240struct bfa_bsg_fabric_get_lports_s {
241 bfa_status_t status;
242 u16 bfad_num;
243 u16 vf_id;
244 u64 buf_ptr;
245 u32 nports;
246 u32 rsvd;
247};
248
249struct bfa_bsg_fcpim_modstats_s {
250 bfa_status_t status;
251 u16 bfad_num;
252 struct bfa_itnim_iostats_s modstats;
253};
254
255struct bfa_bsg_fcpim_del_itn_stats_s {
256 bfa_status_t status;
257 u16 bfad_num;
258 struct bfa_fcpim_del_itn_stats_s modstats;
259};
260
261struct bfa_bsg_itnim_attr_s {
262 bfa_status_t status;
263 u16 bfad_num;
264 u16 vf_id;
265 wwn_t lpwwn;
266 wwn_t rpwwn;
267 struct bfa_itnim_attr_s attr;
268};
269
270struct bfa_bsg_itnim_iostats_s {
271 bfa_status_t status;
272 u16 bfad_num;
273 u16 vf_id;
274 wwn_t lpwwn;
275 wwn_t rpwwn;
276 struct bfa_itnim_iostats_s iostats;
277};
278
279struct bfa_bsg_itnim_itnstats_s {
280 bfa_status_t status;
281 u16 bfad_num;
282 u16 vf_id;
283 wwn_t lpwwn;
284 wwn_t rpwwn;
285 struct bfa_itnim_stats_s itnstats;
286};
287
288struct bfa_bsg_pcifn_cfg_s {
289 bfa_status_t status;
290 u16 bfad_num;
291 u16 rsvd;
292 struct bfa_ablk_cfg_s pcifn_cfg;
293};
294
295struct bfa_bsg_pcifn_s {
296 bfa_status_t status;
297 u16 bfad_num;
298 u16 pcifn_id;
299 u32 bandwidth;
300 u8 port;
301 enum bfi_pcifn_class pcifn_class;
302 u8 rsvd[1];
303};
304
305struct bfa_bsg_adapter_cfg_mode_s {
306 bfa_status_t status;
307 u16 bfad_num;
308 u16 rsvd;
309 struct bfa_adapter_cfg_mode_s cfg;
310};
311
312struct bfa_bsg_port_cfg_mode_s {
313 bfa_status_t status;
314 u16 bfad_num;
315 u16 instance;
316 struct bfa_port_cfg_mode_s cfg;
317};
318
319struct bfa_bsg_faa_attr_s {
320 bfa_status_t status;
321 u16 bfad_num;
322 u16 rsvd;
323 struct bfa_faa_attr_s faa_attr;
324};
325
326struct bfa_bsg_cee_attr_s {
327 bfa_status_t status;
328 u16 bfad_num;
329 u16 rsvd;
330 u32 buf_size;
331 u32 rsvd1;
332 u64 buf_ptr;
333};
334
335struct bfa_bsg_cee_stats_s {
336 bfa_status_t status;
337 u16 bfad_num;
338 u16 rsvd;
339 u32 buf_size;
340 u32 rsvd1;
341 u64 buf_ptr;
342};
343
344struct bfa_bsg_sfp_media_s {
345 bfa_status_t status;
346 u16 bfad_num;
347 u16 rsvd;
348 enum bfa_defs_sfp_media_e media;
349};
350
351struct bfa_bsg_sfp_speed_s {
352 bfa_status_t status;
353 u16 bfad_num;
354 u16 rsvd;
355 enum bfa_port_speed speed;
356};
357
358struct bfa_bsg_flash_attr_s {
359 bfa_status_t status;
360 u16 bfad_num;
361 u16 rsvd;
362 struct bfa_flash_attr_s attr;
363};
364
365struct bfa_bsg_flash_s {
366 bfa_status_t status;
367 u16 bfad_num;
368 u8 instance;
369 u8 rsvd;
370 enum bfa_flash_part_type type;
371 int bufsz;
372 u64 buf_ptr;
373};
374
375struct bfa_bsg_diag_get_temp_s {
376 bfa_status_t status;
377 u16 bfad_num;
378 u16 rsvd;
379 struct bfa_diag_results_tempsensor_s result;
380};
381
382struct bfa_bsg_diag_memtest_s {
383 bfa_status_t status;
384 u16 bfad_num;
385 u16 rsvd[3];
386 u32 pat;
387 struct bfa_diag_memtest_result result;
388 struct bfa_diag_memtest_s memtest;
389};
390
391struct bfa_bsg_diag_loopback_s {
392 bfa_status_t status;
393 u16 bfad_num;
394 u16 rsvd;
395 enum bfa_port_opmode opmode;
396 enum bfa_port_speed speed;
397 u32 lpcnt;
398 u32 pat;
399 struct bfa_diag_loopback_result_s result;
400};
401
402struct bfa_bsg_diag_fwping_s {
403 bfa_status_t status;
404 u16 bfad_num;
405 u16 rsvd;
406 u32 cnt;
407 u32 pattern;
408 struct bfa_diag_results_fwping result;
409};
410
411struct bfa_bsg_diag_qtest_s {
412 bfa_status_t status;
413 u16 bfad_num;
414 u16 rsvd;
415 u32 force;
416 u32 queue;
417 struct bfa_diag_qtest_result_s result;
418};
419
420struct bfa_bsg_sfp_show_s {
421 bfa_status_t status;
422 u16 bfad_num;
423 u16 rsvd;
424 struct sfp_mem_s sfp;
425};
426
427struct bfa_bsg_diag_led_s {
428 bfa_status_t status;
429 u16 bfad_num;
430 u16 rsvd;
431 struct bfa_diag_ledtest_s ledtest;
432};
433
434struct bfa_bsg_diag_beacon_s {
435 bfa_status_t status;
436 u16 bfad_num;
437 u16 rsvd;
438 bfa_boolean_t beacon;
439 bfa_boolean_t link_e2e_beacon;
440 u32 second;
441};
442
443struct bfa_bsg_diag_lb_stat_s {
444 bfa_status_t status;
445 u16 bfad_num;
446 u16 rsvd;
447};
448
449struct bfa_bsg_phy_attr_s {
450 bfa_status_t status;
451 u16 bfad_num;
452 u16 instance;
453 struct bfa_phy_attr_s attr;
454};
455
456struct bfa_bsg_phy_s {
457 bfa_status_t status;
458 u16 bfad_num;
459 u16 instance;
460 u64 bufsz;
461 u64 buf_ptr;
462};
463
464struct bfa_bsg_debug_s {
465 bfa_status_t status;
466 u16 bfad_num;
467 u16 rsvd;
468 u32 bufsz;
469 int inst_no;
470 u64 buf_ptr;
471 u64 offset;
472};
473
474struct bfa_bsg_phy_stats_s {
475 bfa_status_t status;
476 u16 bfad_num;
477 u16 instance;
478 struct bfa_phy_stats_s stats;
479};
480
481struct bfa_bsg_vhba_attr_s {
482 bfa_status_t status;
483 u16 bfad_num;
484 u16 pcifn_id;
485 struct bfa_vhba_attr_s attr;
486};
487
488struct bfa_bsg_fcpt_s {
489 bfa_status_t status;
490 u16 vf_id;
491 wwn_t lpwwn;
492 wwn_t dpwwn;
493 u32 tsecs;
494 int cts;
495 enum fc_cos cos;
496 struct fchs_s fchs;
497};
498#define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s
499
500struct bfa_bsg_data {
501 int payload_len;
502 void *payload;
503};
504
505#define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz) \
506 (((__payload_len) != ((__hdrsz) + (__bufsz))) ? \
507 BFA_STATUS_FAILED : BFA_STATUS_OK)
508
509#endif /* BFAD_BSG_H */
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 48be0c54f2de..b412e0300dd4 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -214,10 +214,10 @@ bfad_debugfs_read(struct file *file, char __user *buf,
214 214
215#define BFA_REG_CT_ADDRSZ (0x40000) 215#define BFA_REG_CT_ADDRSZ (0x40000)
216#define BFA_REG_CB_ADDRSZ (0x20000) 216#define BFA_REG_CB_ADDRSZ (0x20000)
217#define BFA_REG_ADDRSZ(__bfa) \ 217#define BFA_REG_ADDRSZ(__ioc) \
218 ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \ 218 ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \
219 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ) 219 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
220#define BFA_REG_ADDRMSK(__bfa) ((u32)(BFA_REG_ADDRSZ(__bfa) - 1)) 220#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
221 221
222static bfa_status_t 222static bfa_status_t
223bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len) 223bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
@@ -236,7 +236,7 @@ bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
236 return BFA_STATUS_EINVAL; 236 return BFA_STATUS_EINVAL;
237 } else { 237 } else {
238 /* CB register space 64KB */ 238 /* CB register space 64KB */
239 if ((offset + (len<<2)) > BFA_REG_ADDRMSK(bfa)) 239 if ((offset + (len<<2)) > BFA_REG_ADDRMSK(&bfa->ioc))
240 return BFA_STATUS_EINVAL; 240 return BFA_STATUS_EINVAL;
241 } 241 }
242 return BFA_STATUS_OK; 242 return BFA_STATUS_OK;
@@ -317,7 +317,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
317 317
318 bfad->reglen = len << 2; 318 bfad->reglen = len << 2;
319 rb = bfa_ioc_bar0(ioc); 319 rb = bfa_ioc_bar0(ioc);
320 addr &= BFA_REG_ADDRMSK(bfa); 320 addr &= BFA_REG_ADDRMSK(ioc);
321 321
322 /* offset and len sanity check */ 322 /* offset and len sanity check */
323 rc = bfad_reg_offset_check(bfa, addr, len); 323 rc = bfad_reg_offset_check(bfa, addr, len);
@@ -380,7 +380,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
380 } 380 }
381 kfree(kern_buf); 381 kfree(kern_buf);
382 382
383 addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */ 383 addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
384 384
385 /* offset and len sanity check */ 385 /* offset and len sanity check */
386 rc = bfad_reg_offset_check(bfa, addr, 1); 386 rc = bfad_reg_offset_check(bfa, addr, 1);
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 7f9ea90254cd..48661a2726d7 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
43#include <scsi/scsi_tcq.h> 43#include <scsi/scsi_tcq.h>
44#include <scsi/scsi_transport_fc.h> 44#include <scsi/scsi_transport_fc.h>
45#include <scsi/scsi_transport.h> 45#include <scsi/scsi_transport.h>
46#include <scsi/scsi_bsg_fc.h>
46 47
47#include "bfa_modules.h" 48#include "bfa_modules.h"
48#include "bfa_fcs.h" 49#include "bfa_fcs.h"
@@ -55,7 +56,7 @@
55#ifdef BFA_DRIVER_VERSION 56#ifdef BFA_DRIVER_VERSION
56#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 57#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
57#else 58#else
58#define BFAD_DRIVER_VERSION "2.3.2.3" 59#define BFAD_DRIVER_VERSION "3.0.2.1"
59#endif 60#endif
60 61
61#define BFAD_PROTO_NAME FCPI_NAME 62#define BFAD_PROTO_NAME FCPI_NAME
@@ -79,7 +80,7 @@
79#define BFAD_HAL_INIT_FAIL 0x00000100 80#define BFAD_HAL_INIT_FAIL 0x00000100
80#define BFAD_FC4_PROBE_DONE 0x00000200 81#define BFAD_FC4_PROBE_DONE 0x00000200
81#define BFAD_PORT_DELETE 0x00000001 82#define BFAD_PORT_DELETE 0x00000001
82 83#define BFAD_INTX_ON 0x00000400
83/* 84/*
84 * BFAD related definition 85 * BFAD related definition
85 */ 86 */
@@ -92,6 +93,8 @@
92 */ 93 */
93#define BFAD_LUN_QUEUE_DEPTH 32 94#define BFAD_LUN_QUEUE_DEPTH 32
94#define BFAD_IO_MAX_SGE SG_ALL 95#define BFAD_IO_MAX_SGE SG_ALL
96#define BFAD_MIN_SECTORS 128 /* 64k */
97#define BFAD_MAX_SECTORS 0xFFFF /* 32 MB */
95 98
96#define bfad_isr_t irq_handler_t 99#define bfad_isr_t irq_handler_t
97 100
@@ -110,6 +113,7 @@ struct bfad_msix_s {
110enum { 113enum {
111 BFA_TRC_LDRV_BFAD = 1, 114 BFA_TRC_LDRV_BFAD = 1,
112 BFA_TRC_LDRV_IM = 2, 115 BFA_TRC_LDRV_IM = 2,
116 BFA_TRC_LDRV_BSG = 3,
113}; 117};
114 118
115enum bfad_port_pvb_type { 119enum bfad_port_pvb_type {
@@ -189,8 +193,10 @@ struct bfad_s {
189 struct bfa_pcidev_s hal_pcidev; 193 struct bfa_pcidev_s hal_pcidev;
190 struct bfa_ioc_pci_attr_s pci_attr; 194 struct bfa_ioc_pci_attr_s pci_attr;
191 void __iomem *pci_bar0_kva; 195 void __iomem *pci_bar0_kva;
196 void __iomem *pci_bar2_kva;
192 struct completion comp; 197 struct completion comp;
193 struct completion suspend; 198 struct completion suspend;
199 struct completion enable_comp;
194 struct completion disable_comp; 200 struct completion disable_comp;
195 bfa_boolean_t disable_active; 201 bfa_boolean_t disable_active;
196 struct bfad_port_s pport; /* physical port of the BFAD */ 202 struct bfad_port_s pport; /* physical port of the BFAD */
@@ -273,21 +279,6 @@ struct bfad_hal_comp {
273 struct completion comp; 279 struct completion comp;
274}; 280};
275 281
276/*
277 * Macro to obtain the immediate lower power
278 * of two for the integer.
279 */
280#define nextLowerInt(x) \
281do { \
282 int __i; \
283 (*x)--; \
284 for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \
285 (*x) = (*x) | (*x) >> __i; \
286 (*x)++; \
287 (*x) = (*x) >> 1; \
288} while (0)
289
290
291#define BFA_LOG(level, bfad, mask, fmt, arg...) \ 282#define BFA_LOG(level, bfad, mask, fmt, arg...) \
292do { \ 283do { \
293 if (((mask) == 4) || (level[1] <= '4')) \ 284 if (((mask) == 4) || (level[1] <= '4')) \
@@ -354,6 +345,7 @@ extern int msix_disable_ct;
354extern int fdmi_enable; 345extern int fdmi_enable;
355extern int supported_fc4s; 346extern int supported_fc4s;
356extern int pcie_max_read_reqsz; 347extern int pcie_max_read_reqsz;
348extern int max_xfer_size;
357extern int bfa_debugfs_enable; 349extern int bfa_debugfs_enable;
358extern struct mutex bfad_mutex; 350extern struct mutex bfad_mutex;
359 351
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c2b36179e8e8..f2bf81265ae5 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -175,21 +175,11 @@ bfad_im_info(struct Scsi_Host *shost)
175 struct bfad_im_port_s *im_port = 175 struct bfad_im_port_s *im_port =
176 (struct bfad_im_port_s *) shost->hostdata[0]; 176 (struct bfad_im_port_s *) shost->hostdata[0];
177 struct bfad_s *bfad = im_port->bfad; 177 struct bfad_s *bfad = im_port->bfad;
178 struct bfa_s *bfa = &bfad->bfa;
179 struct bfa_ioc_s *ioc = &bfa->ioc;
180 char model[BFA_ADAPTER_MODEL_NAME_LEN];
181
182 bfa_get_adapter_model(bfa, model);
183 178
184 memset(bfa_buf, 0, sizeof(bfa_buf)); 179 memset(bfa_buf, 0, sizeof(bfa_buf));
185 if (ioc->ctdev && !ioc->fcmode) 180 snprintf(bfa_buf, sizeof(bfa_buf),
186 snprintf(bfa_buf, sizeof(bfa_buf), 181 "Brocade FC/FCOE Adapter, " "hwpath: %s driver: %s",
187 "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s", 182 bfad->pci_name, BFAD_DRIVER_VERSION);
188 model, bfad->pci_name, BFAD_DRIVER_VERSION);
189 else
190 snprintf(bfa_buf, sizeof(bfa_buf),
191 "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
192 model, bfad->pci_name, BFAD_DRIVER_VERSION);
193 183
194 return bfa_buf; 184 return bfa_buf;
195} 185}
@@ -572,9 +562,6 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
572 goto out_fc_rel; 562 goto out_fc_rel;
573 } 563 }
574 564
575 /* setup host fixed attribute if the lk supports */
576 bfad_fc_host_init(im_port);
577
578 return 0; 565 return 0;
579 566
580out_fc_rel: 567out_fc_rel:
@@ -713,6 +700,9 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
713 else 700 else
714 sht = &bfad_im_vport_template; 701 sht = &bfad_im_vport_template;
715 702
703 if (max_xfer_size != BFAD_MAX_SECTORS >> 1)
704 sht->max_sectors = max_xfer_size << 1;
705
716 sht->sg_tablesize = bfad->cfg_data.io_max_sge; 706 sht->sg_tablesize = bfad->cfg_data.io_max_sge;
717 707
718 return scsi_host_alloc(sht, sizeof(unsigned long)); 708 return scsi_host_alloc(sht, sizeof(unsigned long));
@@ -790,7 +780,8 @@ struct scsi_host_template bfad_im_scsi_host_template = {
790 .cmd_per_lun = 3, 780 .cmd_per_lun = 3,
791 .use_clustering = ENABLE_CLUSTERING, 781 .use_clustering = ENABLE_CLUSTERING,
792 .shost_attrs = bfad_im_host_attrs, 782 .shost_attrs = bfad_im_host_attrs,
793 .max_sectors = 0xFFFF, 783 .max_sectors = BFAD_MAX_SECTORS,
784 .vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
794}; 785};
795 786
796struct scsi_host_template bfad_im_vport_template = { 787struct scsi_host_template bfad_im_vport_template = {
@@ -811,7 +802,7 @@ struct scsi_host_template bfad_im_vport_template = {
811 .cmd_per_lun = 3, 802 .cmd_per_lun = 3,
812 .use_clustering = ENABLE_CLUSTERING, 803 .use_clustering = ENABLE_CLUSTERING,
813 .shost_attrs = bfad_im_vport_attrs, 804 .shost_attrs = bfad_im_vport_attrs,
814 .max_sectors = 0xFFFF, 805 .max_sectors = BFAD_MAX_SECTORS,
815}; 806};
816 807
817bfa_status_t 808bfa_status_t
@@ -925,7 +916,10 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
925 return 0; 916 return 0;
926 917
927 bfa_ioc_get_attr(&bfa->ioc, ioc_attr); 918 bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
928 if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { 919 if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS)
920 supported_speed |= FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
921 FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT;
922 else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
929 if (ioc_attr->adapter_attr.is_mezz) { 923 if (ioc_attr->adapter_attr.is_mezz) {
930 supported_speed |= FC_PORTSPEED_8GBIT | 924 supported_speed |= FC_PORTSPEED_8GBIT |
931 FC_PORTSPEED_4GBIT | 925 FC_PORTSPEED_4GBIT |
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index c296c8968511..4fe34d576b05 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -141,4 +141,7 @@ extern struct device_attribute *bfad_im_vport_attrs[];
141 141
142irqreturn_t bfad_intx(int irq, void *dev_id); 142irqreturn_t bfad_intx(int irq, void *dev_id);
143 143
144int bfad_im_bsg_request(struct fc_bsg_job *job);
145int bfad_im_bsg_timeout(struct fc_bsg_job *job);
146
144#endif 147#endif
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 72b69a0c3b51..1e258d5f8aec 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -23,17 +23,29 @@
23 23
24#pragma pack(1) 24#pragma pack(1)
25 25
26/* Per dma segment max size */
27#define BFI_MEM_DMA_SEG_SZ (131072)
28
29/* Get number of dma segments required */
30#define BFI_MEM_DMA_NSEGS(_num_reqs, _req_sz) \
31 ((u16)(((((_num_reqs) * (_req_sz)) + BFI_MEM_DMA_SEG_SZ - 1) & \
32 ~(BFI_MEM_DMA_SEG_SZ - 1)) / BFI_MEM_DMA_SEG_SZ))
33
34/* Get num dma reqs - that fit in a segment */
35#define BFI_MEM_NREQS_SEG(_rqsz) (BFI_MEM_DMA_SEG_SZ / (_rqsz))
36
37/* Get segment num from tag */
38#define BFI_MEM_SEG_FROM_TAG(_tag, _rqsz) ((_tag) / BFI_MEM_NREQS_SEG(_rqsz))
39
40/* Get dma req offset in a segment */
41#define BFI_MEM_SEG_REQ_OFFSET(_tag, _sz) \
42 ((_tag) - (BFI_MEM_SEG_FROM_TAG(_tag, _sz) * BFI_MEM_NREQS_SEG(_sz)))
43
26/* 44/*
27 * BFI FW image type 45 * BFI FW image type
28 */ 46 */
29#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ 47#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
30#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) 48#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
31enum {
32 BFI_IMAGE_CB_FC,
33 BFI_IMAGE_CT_FC,
34 BFI_IMAGE_CT_CNA,
35 BFI_IMAGE_MAX,
36};
37 49
38/* 50/*
39 * Msg header common to all msgs 51 * Msg header common to all msgs
@@ -43,17 +55,20 @@ struct bfi_mhdr_s {
43 u8 msg_id; /* msg opcode with in the class */ 55 u8 msg_id; /* msg opcode with in the class */
44 union { 56 union {
45 struct { 57 struct {
46 u8 rsvd; 58 u8 qid;
47 u8 lpu_id; /* msg destination */ 59 u8 fn_lpu; /* msg destination */
48 } h2i; 60 } h2i;
49 u16 i2htok; /* token in msgs to host */ 61 u16 i2htok; /* token in msgs to host */
50 } mtag; 62 } mtag;
51}; 63};
52 64
53#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \ 65#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
66#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
67
68#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \
54 (_mh).msg_class = (_mc); \ 69 (_mh).msg_class = (_mc); \
55 (_mh).msg_id = (_op); \ 70 (_mh).msg_id = (_op); \
56 (_mh).mtag.h2i.lpu_id = (_lpuid); \ 71 (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \
57} while (0) 72} while (0)
58 73
59#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \ 74#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
@@ -101,7 +116,7 @@ union bfi_addr_u {
101}; 116};
102 117
103/* 118/*
104 * Scatter Gather Element 119 * Scatter Gather Element used for fast-path IO requests
105 */ 120 */
106struct bfi_sge_s { 121struct bfi_sge_s {
107#ifdef __BIG_ENDIAN 122#ifdef __BIG_ENDIAN
@@ -116,6 +131,14 @@ struct bfi_sge_s {
116 union bfi_addr_u sga; 131 union bfi_addr_u sga;
117}; 132};
118 133
134/**
135 * Generic DMA addr-len pair.
136 */
137struct bfi_alen_s {
138 union bfi_addr_u al_addr; /* DMA addr of buffer */
139 u32 al_len; /* length of buffer */
140};
141
119/* 142/*
120 * Scatter Gather Page 143 * Scatter Gather Page
121 */ 144 */
@@ -127,6 +150,12 @@ struct bfi_sgpg_s {
127 u32 rsvd[BFI_SGPG_RSVD_WD_LEN]; 150 u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
128}; 151};
129 152
153/* FCP module definitions */
154#define BFI_IO_MAX (2000)
155#define BFI_IOIM_SNSLEN (256)
156#define BFI_IOIM_SNSBUF_SEGS \
157 BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN)
158
130/* 159/*
131 * Large Message structure - 128 Bytes size Msgs 160 * Large Message structure - 128 Bytes size Msgs
132 */ 161 */
@@ -149,18 +178,29 @@ struct bfi_mbmsg_s {
149}; 178};
150 179
151/* 180/*
181 * Supported PCI function class codes (personality)
182 */
183enum bfi_pcifn_class {
184 BFI_PCIFN_CLASS_FC = 0x0c04,
185 BFI_PCIFN_CLASS_ETH = 0x0200,
186};
187
188/*
152 * Message Classes 189 * Message Classes
153 */ 190 */
154enum bfi_mclass { 191enum bfi_mclass {
155 BFI_MC_IOC = 1, /* IO Controller (IOC) */ 192 BFI_MC_IOC = 1, /* IO Controller (IOC) */
193 BFI_MC_DIAG = 2, /* Diagnostic Msgs */
194 BFI_MC_FLASH = 3, /* Flash message class */
195 BFI_MC_CEE = 4, /* CEE */
156 BFI_MC_FCPORT = 5, /* FC port */ 196 BFI_MC_FCPORT = 5, /* FC port */
157 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */ 197 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
158 BFI_MC_LL = 7, /* Link Layer */ 198 BFI_MC_ABLK = 7, /* ASIC block configuration */
159 BFI_MC_UF = 8, /* Unsolicited frame receive */ 199 BFI_MC_UF = 8, /* Unsolicited frame receive */
160 BFI_MC_FCXP = 9, /* FC Transport */ 200 BFI_MC_FCXP = 9, /* FC Transport */
161 BFI_MC_LPS = 10, /* lport fc login services */ 201 BFI_MC_LPS = 10, /* lport fc login services */
162 BFI_MC_RPORT = 11, /* Remote port */ 202 BFI_MC_RPORT = 11, /* Remote port */
163 BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */ 203 BFI_MC_ITN = 12, /* I-T nexus (Initiator mode) */
164 BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */ 204 BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */
165 BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */ 205 BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */
166 BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */ 206 BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */
@@ -168,6 +208,8 @@ enum bfi_mclass {
168 BFI_MC_IOIM_IOCOM = 17, /* good IO completion */ 208 BFI_MC_IOIM_IOCOM = 17, /* good IO completion */
169 BFI_MC_TSKIM = 18, /* Initiator Task management */ 209 BFI_MC_TSKIM = 18, /* Initiator Task management */
170 BFI_MC_PORT = 21, /* Physical port */ 210 BFI_MC_PORT = 21, /* Physical port */
211 BFI_MC_SFP = 22, /* SFP module */
212 BFI_MC_PHY = 25, /* External PHY message class */
171 BFI_MC_MAX = 32 213 BFI_MC_MAX = 32
172}; 214};
173 215
@@ -175,23 +217,28 @@ enum bfi_mclass {
175#define BFI_IOC_MAX_CQS_ASIC 8 217#define BFI_IOC_MAX_CQS_ASIC 8
176#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ 218#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
177 219
178#define BFI_BOOT_TYPE_OFF 8
179#define BFI_BOOT_LOADER_OFF 12
180
181#define BFI_BOOT_TYPE_NORMAL 0
182#define BFI_BOOT_TYPE_FLASH 1
183#define BFI_BOOT_TYPE_MEMTEST 2
184
185#define BFI_BOOT_LOADER_OS 0
186#define BFI_BOOT_LOADER_BIOS 1
187#define BFI_BOOT_LOADER_UEFI 2
188
189/* 220/*
190 *---------------------------------------------------------------------- 221 *----------------------------------------------------------------------
191 * IOC 222 * IOC
192 *---------------------------------------------------------------------- 223 *----------------------------------------------------------------------
193 */ 224 */
194 225
226/*
227 * Different asic generations
228 */
229enum bfi_asic_gen {
230 BFI_ASIC_GEN_CB = 1, /* crossbow 8G FC */
231 BFI_ASIC_GEN_CT = 2, /* catapult 8G FC or 10G CNA */
232 BFI_ASIC_GEN_CT2 = 3, /* catapult-2 16G FC or 10G CNA */
233};
234
235enum bfi_asic_mode {
236 BFI_ASIC_MODE_FC = 1, /* FC upto 8G speed */
237 BFI_ASIC_MODE_FC16 = 2, /* FC upto 16G speed */
238 BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */
239 BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */
240};
241
195enum bfi_ioc_h2i_msgs { 242enum bfi_ioc_h2i_msgs {
196 BFI_IOC_H2I_ENABLE_REQ = 1, 243 BFI_IOC_H2I_ENABLE_REQ = 1,
197 BFI_IOC_H2I_DISABLE_REQ = 2, 244 BFI_IOC_H2I_DISABLE_REQ = 2,
@@ -204,8 +251,8 @@ enum bfi_ioc_i2h_msgs {
204 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1), 251 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
205 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2), 252 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
206 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3), 253 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
207 BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4), 254 BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
208 BFI_IOC_I2H_HBEAT = BFA_I2HM(5), 255 BFI_IOC_I2H_ACQ_ADDR_REPLY = BFA_I2HM(5),
209}; 256};
210 257
211/* 258/*
@@ -220,7 +267,8 @@ struct bfi_ioc_attr_s {
220 wwn_t mfg_pwwn; /* Mfg port wwn */ 267 wwn_t mfg_pwwn; /* Mfg port wwn */
221 wwn_t mfg_nwwn; /* Mfg node wwn */ 268 wwn_t mfg_nwwn; /* Mfg node wwn */
222 mac_t mfg_mac; /* Mfg mac */ 269 mac_t mfg_mac; /* Mfg mac */
223 u16 rsvd_a; 270 u8 port_mode; /* bfi_port_mode */
271 u8 rsvd_a;
224 wwn_t pwwn; 272 wwn_t pwwn;
225 wwn_t nwwn; 273 wwn_t nwwn;
226 mac_t mac; /* PBC or Mfg mac */ 274 mac_t mac; /* PBC or Mfg mac */
@@ -272,21 +320,33 @@ struct bfi_ioc_getattr_reply_s {
272#define BFI_IOC_FW_SIGNATURE (0xbfadbfad) 320#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
273#define BFI_IOC_MD5SUM_SZ 4 321#define BFI_IOC_MD5SUM_SZ 4
274struct bfi_ioc_image_hdr_s { 322struct bfi_ioc_image_hdr_s {
275 u32 signature; /* constant signature */ 323 u32 signature; /* constant signature */
276 u32 rsvd_a; 324 u8 asic_gen; /* asic generation */
277 u32 exec; /* exec vector */ 325 u8 asic_mode;
278 u32 param; /* parameters */ 326 u8 port0_mode; /* device mode for port 0 */
327 u8 port1_mode; /* device mode for port 1 */
328 u32 exec; /* exec vector */
329 u32 bootenv; /* fimware boot env */
279 u32 rsvd_b[4]; 330 u32 rsvd_b[4];
280 u32 md5sum[BFI_IOC_MD5SUM_SZ]; 331 u32 md5sum[BFI_IOC_MD5SUM_SZ];
281}; 332};
282 333
283/* 334#define BFI_FWBOOT_DEVMODE_OFF 4
284 * BFI_IOC_I2H_READY_EVENT message 335#define BFI_FWBOOT_TYPE_OFF 8
285 */ 336#define BFI_FWBOOT_ENV_OFF 12
286struct bfi_ioc_rdy_event_s { 337#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
287 struct bfi_mhdr_s mh; /* common msg header */ 338 (((u32)(__asic_gen)) << 24 | \
288 u8 init_status; /* init event status */ 339 ((u32)(__asic_mode)) << 16 | \
289 u8 rsvd[3]; 340 ((u32)(__p0_mode)) << 8 | \
341 ((u32)(__p1_mode)))
342
343#define BFI_FWBOOT_TYPE_NORMAL 0
344#define BFI_FWBOOT_TYPE_MEMTEST 2
345#define BFI_FWBOOT_ENV_OS 0
346
347enum bfi_port_mode {
348 BFI_PORT_MODE_FC = 1,
349 BFI_PORT_MODE_ETH = 2,
290}; 350};
291 351
292struct bfi_ioc_hbeat_s { 352struct bfi_ioc_hbeat_s {
@@ -345,8 +405,8 @@ enum {
345 */ 405 */
346struct bfi_ioc_ctrl_req_s { 406struct bfi_ioc_ctrl_req_s {
347 struct bfi_mhdr_s mh; 407 struct bfi_mhdr_s mh;
348 u8 ioc_class; 408 u16 clscode;
349 u8 rsvd[3]; 409 u16 rsvd;
350 u32 tv_sec; 410 u32 tv_sec;
351}; 411};
352#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s; 412#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
@@ -358,7 +418,9 @@ struct bfi_ioc_ctrl_req_s {
358struct bfi_ioc_ctrl_reply_s { 418struct bfi_ioc_ctrl_reply_s {
359 struct bfi_mhdr_s mh; /* Common msg header */ 419 struct bfi_mhdr_s mh; /* Common msg header */
360 u8 status; /* enable/disable status */ 420 u8 status; /* enable/disable status */
361 u8 rsvd[3]; 421 u8 port_mode; /* bfa_mode_s */
422 u8 cap_bm; /* capability bit mask */
423 u8 rsvd;
362}; 424};
363#define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s; 425#define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s;
364#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s; 426#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
@@ -380,7 +442,7 @@ union bfi_ioc_h2i_msg_u {
380 */ 442 */
381union bfi_ioc_i2h_msg_u { 443union bfi_ioc_i2h_msg_u {
382 struct bfi_mhdr_s mh; 444 struct bfi_mhdr_s mh;
383 struct bfi_ioc_rdy_event_s rdy_event; 445 struct bfi_ioc_ctrl_reply_s fw_event;
384 u32 mboxmsg[BFI_IOC_MSGSZ]; 446 u32 mboxmsg[BFI_IOC_MSGSZ];
385}; 447};
386 448
@@ -393,6 +455,7 @@ union bfi_ioc_i2h_msg_u {
393 455
394#define BFI_PBC_MAX_BLUNS 8 456#define BFI_PBC_MAX_BLUNS 8
395#define BFI_PBC_MAX_VPORTS 16 457#define BFI_PBC_MAX_VPORTS 16
458#define BFI_PBC_PORT_DISABLED 2
396 459
397/* 460/*
398 * PBC boot lun configuration 461 * PBC boot lun configuration
@@ -574,6 +637,496 @@ union bfi_port_i2h_msg_u {
574 struct bfi_port_generic_rsp_s clearstats_rsp; 637 struct bfi_port_generic_rsp_s clearstats_rsp;
575}; 638};
576 639
640/*
641 *----------------------------------------------------------------------
642 * ABLK
643 *----------------------------------------------------------------------
644 */
645enum bfi_ablk_h2i_msgs_e {
646 BFI_ABLK_H2I_QUERY = 1,
647 BFI_ABLK_H2I_ADPT_CONFIG = 2,
648 BFI_ABLK_H2I_PORT_CONFIG = 3,
649 BFI_ABLK_H2I_PF_CREATE = 4,
650 BFI_ABLK_H2I_PF_DELETE = 5,
651 BFI_ABLK_H2I_PF_UPDATE = 6,
652 BFI_ABLK_H2I_OPTROM_ENABLE = 7,
653 BFI_ABLK_H2I_OPTROM_DISABLE = 8,
654};
655
656enum bfi_ablk_i2h_msgs_e {
657 BFI_ABLK_I2H_QUERY = BFA_I2HM(BFI_ABLK_H2I_QUERY),
658 BFI_ABLK_I2H_ADPT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_ADPT_CONFIG),
659 BFI_ABLK_I2H_PORT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_PORT_CONFIG),
660 BFI_ABLK_I2H_PF_CREATE = BFA_I2HM(BFI_ABLK_H2I_PF_CREATE),
661 BFI_ABLK_I2H_PF_DELETE = BFA_I2HM(BFI_ABLK_H2I_PF_DELETE),
662 BFI_ABLK_I2H_PF_UPDATE = BFA_I2HM(BFI_ABLK_H2I_PF_UPDATE),
663 BFI_ABLK_I2H_OPTROM_ENABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_ENABLE),
664 BFI_ABLK_I2H_OPTROM_DISABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_DISABLE),
665};
666
667/* BFI_ABLK_H2I_QUERY */
668struct bfi_ablk_h2i_query_s {
669 struct bfi_mhdr_s mh;
670 union bfi_addr_u addr;
671};
672
673/* BFI_ABL_H2I_ADPT_CONFIG, BFI_ABLK_H2I_PORT_CONFIG */
674struct bfi_ablk_h2i_cfg_req_s {
675 struct bfi_mhdr_s mh;
676 u8 mode;
677 u8 port;
678 u8 max_pf;
679 u8 max_vf;
680};
681
682/*
683 * BFI_ABLK_H2I_PF_CREATE, BFI_ABLK_H2I_PF_DELETE,
684 */
685struct bfi_ablk_h2i_pf_req_s {
686 struct bfi_mhdr_s mh;
687 u8 pcifn;
688 u8 port;
689 u16 pers;
690 u32 bw;
691};
692
693/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */
694struct bfi_ablk_h2i_optrom_s {
695 struct bfi_mhdr_s mh;
696};
697
698/*
699 * BFI_ABLK_I2H_QUERY
700 * BFI_ABLK_I2H_PORT_CONFIG
701 * BFI_ABLK_I2H_PF_CREATE
702 * BFI_ABLK_I2H_PF_DELETE
703 * BFI_ABLK_I2H_PF_UPDATE
704 * BFI_ABLK_I2H_OPTROM_ENABLE
705 * BFI_ABLK_I2H_OPTROM_DISABLE
706 */
707struct bfi_ablk_i2h_rsp_s {
708 struct bfi_mhdr_s mh;
709 u8 status;
710 u8 pcifn;
711 u8 port_mode;
712};
713
714
715/*
716 * CEE module specific messages
717 */
718
719/* Mailbox commands from host to firmware */
720enum bfi_cee_h2i_msgs_e {
721 BFI_CEE_H2I_GET_CFG_REQ = 1,
722 BFI_CEE_H2I_RESET_STATS = 2,
723 BFI_CEE_H2I_GET_STATS_REQ = 3,
724};
725
726enum bfi_cee_i2h_msgs_e {
727 BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
728 BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
729 BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
730};
731
732/*
733 * H2I command structure for resetting the stats
734 */
735struct bfi_cee_reset_stats_s {
736 struct bfi_mhdr_s mh;
737};
738
739/*
740 * Get configuration command from host
741 */
742struct bfi_cee_get_req_s {
743 struct bfi_mhdr_s mh;
744 union bfi_addr_u dma_addr;
745};
746
747/*
748 * Reply message from firmware
749 */
750struct bfi_cee_get_rsp_s {
751 struct bfi_mhdr_s mh;
752 u8 cmd_status;
753 u8 rsvd[3];
754};
755
756/*
757 * Reply message from firmware
758 */
759struct bfi_cee_stats_rsp_s {
760 struct bfi_mhdr_s mh;
761 u8 cmd_status;
762 u8 rsvd[3];
763};
764
765/* Mailbox message structures from firmware to host */
766union bfi_cee_i2h_msg_u {
767 struct bfi_mhdr_s mh;
768 struct bfi_cee_get_rsp_s get_rsp;
769 struct bfi_cee_stats_rsp_s stats_rsp;
770};
771
772/*
773 * SFP related
774 */
775
776enum bfi_sfp_h2i_e {
777 BFI_SFP_H2I_SHOW = 1,
778 BFI_SFP_H2I_SCN = 2,
779};
780
781enum bfi_sfp_i2h_e {
782 BFI_SFP_I2H_SHOW = BFA_I2HM(BFI_SFP_H2I_SHOW),
783 BFI_SFP_I2H_SCN = BFA_I2HM(BFI_SFP_H2I_SCN),
784};
785
786/*
787 * SFP state
788 */
789enum bfa_sfp_stat_e {
790 BFA_SFP_STATE_INIT = 0, /* SFP state is uninit */
791 BFA_SFP_STATE_REMOVED = 1, /* SFP is removed */
792 BFA_SFP_STATE_INSERTED = 2, /* SFP is inserted */
793 BFA_SFP_STATE_VALID = 3, /* SFP is valid */
794 BFA_SFP_STATE_UNSUPPORT = 4, /* SFP is unsupport */
795 BFA_SFP_STATE_FAILED = 5, /* SFP i2c read fail */
796};
797
798/*
799 * SFP memory access type
800 */
801enum bfi_sfp_mem_e {
802 BFI_SFP_MEM_ALL = 0x1, /* access all data field */
803 BFI_SFP_MEM_DIAGEXT = 0x2, /* access diag ext data field only */
804};
805
806struct bfi_sfp_req_s {
807 struct bfi_mhdr_s mh;
808 u8 memtype;
809 u8 rsvd[3];
810 struct bfi_alen_s alen;
811};
812
813struct bfi_sfp_rsp_s {
814 struct bfi_mhdr_s mh;
815 u8 status;
816 u8 state;
817 u8 rsvd[2];
818};
819
820/*
821 * FLASH module specific
822 */
823enum bfi_flash_h2i_msgs {
824 BFI_FLASH_H2I_QUERY_REQ = 1,
825 BFI_FLASH_H2I_ERASE_REQ = 2,
826 BFI_FLASH_H2I_WRITE_REQ = 3,
827 BFI_FLASH_H2I_READ_REQ = 4,
828 BFI_FLASH_H2I_BOOT_VER_REQ = 5,
829};
830
831enum bfi_flash_i2h_msgs {
832 BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
833 BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
834 BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
835 BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
836 BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
837 BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
838};
839
840/*
841 * Flash query request
842 */
843struct bfi_flash_query_req_s {
844 struct bfi_mhdr_s mh; /* Common msg header */
845 struct bfi_alen_s alen;
846};
847
848/*
849 * Flash erase request
850 */
851struct bfi_flash_erase_req_s {
852 struct bfi_mhdr_s mh; /* Common msg header */
853 u32 type; /* partition type */
854 u8 instance; /* partition instance */
855 u8 rsv[3];
856};
857
858/*
859 * Flash write request
860 */
861struct bfi_flash_write_req_s {
862 struct bfi_mhdr_s mh; /* Common msg header */
863 struct bfi_alen_s alen;
864 u32 type; /* partition type */
865 u8 instance; /* partition instance */
866 u8 last;
867 u8 rsv[2];
868 u32 offset;
869 u32 length;
870};
871
872/*
873 * Flash read request
874 */
875struct bfi_flash_read_req_s {
876 struct bfi_mhdr_s mh; /* Common msg header */
877 u32 type; /* partition type */
878 u8 instance; /* partition instance */
879 u8 rsv[3];
880 u32 offset;
881 u32 length;
882 struct bfi_alen_s alen;
883};
884
885/*
886 * Flash query response
887 */
888struct bfi_flash_query_rsp_s {
889 struct bfi_mhdr_s mh; /* Common msg header */
890 u32 status;
891};
892
893/*
894 * Flash read response
895 */
896struct bfi_flash_read_rsp_s {
897 struct bfi_mhdr_s mh; /* Common msg header */
898 u32 type; /* partition type */
899 u8 instance; /* partition instance */
900 u8 rsv[3];
901 u32 status;
902 u32 length;
903};
904
905/*
906 * Flash write response
907 */
908struct bfi_flash_write_rsp_s {
909 struct bfi_mhdr_s mh; /* Common msg header */
910 u32 type; /* partition type */
911 u8 instance; /* partition instance */
912 u8 rsv[3];
913 u32 status;
914 u32 length;
915};
916
917/*
918 * Flash erase response
919 */
920struct bfi_flash_erase_rsp_s {
921 struct bfi_mhdr_s mh; /* Common msg header */
922 u32 type; /* partition type */
923 u8 instance; /* partition instance */
924 u8 rsv[3];
925 u32 status;
926};
927
928/*
929 *----------------------------------------------------------------------
930 * DIAG
931 *----------------------------------------------------------------------
932 */
933enum bfi_diag_h2i {
934 BFI_DIAG_H2I_PORTBEACON = 1,
935 BFI_DIAG_H2I_LOOPBACK = 2,
936 BFI_DIAG_H2I_FWPING = 3,
937 BFI_DIAG_H2I_TEMPSENSOR = 4,
938 BFI_DIAG_H2I_LEDTEST = 5,
939 BFI_DIAG_H2I_QTEST = 6,
940};
941
942enum bfi_diag_i2h {
943 BFI_DIAG_I2H_PORTBEACON = BFA_I2HM(BFI_DIAG_H2I_PORTBEACON),
944 BFI_DIAG_I2H_LOOPBACK = BFA_I2HM(BFI_DIAG_H2I_LOOPBACK),
945 BFI_DIAG_I2H_FWPING = BFA_I2HM(BFI_DIAG_H2I_FWPING),
946 BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR),
947 BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
948 BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST),
949};
950
951#define BFI_DIAG_MAX_SGES 2
952#define BFI_DIAG_DMA_BUF_SZ (2 * 1024)
953#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
954#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
955
956struct bfi_diag_lb_req_s {
957 struct bfi_mhdr_s mh;
958 u32 loopcnt;
959 u32 pattern;
960 u8 lb_mode; /*!< bfa_port_opmode_t */
961 u8 speed; /*!< bfa_port_speed_t */
962 u8 rsvd[2];
963};
964
965struct bfi_diag_lb_rsp_s {
966 struct bfi_mhdr_s mh; /* 4 bytes */
967 struct bfa_diag_loopback_result_s res; /* 16 bytes */
968};
969
970struct bfi_diag_fwping_req_s {
971 struct bfi_mhdr_s mh; /* 4 bytes */
972 struct bfi_alen_s alen; /* 12 bytes */
973 u32 data; /* user input data pattern */
974 u32 count; /* user input dma count */
975 u8 qtag; /* track CPE vc */
976 u8 rsv[3];
977};
978
979struct bfi_diag_fwping_rsp_s {
980 struct bfi_mhdr_s mh; /* 4 bytes */
981 u32 data; /* user input data pattern */
982 u8 qtag; /* track CPE vc */
983 u8 dma_status; /* dma status */
984 u8 rsv[2];
985};
986
987/*
988 * Temperature Sensor
989 */
990struct bfi_diag_ts_req_s {
991 struct bfi_mhdr_s mh; /* 4 bytes */
992 u16 temp; /* 10-bit A/D value */
993 u16 brd_temp; /* 9-bit board temp */
994 u8 status;
995 u8 ts_junc; /* show junction tempsensor */
996 u8 ts_brd; /* show board tempsensor */
997 u8 rsv;
998};
999#define bfi_diag_ts_rsp_t struct bfi_diag_ts_req_s
1000
1001struct bfi_diag_ledtest_req_s {
1002 struct bfi_mhdr_s mh; /* 4 bytes */
1003 u8 cmd;
1004 u8 color;
1005 u8 portid;
1006 u8 led; /* bitmap of LEDs to be tested */
1007 u16 freq; /* no. of blinks every 10 secs */
1008 u8 rsv[2];
1009};
1010
1011/* notify host led operation is done */
1012struct bfi_diag_ledtest_rsp_s {
1013 struct bfi_mhdr_s mh; /* 4 bytes */
1014};
1015
1016struct bfi_diag_portbeacon_req_s {
1017 struct bfi_mhdr_s mh; /* 4 bytes */
1018 u32 period; /* beaconing period */
1019 u8 beacon; /* 1: beacon on */
1020 u8 rsvd[3];
1021};
1022
1023/* notify host the beacon is off */
1024struct bfi_diag_portbeacon_rsp_s {
1025 struct bfi_mhdr_s mh; /* 4 bytes */
1026};
1027
1028struct bfi_diag_qtest_req_s {
1029 struct bfi_mhdr_s mh; /* 4 bytes */
1030 u32 data[BFI_LMSG_PL_WSZ]; /* fill up tcm prefetch area */
1031};
1032#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s
1033
1034/*
1035 * PHY module specific
1036 */
1037enum bfi_phy_h2i_msgs_e {
1038 BFI_PHY_H2I_QUERY_REQ = 1,
1039 BFI_PHY_H2I_STATS_REQ = 2,
1040 BFI_PHY_H2I_WRITE_REQ = 3,
1041 BFI_PHY_H2I_READ_REQ = 4,
1042};
1043
1044enum bfi_phy_i2h_msgs_e {
1045 BFI_PHY_I2H_QUERY_RSP = BFA_I2HM(1),
1046 BFI_PHY_I2H_STATS_RSP = BFA_I2HM(2),
1047 BFI_PHY_I2H_WRITE_RSP = BFA_I2HM(3),
1048 BFI_PHY_I2H_READ_RSP = BFA_I2HM(4),
1049};
1050
1051/*
1052 * External PHY query request
1053 */
1054struct bfi_phy_query_req_s {
1055 struct bfi_mhdr_s mh; /* Common msg header */
1056 u8 instance;
1057 u8 rsv[3];
1058 struct bfi_alen_s alen;
1059};
1060
1061/*
1062 * External PHY stats request
1063 */
1064struct bfi_phy_stats_req_s {
1065 struct bfi_mhdr_s mh; /* Common msg header */
1066 u8 instance;
1067 u8 rsv[3];
1068 struct bfi_alen_s alen;
1069};
1070
1071/*
1072 * External PHY write request
1073 */
1074struct bfi_phy_write_req_s {
1075 struct bfi_mhdr_s mh; /* Common msg header */
1076 u8 instance;
1077 u8 last;
1078 u8 rsv[2];
1079 u32 offset;
1080 u32 length;
1081 struct bfi_alen_s alen;
1082};
1083
1084/*
1085 * External PHY read request
1086 */
1087struct bfi_phy_read_req_s {
1088 struct bfi_mhdr_s mh; /* Common msg header */
1089 u8 instance;
1090 u8 rsv[3];
1091 u32 offset;
1092 u32 length;
1093 struct bfi_alen_s alen;
1094};
1095
1096/*
1097 * External PHY query response
1098 */
1099struct bfi_phy_query_rsp_s {
1100 struct bfi_mhdr_s mh; /* Common msg header */
1101 u32 status;
1102};
1103
1104/*
1105 * External PHY stats response
1106 */
1107struct bfi_phy_stats_rsp_s {
1108 struct bfi_mhdr_s mh; /* Common msg header */
1109 u32 status;
1110};
1111
1112/*
1113 * External PHY read response
1114 */
1115struct bfi_phy_read_rsp_s {
1116 struct bfi_mhdr_s mh; /* Common msg header */
1117 u32 status;
1118 u32 length;
1119};
1120
1121/*
1122 * External PHY write response
1123 */
1124struct bfi_phy_write_rsp_s {
1125 struct bfi_mhdr_s mh; /* Common msg header */
1126 u32 status;
1127 u32 length;
1128};
1129
577#pragma pack() 1130#pragma pack()
578 1131
579#endif /* __BFI_H__ */ 1132#endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h
deleted file mode 100644
index 39ad42b66b5b..000000000000
--- a/drivers/scsi/bfa/bfi_cbreg.h
+++ /dev/null
@@ -1,305 +0,0 @@
1
2/*
3 * bfi_cbreg.h crossbow host block register definitions
4 *
5 * !!! Do not edit. Auto generated. !!!
6 */
7
8#ifndef __BFI_CBREG_H__
9#define __BFI_CBREG_H__
10
11
12#define HOSTFN0_INT_STATUS 0x00014000
13#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
14#define __HOSTFN0_INT_STATUS_LVL_SH 20
15#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
16#define __HOSTFN0_INT_STATUS_P 0x000fffff
17#define HOSTFN0_INT_MSK 0x00014004
18#define HOST_PAGE_NUM_FN0 0x00014008
19#define __HOST_PAGE_NUM_FN 0x000001ff
20#define HOSTFN1_INT_STATUS 0x00014100
21#define __HOSTFN1_INT_STAT_LVL_MK 0x00f00000
22#define __HOSTFN1_INT_STAT_LVL_SH 20
23#define __HOSTFN1_INT_STAT_LVL(_v) ((_v) << __HOSTFN1_INT_STAT_LVL_SH)
24#define __HOSTFN1_INT_STAT_P 0x000fffff
25#define HOSTFN1_INT_MSK 0x00014104
26#define HOST_PAGE_NUM_FN1 0x00014108
27#define APP_PLL_400_CTL_REG 0x00014204
28#define __P_400_PLL_LOCK 0x80000000
29#define __APP_PLL_400_SRAM_USE_100MHZ 0x00100000
30#define __APP_PLL_400_RESET_TIMER_MK 0x000e0000
31#define __APP_PLL_400_RESET_TIMER_SH 17
32#define __APP_PLL_400_RESET_TIMER(_v) ((_v) << __APP_PLL_400_RESET_TIMER_SH)
33#define __APP_PLL_400_LOGIC_SOFT_RESET 0x00010000
34#define __APP_PLL_400_CNTLMT0_1_MK 0x0000c000
35#define __APP_PLL_400_CNTLMT0_1_SH 14
36#define __APP_PLL_400_CNTLMT0_1(_v) ((_v) << __APP_PLL_400_CNTLMT0_1_SH)
37#define __APP_PLL_400_JITLMT0_1_MK 0x00003000
38#define __APP_PLL_400_JITLMT0_1_SH 12
39#define __APP_PLL_400_JITLMT0_1(_v) ((_v) << __APP_PLL_400_JITLMT0_1_SH)
40#define __APP_PLL_400_HREF 0x00000800
41#define __APP_PLL_400_HDIV 0x00000400
42#define __APP_PLL_400_P0_1_MK 0x00000300
43#define __APP_PLL_400_P0_1_SH 8
44#define __APP_PLL_400_P0_1(_v) ((_v) << __APP_PLL_400_P0_1_SH)
45#define __APP_PLL_400_Z0_2_MK 0x000000e0
46#define __APP_PLL_400_Z0_2_SH 5
47#define __APP_PLL_400_Z0_2(_v) ((_v) << __APP_PLL_400_Z0_2_SH)
48#define __APP_PLL_400_RSEL200500 0x00000010
49#define __APP_PLL_400_ENARST 0x00000008
50#define __APP_PLL_400_BYPASS 0x00000004
51#define __APP_PLL_400_LRESETN 0x00000002
52#define __APP_PLL_400_ENABLE 0x00000001
53#define APP_PLL_212_CTL_REG 0x00014208
54#define __P_212_PLL_LOCK 0x80000000
55#define __APP_PLL_212_RESET_TIMER_MK 0x000e0000
56#define __APP_PLL_212_RESET_TIMER_SH 17
57#define __APP_PLL_212_RESET_TIMER(_v) ((_v) << __APP_PLL_212_RESET_TIMER_SH)
58#define __APP_PLL_212_LOGIC_SOFT_RESET 0x00010000
59#define __APP_PLL_212_CNTLMT0_1_MK 0x0000c000
60#define __APP_PLL_212_CNTLMT0_1_SH 14
61#define __APP_PLL_212_CNTLMT0_1(_v) ((_v) << __APP_PLL_212_CNTLMT0_1_SH)
62#define __APP_PLL_212_JITLMT0_1_MK 0x00003000
63#define __APP_PLL_212_JITLMT0_1_SH 12
64#define __APP_PLL_212_JITLMT0_1(_v) ((_v) << __APP_PLL_212_JITLMT0_1_SH)
65#define __APP_PLL_212_HREF 0x00000800
66#define __APP_PLL_212_HDIV 0x00000400
67#define __APP_PLL_212_P0_1_MK 0x00000300
68#define __APP_PLL_212_P0_1_SH 8
69#define __APP_PLL_212_P0_1(_v) ((_v) << __APP_PLL_212_P0_1_SH)
70#define __APP_PLL_212_Z0_2_MK 0x000000e0
71#define __APP_PLL_212_Z0_2_SH 5
72#define __APP_PLL_212_Z0_2(_v) ((_v) << __APP_PLL_212_Z0_2_SH)
73#define __APP_PLL_212_RSEL200500 0x00000010
74#define __APP_PLL_212_ENARST 0x00000008
75#define __APP_PLL_212_BYPASS 0x00000004
76#define __APP_PLL_212_LRESETN 0x00000002
77#define __APP_PLL_212_ENABLE 0x00000001
78#define HOST_SEM0_REG 0x00014230
79#define __HOST_SEMAPHORE 0x00000001
80#define HOST_SEM1_REG 0x00014234
81#define HOST_SEM2_REG 0x00014238
82#define HOST_SEM3_REG 0x0001423c
83#define HOST_SEM0_INFO_REG 0x00014240
84#define HOST_SEM1_INFO_REG 0x00014244
85#define HOST_SEM2_INFO_REG 0x00014248
86#define HOST_SEM3_INFO_REG 0x0001424c
87#define HOSTFN0_LPU0_CMD_STAT 0x00019000
88#define __HOSTFN0_LPU0_MBOX_INFO_MK 0xfffffffe
89#define __HOSTFN0_LPU0_MBOX_INFO_SH 1
90#define __HOSTFN0_LPU0_MBOX_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX_INFO_SH)
91#define __HOSTFN0_LPU0_MBOX_CMD_STATUS 0x00000001
92#define LPU0_HOSTFN0_CMD_STAT 0x00019008
93#define __LPU0_HOSTFN0_MBOX_INFO_MK 0xfffffffe
94#define __LPU0_HOSTFN0_MBOX_INFO_SH 1
95#define __LPU0_HOSTFN0_MBOX_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX_INFO_SH)
96#define __LPU0_HOSTFN0_MBOX_CMD_STATUS 0x00000001
97#define HOSTFN1_LPU1_CMD_STAT 0x00019014
98#define __HOSTFN1_LPU1_MBOX_INFO_MK 0xfffffffe
99#define __HOSTFN1_LPU1_MBOX_INFO_SH 1
100#define __HOSTFN1_LPU1_MBOX_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX_INFO_SH)
101#define __HOSTFN1_LPU1_MBOX_CMD_STATUS 0x00000001
102#define LPU1_HOSTFN1_CMD_STAT 0x0001901c
103#define __LPU1_HOSTFN1_MBOX_INFO_MK 0xfffffffe
104#define __LPU1_HOSTFN1_MBOX_INFO_SH 1
105#define __LPU1_HOSTFN1_MBOX_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX_INFO_SH)
106#define __LPU1_HOSTFN1_MBOX_CMD_STATUS 0x00000001
107#define CPE_Q0_DEPTH 0x00010014
108#define CPE_Q0_PI 0x0001001c
109#define CPE_Q0_CI 0x00010020
110#define CPE_Q1_DEPTH 0x00010034
111#define CPE_Q1_PI 0x0001003c
112#define CPE_Q1_CI 0x00010040
113#define CPE_Q2_DEPTH 0x00010054
114#define CPE_Q2_PI 0x0001005c
115#define CPE_Q2_CI 0x00010060
116#define CPE_Q3_DEPTH 0x00010074
117#define CPE_Q3_PI 0x0001007c
118#define CPE_Q3_CI 0x00010080
119#define CPE_Q4_DEPTH 0x00010094
120#define CPE_Q4_PI 0x0001009c
121#define CPE_Q4_CI 0x000100a0
122#define CPE_Q5_DEPTH 0x000100b4
123#define CPE_Q5_PI 0x000100bc
124#define CPE_Q5_CI 0x000100c0
125#define CPE_Q6_DEPTH 0x000100d4
126#define CPE_Q6_PI 0x000100dc
127#define CPE_Q6_CI 0x000100e0
128#define CPE_Q7_DEPTH 0x000100f4
129#define CPE_Q7_PI 0x000100fc
130#define CPE_Q7_CI 0x00010100
131#define RME_Q0_DEPTH 0x00011014
132#define RME_Q0_PI 0x0001101c
133#define RME_Q0_CI 0x00011020
134#define RME_Q1_DEPTH 0x00011034
135#define RME_Q1_PI 0x0001103c
136#define RME_Q1_CI 0x00011040
137#define RME_Q2_DEPTH 0x00011054
138#define RME_Q2_PI 0x0001105c
139#define RME_Q2_CI 0x00011060
140#define RME_Q3_DEPTH 0x00011074
141#define RME_Q3_PI 0x0001107c
142#define RME_Q3_CI 0x00011080
143#define RME_Q4_DEPTH 0x00011094
144#define RME_Q4_PI 0x0001109c
145#define RME_Q4_CI 0x000110a0
146#define RME_Q5_DEPTH 0x000110b4
147#define RME_Q5_PI 0x000110bc
148#define RME_Q5_CI 0x000110c0
149#define RME_Q6_DEPTH 0x000110d4
150#define RME_Q6_PI 0x000110dc
151#define RME_Q6_CI 0x000110e0
152#define RME_Q7_DEPTH 0x000110f4
153#define RME_Q7_PI 0x000110fc
154#define RME_Q7_CI 0x00011100
155#define PSS_CTL_REG 0x00018800
156#define __PSS_I2C_CLK_DIV_MK 0x00030000
157#define __PSS_I2C_CLK_DIV_SH 16
158#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
159#define __PSS_LMEM_INIT_DONE 0x00001000
160#define __PSS_LMEM_RESET 0x00000200
161#define __PSS_LMEM_INIT_EN 0x00000100
162#define __PSS_LPU1_RESET 0x00000002
163#define __PSS_LPU0_RESET 0x00000001
164#define PSS_ERR_STATUS_REG 0x00018810
165#define __PSS_LMEM1_CORR_ERR 0x00000800
166#define __PSS_LMEM0_CORR_ERR 0x00000400
167#define __PSS_LMEM1_UNCORR_ERR 0x00000200
168#define __PSS_LMEM0_UNCORR_ERR 0x00000100
169#define __PSS_BAL_PERR 0x00000080
170#define __PSS_DIP_IF_ERR 0x00000040
171#define __PSS_IOH_IF_ERR 0x00000020
172#define __PSS_TDS_IF_ERR 0x00000010
173#define __PSS_RDS_IF_ERR 0x00000008
174#define __PSS_SGM_IF_ERR 0x00000004
175#define __PSS_LPU1_RAM_ERR 0x00000002
176#define __PSS_LPU0_RAM_ERR 0x00000001
177#define ERR_SET_REG 0x00018818
178#define __PSS_ERR_STATUS_SET 0x00000fff
179
180
181/*
182 * These definitions are either in error/missing in spec. Its auto-generated
183 * from hard coded values in regparse.pl.
184 */
185#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
186#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
187#define __EMPHPRE_AT_4G_FIX 0x00000003
188#define __SFP_TXRATE_EN_FIX 0x00000100
189#define __SFP_RXRATE_EN_FIX 0x00000080
190
191
192/*
193 * These register definitions are auto-generated from hard coded values
194 * in regparse.pl.
195 */
196#define HOSTFN0_LPU_MBOX0_0 0x00019200
197#define HOSTFN1_LPU_MBOX0_8 0x00019260
198#define LPU_HOSTFN0_MBOX0_0 0x00019280
199#define LPU_HOSTFN1_MBOX0_8 0x000192e0
200
201
202/*
203 * These register mapping definitions are auto-generated from mapping tables
204 * in regparse.pl.
205 */
206#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
207#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
208#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
209#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
210#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
211#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
212
213#define CPE_Q_DEPTH(__n) \
214 (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))
215#define CPE_Q_PI(__n) \
216 (CPE_Q0_PI + (__n) * (CPE_Q1_PI - CPE_Q0_PI))
217#define CPE_Q_CI(__n) \
218 (CPE_Q0_CI + (__n) * (CPE_Q1_CI - CPE_Q0_CI))
219#define RME_Q_DEPTH(__n) \
220 (RME_Q0_DEPTH + (__n) * (RME_Q1_DEPTH - RME_Q0_DEPTH))
221#define RME_Q_PI(__n) \
222 (RME_Q0_PI + (__n) * (RME_Q1_PI - RME_Q0_PI))
223#define RME_Q_CI(__n) \
224 (RME_Q0_CI + (__n) * (RME_Q1_CI - RME_Q0_CI))
225
226#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
227#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
228#define CPE_Q_MASK(__q) ((__q) & 0x3)
229#define RME_Q_MASK(__q) ((__q) & 0x3)
230
231
232/*
233 * PCI MSI-X vector defines
234 */
235enum {
236 BFA_MSIX_CPE_Q0 = 0,
237 BFA_MSIX_CPE_Q1 = 1,
238 BFA_MSIX_CPE_Q2 = 2,
239 BFA_MSIX_CPE_Q3 = 3,
240 BFA_MSIX_CPE_Q4 = 4,
241 BFA_MSIX_CPE_Q5 = 5,
242 BFA_MSIX_CPE_Q6 = 6,
243 BFA_MSIX_CPE_Q7 = 7,
244 BFA_MSIX_RME_Q0 = 8,
245 BFA_MSIX_RME_Q1 = 9,
246 BFA_MSIX_RME_Q2 = 10,
247 BFA_MSIX_RME_Q3 = 11,
248 BFA_MSIX_RME_Q4 = 12,
249 BFA_MSIX_RME_Q5 = 13,
250 BFA_MSIX_RME_Q6 = 14,
251 BFA_MSIX_RME_Q7 = 15,
252 BFA_MSIX_ERR_EMC = 16,
253 BFA_MSIX_ERR_LPU0 = 17,
254 BFA_MSIX_ERR_LPU1 = 18,
255 BFA_MSIX_ERR_PSS = 19,
256 BFA_MSIX_MBOX_LPU0 = 20,
257 BFA_MSIX_MBOX_LPU1 = 21,
258 BFA_MSIX_CB_MAX = 22,
259};
260
261/*
262 * And corresponding host interrupt status bit field defines
263 */
264#define __HFN_INT_CPE_Q0 0x00000001U
265#define __HFN_INT_CPE_Q1 0x00000002U
266#define __HFN_INT_CPE_Q2 0x00000004U
267#define __HFN_INT_CPE_Q3 0x00000008U
268#define __HFN_INT_CPE_Q4 0x00000010U
269#define __HFN_INT_CPE_Q5 0x00000020U
270#define __HFN_INT_CPE_Q6 0x00000040U
271#define __HFN_INT_CPE_Q7 0x00000080U
272#define __HFN_INT_RME_Q0 0x00000100U
273#define __HFN_INT_RME_Q1 0x00000200U
274#define __HFN_INT_RME_Q2 0x00000400U
275#define __HFN_INT_RME_Q3 0x00000800U
276#define __HFN_INT_RME_Q4 0x00001000U
277#define __HFN_INT_RME_Q5 0x00002000U
278#define __HFN_INT_RME_Q6 0x00004000U
279#define __HFN_INT_RME_Q7 0x00008000U
280#define __HFN_INT_ERR_EMC 0x00010000U
281#define __HFN_INT_ERR_LPU0 0x00020000U
282#define __HFN_INT_ERR_LPU1 0x00040000U
283#define __HFN_INT_ERR_PSS 0x00080000U
284#define __HFN_INT_MBOX_LPU0 0x00100000U
285#define __HFN_INT_MBOX_LPU1 0x00200000U
286#define __HFN_INT_MBOX1_LPU0 0x00400000U
287#define __HFN_INT_MBOX1_LPU1 0x00800000U
288#define __HFN_INT_CPE_MASK 0x000000ffU
289#define __HFN_INT_RME_MASK 0x0000ff00U
290
291
292/*
293 * crossbow memory map.
294 */
295#define PSS_SMEM_PAGE_START 0x8000
296#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
297#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
298
299/*
300 * End of crossbow memory map
301 */
302
303
304#endif /* __BFI_CBREG_H__ */
305
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h
deleted file mode 100644
index fc4ce4a5a183..000000000000
--- a/drivers/scsi/bfa/bfi_ctreg.h
+++ /dev/null
@@ -1,636 +0,0 @@
1
2/*
3 * bfi_ctreg.h catapult host block register definitions
4 *
5 * !!! Do not edit. Auto generated. !!!
6 */
7
8#ifndef __BFI_CTREG_H__
9#define __BFI_CTREG_H__
10
11
12#define HOSTFN0_LPU_MBOX0_0 0x00019200
13#define HOSTFN1_LPU_MBOX0_8 0x00019260
14#define LPU_HOSTFN0_MBOX0_0 0x00019280
15#define LPU_HOSTFN1_MBOX0_8 0x000192e0
16#define HOSTFN2_LPU_MBOX0_0 0x00019400
17#define HOSTFN3_LPU_MBOX0_8 0x00019460
18#define LPU_HOSTFN2_MBOX0_0 0x00019480
19#define LPU_HOSTFN3_MBOX0_8 0x000194e0
20#define HOSTFN0_INT_STATUS 0x00014000
21#define __HOSTFN0_HALT_OCCURRED 0x01000000
22#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
23#define __HOSTFN0_INT_STATUS_LVL_SH 20
24#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
25#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
26#define __HOSTFN0_INT_STATUS_P_SH 16
27#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
28#define __HOSTFN0_INT_STATUS_F 0x0000ffff
29#define HOSTFN0_INT_MSK 0x00014004
30#define HOST_PAGE_NUM_FN0 0x00014008
31#define __HOST_PAGE_NUM_FN 0x000001ff
32#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
33#define __MSIX_ERR_INDEX_FN 0x000001ff
34#define HOSTFN1_INT_STATUS 0x00014100
35#define __HOSTFN1_HALT_OCCURRED 0x01000000
36#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
37#define __HOSTFN1_INT_STATUS_LVL_SH 20
38#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
39#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
40#define __HOSTFN1_INT_STATUS_P_SH 16
41#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
42#define __HOSTFN1_INT_STATUS_F 0x0000ffff
43#define HOSTFN1_INT_MSK 0x00014104
44#define HOST_PAGE_NUM_FN1 0x00014108
45#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
46#define APP_PLL_425_CTL_REG 0x00014204
47#define __P_425_PLL_LOCK 0x80000000
48#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
49#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
50#define __APP_PLL_425_RESET_TIMER_SH 17
51#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
52#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
53#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
54#define __APP_PLL_425_CNTLMT0_1_SH 14
55#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
56#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
57#define __APP_PLL_425_JITLMT0_1_SH 12
58#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
59#define __APP_PLL_425_HREF 0x00000800
60#define __APP_PLL_425_HDIV 0x00000400
61#define __APP_PLL_425_P0_1_MK 0x00000300
62#define __APP_PLL_425_P0_1_SH 8
63#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
64#define __APP_PLL_425_Z0_2_MK 0x000000e0
65#define __APP_PLL_425_Z0_2_SH 5
66#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
67#define __APP_PLL_425_RSEL200500 0x00000010
68#define __APP_PLL_425_ENARST 0x00000008
69#define __APP_PLL_425_BYPASS 0x00000004
70#define __APP_PLL_425_LRESETN 0x00000002
71#define __APP_PLL_425_ENABLE 0x00000001
72#define APP_PLL_312_CTL_REG 0x00014208
73#define __P_312_PLL_LOCK 0x80000000
74#define __ENABLE_MAC_AHB_1 0x00800000
75#define __ENABLE_MAC_AHB_0 0x00400000
76#define __ENABLE_MAC_1 0x00200000
77#define __ENABLE_MAC_0 0x00100000
78#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
79#define __APP_PLL_312_RESET_TIMER_SH 17
80#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
81#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
82#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
83#define __APP_PLL_312_CNTLMT0_1_SH 14
84#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
85#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
86#define __APP_PLL_312_JITLMT0_1_SH 12
87#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
88#define __APP_PLL_312_HREF 0x00000800
89#define __APP_PLL_312_HDIV 0x00000400
90#define __APP_PLL_312_P0_1_MK 0x00000300
91#define __APP_PLL_312_P0_1_SH 8
92#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
93#define __APP_PLL_312_Z0_2_MK 0x000000e0
94#define __APP_PLL_312_Z0_2_SH 5
95#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
96#define __APP_PLL_312_RSEL200500 0x00000010
97#define __APP_PLL_312_ENARST 0x00000008
98#define __APP_PLL_312_BYPASS 0x00000004
99#define __APP_PLL_312_LRESETN 0x00000002
100#define __APP_PLL_312_ENABLE 0x00000001
101#define MBIST_CTL_REG 0x00014220
102#define __EDRAM_BISTR_START 0x00000004
103#define __MBIST_RESET 0x00000002
104#define __MBIST_START 0x00000001
105#define MBIST_STAT_REG 0x00014224
106#define __EDRAM_BISTR_STATUS 0x00000008
107#define __EDRAM_BISTR_DONE 0x00000004
108#define __MEM_BIT_STATUS 0x00000002
109#define __MBIST_DONE 0x00000001
110#define HOST_SEM0_REG 0x00014230
111#define __HOST_SEMAPHORE 0x00000001
112#define HOST_SEM1_REG 0x00014234
113#define HOST_SEM2_REG 0x00014238
114#define HOST_SEM3_REG 0x0001423c
115#define HOST_SEM0_INFO_REG 0x00014240
116#define HOST_SEM1_INFO_REG 0x00014244
117#define HOST_SEM2_INFO_REG 0x00014248
118#define HOST_SEM3_INFO_REG 0x0001424c
119#define ETH_MAC_SER_REG 0x00014288
120#define __APP_EMS_CKBUFAMPIN 0x00000020
121#define __APP_EMS_REFCLKSEL 0x00000010
122#define __APP_EMS_CMLCKSEL 0x00000008
123#define __APP_EMS_REFCKBUFEN2 0x00000004
124#define __APP_EMS_REFCKBUFEN1 0x00000002
125#define __APP_EMS_CHANNEL_SEL 0x00000001
126#define HOSTFN2_INT_STATUS 0x00014300
127#define __HOSTFN2_HALT_OCCURRED 0x01000000
128#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
129#define __HOSTFN2_INT_STATUS_LVL_SH 20
130#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
131#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
132#define __HOSTFN2_INT_STATUS_P_SH 16
133#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
134#define __HOSTFN2_INT_STATUS_F 0x0000ffff
135#define HOSTFN2_INT_MSK 0x00014304
136#define HOST_PAGE_NUM_FN2 0x00014308
137#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
138#define HOSTFN3_INT_STATUS 0x00014400
139#define __HALT_OCCURRED 0x01000000
140#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
141#define __HOSTFN3_INT_STATUS_LVL_SH 20
142#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
143#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
144#define __HOSTFN3_INT_STATUS_P_SH 16
145#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
146#define __HOSTFN3_INT_STATUS_F 0x0000ffff
147#define HOSTFN3_INT_MSK 0x00014404
148#define HOST_PAGE_NUM_FN3 0x00014408
149#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
150#define FNC_ID_REG 0x00014600
151#define __FUNCTION_NUMBER 0x00000007
152#define FNC_PERS_REG 0x00014604
153#define __F3_FUNCTION_ACTIVE 0x80000000
154#define __F3_FUNCTION_MODE 0x40000000
155#define __F3_PORT_MAP_MK 0x30000000
156#define __F3_PORT_MAP_SH 28
157#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
158#define __F3_VM_MODE 0x08000000
159#define __F3_INTX_STATUS_MK 0x07000000
160#define __F3_INTX_STATUS_SH 24
161#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
162#define __F2_FUNCTION_ACTIVE 0x00800000
163#define __F2_FUNCTION_MODE 0x00400000
164#define __F2_PORT_MAP_MK 0x00300000
165#define __F2_PORT_MAP_SH 20
166#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
167#define __F2_VM_MODE 0x00080000
168#define __F2_INTX_STATUS_MK 0x00070000
169#define __F2_INTX_STATUS_SH 16
170#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
171#define __F1_FUNCTION_ACTIVE 0x00008000
172#define __F1_FUNCTION_MODE 0x00004000
173#define __F1_PORT_MAP_MK 0x00003000
174#define __F1_PORT_MAP_SH 12
175#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
176#define __F1_VM_MODE 0x00000800
177#define __F1_INTX_STATUS_MK 0x00000700
178#define __F1_INTX_STATUS_SH 8
179#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
180#define __F0_FUNCTION_ACTIVE 0x00000080
181#define __F0_FUNCTION_MODE 0x00000040
182#define __F0_PORT_MAP_MK 0x00000030
183#define __F0_PORT_MAP_SH 4
184#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
185#define __F0_VM_MODE 0x00000008
186#define __F0_INTX_STATUS 0x00000007
187enum {
188 __F0_INTX_STATUS_MSIX = 0x0,
189 __F0_INTX_STATUS_INTA = 0x1,
190 __F0_INTX_STATUS_INTB = 0x2,
191 __F0_INTX_STATUS_INTC = 0x3,
192 __F0_INTX_STATUS_INTD = 0x4,
193};
194#define OP_MODE 0x0001460c
195#define __APP_ETH_CLK_LOWSPEED 0x00000004
196#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
197#define __GLOBAL_FCOE_MODE 0x00000001
198#define HOST_SEM4_REG 0x00014610
199#define HOST_SEM5_REG 0x00014614
200#define HOST_SEM6_REG 0x00014618
201#define HOST_SEM7_REG 0x0001461c
202#define HOST_SEM4_INFO_REG 0x00014620
203#define HOST_SEM5_INFO_REG 0x00014624
204#define HOST_SEM6_INFO_REG 0x00014628
205#define HOST_SEM7_INFO_REG 0x0001462c
206#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
207#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
208#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
209#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
210#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
211#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
212#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
213#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
214#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
215#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
216#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
217#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
218#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
219#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
220#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
221#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
222#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
223#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
224#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
225#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
226#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
227#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
228#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
229#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
230#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
231#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
232#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
233#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
234#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
235#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
236#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
237#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
238#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
239#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
240#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
241#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
242#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
243#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
244#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
245#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
246#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
247#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
248#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
249#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
250#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
251#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
252#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
253#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
254#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
255#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
256#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
257#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
258#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
259#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
260#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
261#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
262#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
263#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
264#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
265#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
266#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
267#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
268#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
269#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
270#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
271#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
272#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
273#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
274#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
275#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
276#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
277#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
278#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
279#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
280#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
281#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
282#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
283#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
284#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
285#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
286#define FW_INIT_HALT_P0 0x000191ac
287#define __FW_INIT_HALT_P 0x00000001
288#define FW_INIT_HALT_P1 0x000191bc
289#define CPE_PI_PTR_Q0 0x00038000
290#define __CPE_PI_UNUSED_MK 0xffff0000
291#define __CPE_PI_UNUSED_SH 16
292#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
293#define __CPE_PI_PTR 0x0000ffff
294#define CPE_PI_PTR_Q1 0x00038040
295#define CPE_CI_PTR_Q0 0x00038004
296#define __CPE_CI_UNUSED_MK 0xffff0000
297#define __CPE_CI_UNUSED_SH 16
298#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
299#define __CPE_CI_PTR 0x0000ffff
300#define CPE_CI_PTR_Q1 0x00038044
301#define CPE_DEPTH_Q0 0x00038008
302#define __CPE_DEPTH_UNUSED_MK 0xf8000000
303#define __CPE_DEPTH_UNUSED_SH 27
304#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
305#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
306#define __CPE_MSIX_VEC_INDEX_SH 16
307#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
308#define __CPE_DEPTH 0x0000ffff
309#define CPE_DEPTH_Q1 0x00038048
310#define CPE_QCTRL_Q0 0x0003800c
311#define __CPE_CTRL_UNUSED30_MK 0xfc000000
312#define __CPE_CTRL_UNUSED30_SH 26
313#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
314#define __CPE_FUNC_INT_CTRL_MK 0x03000000
315#define __CPE_FUNC_INT_CTRL_SH 24
316#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
317enum {
318 __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
319 __CPE_FUNC_INT_CTRL_F2NF = 0x1,
320 __CPE_FUNC_INT_CTRL_3QUART = 0x2,
321 __CPE_FUNC_INT_CTRL_HALF = 0x3,
322};
323#define __CPE_CTRL_UNUSED20_MK 0x00f00000
324#define __CPE_CTRL_UNUSED20_SH 20
325#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
326#define __CPE_SCI_TH_MK 0x000f0000
327#define __CPE_SCI_TH_SH 16
328#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
329#define __CPE_CTRL_UNUSED10_MK 0x0000c000
330#define __CPE_CTRL_UNUSED10_SH 14
331#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
332#define __CPE_ACK_PENDING 0x00002000
333#define __CPE_CTRL_UNUSED40_MK 0x00001c00
334#define __CPE_CTRL_UNUSED40_SH 10
335#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
336#define __CPE_PCIEID_MK 0x00000300
337#define __CPE_PCIEID_SH 8
338#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
339#define __CPE_CTRL_UNUSED00_MK 0x000000fe
340#define __CPE_CTRL_UNUSED00_SH 1
341#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
342#define __CPE_ESIZE 0x00000001
343#define CPE_QCTRL_Q1 0x0003804c
344#define __CPE_CTRL_UNUSED31_MK 0xfc000000
345#define __CPE_CTRL_UNUSED31_SH 26
346#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
347#define __CPE_CTRL_UNUSED21_MK 0x00f00000
348#define __CPE_CTRL_UNUSED21_SH 20
349#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
350#define __CPE_CTRL_UNUSED11_MK 0x0000c000
351#define __CPE_CTRL_UNUSED11_SH 14
352#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
353#define __CPE_CTRL_UNUSED41_MK 0x00001c00
354#define __CPE_CTRL_UNUSED41_SH 10
355#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
356#define __CPE_CTRL_UNUSED01_MK 0x000000fe
357#define __CPE_CTRL_UNUSED01_SH 1
358#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
359#define RME_PI_PTR_Q0 0x00038020
360#define __LATENCY_TIME_STAMP_MK 0xffff0000
361#define __LATENCY_TIME_STAMP_SH 16
362#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
363#define __RME_PI_PTR 0x0000ffff
364#define RME_PI_PTR_Q1 0x00038060
365#define RME_CI_PTR_Q0 0x00038024
366#define __DELAY_TIME_STAMP_MK 0xffff0000
367#define __DELAY_TIME_STAMP_SH 16
368#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
369#define __RME_CI_PTR 0x0000ffff
370#define RME_CI_PTR_Q1 0x00038064
371#define RME_DEPTH_Q0 0x00038028
372#define __RME_DEPTH_UNUSED_MK 0xf8000000
373#define __RME_DEPTH_UNUSED_SH 27
374#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
375#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
376#define __RME_MSIX_VEC_INDEX_SH 16
377#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
378#define __RME_DEPTH 0x0000ffff
379#define RME_DEPTH_Q1 0x00038068
380#define RME_QCTRL_Q0 0x0003802c
381#define __RME_INT_LATENCY_TIMER_MK 0xff000000
382#define __RME_INT_LATENCY_TIMER_SH 24
383#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
384#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
385#define __RME_INT_DELAY_TIMER_SH 16
386#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
387#define __RME_INT_DELAY_DISABLE 0x00008000
388#define __RME_DLY_DELAY_DISABLE 0x00004000
389#define __RME_ACK_PENDING 0x00002000
390#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
391#define __RME_CTRL_UNUSED10_MK 0x00000c00
392#define __RME_CTRL_UNUSED10_SH 10
393#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
394#define __RME_PCIEID_MK 0x00000300
395#define __RME_PCIEID_SH 8
396#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
397#define __RME_CTRL_UNUSED00_MK 0x000000fe
398#define __RME_CTRL_UNUSED00_SH 1
399#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
400#define __RME_ESIZE 0x00000001
401#define RME_QCTRL_Q1 0x0003806c
402#define __RME_CTRL_UNUSED11_MK 0x00000c00
403#define __RME_CTRL_UNUSED11_SH 10
404#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
405#define __RME_CTRL_UNUSED01_MK 0x000000fe
406#define __RME_CTRL_UNUSED01_SH 1
407#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
408#define PSS_CTL_REG 0x00018800
409#define __PSS_I2C_CLK_DIV_MK 0x007f0000
410#define __PSS_I2C_CLK_DIV_SH 16
411#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
412#define __PSS_LMEM_INIT_DONE 0x00001000
413#define __PSS_LMEM_RESET 0x00000200
414#define __PSS_LMEM_INIT_EN 0x00000100
415#define __PSS_LPU1_RESET 0x00000002
416#define __PSS_LPU0_RESET 0x00000001
417#define PSS_ERR_STATUS_REG 0x00018810
418#define __PSS_LPU1_TCM_READ_ERR 0x00200000
419#define __PSS_LPU0_TCM_READ_ERR 0x00100000
420#define __PSS_LMEM5_CORR_ERR 0x00080000
421#define __PSS_LMEM4_CORR_ERR 0x00040000
422#define __PSS_LMEM3_CORR_ERR 0x00020000
423#define __PSS_LMEM2_CORR_ERR 0x00010000
424#define __PSS_LMEM1_CORR_ERR 0x00008000
425#define __PSS_LMEM0_CORR_ERR 0x00004000
426#define __PSS_LMEM5_UNCORR_ERR 0x00002000
427#define __PSS_LMEM4_UNCORR_ERR 0x00001000
428#define __PSS_LMEM3_UNCORR_ERR 0x00000800
429#define __PSS_LMEM2_UNCORR_ERR 0x00000400
430#define __PSS_LMEM1_UNCORR_ERR 0x00000200
431#define __PSS_LMEM0_UNCORR_ERR 0x00000100
432#define __PSS_BAL_PERR 0x00000080
433#define __PSS_DIP_IF_ERR 0x00000040
434#define __PSS_IOH_IF_ERR 0x00000020
435#define __PSS_TDS_IF_ERR 0x00000010
436#define __PSS_RDS_IF_ERR 0x00000008
437#define __PSS_SGM_IF_ERR 0x00000004
438#define __PSS_LPU1_RAM_ERR 0x00000002
439#define __PSS_LPU0_RAM_ERR 0x00000001
440#define ERR_SET_REG 0x00018818
441#define __PSS_ERR_STATUS_SET 0x003fffff
442#define PMM_1T_RESET_REG_P0 0x0002381c
443#define __PMM_1T_RESET_P 0x00000001
444#define PMM_1T_RESET_REG_P1 0x00023c1c
445#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
446#define __RXQ0_ADD_VECTORS_P 0x80000000
447#define __RXQ0_STOP_P 0x40000000
448#define __RXQ0_PRD_PTR_P 0x0000ffff
449#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
450#define __RXQ1_ADD_VECTORS_P 0x80000000
451#define __RXQ1_STOP_P 0x40000000
452#define __RXQ1_PRD_PTR_P 0x0000ffff
453#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
454#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
455#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
456#define __TXQ0_ADD_VECTORS_P 0x80000000
457#define __TXQ0_STOP_P 0x40000000
458#define __TXQ0_PRD_PTR_P 0x0000ffff
459#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
460#define __TXQ1_ADD_VECTORS_P 0x80000000
461#define __TXQ1_STOP_P 0x40000000
462#define __TXQ1_PRD_PTR_P 0x0000ffff
463#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
464#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
465#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
466#define __IB1_0_ACK_P 0x80000000
467#define __IB1_0_DISABLE_P 0x40000000
468#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
469#define __IB1_0_COALESCING_CFG_P_SH 16
470#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
471#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
472#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
473#define __IB1_1_ACK_P 0x80000000
474#define __IB1_1_DISABLE_P 0x40000000
475#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
476#define __IB1_1_COALESCING_CFG_P_SH 16
477#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
478#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
479#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
480#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
481#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
482#define __IB2_0_ACK_P 0x80000000
483#define __IB2_0_DISABLE_P 0x40000000
484#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
485#define __IB2_0_COALESCING_CFG_P_SH 16
486#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
487#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
488#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
489#define __IB2_1_ACK_P 0x80000000
490#define __IB2_1_DISABLE_P 0x40000000
491#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
492#define __IB2_1_COALESCING_CFG_P_SH 16
493#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
494#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
495#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
496#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
497
498
499/*
500 * These definitions are either in error/missing in spec. Its auto-generated
501 * from hard coded values in regparse.pl.
502 */
503#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
504#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
505#define __EMPHPRE_AT_4G_FIX 0x00000003
506#define __SFP_TXRATE_EN_FIX 0x00000100
507#define __SFP_RXRATE_EN_FIX 0x00000080
508
509
510/*
511 * These register definitions are auto-generated from hard coded values
512 * in regparse.pl.
513 */
514
515
516/*
517 * These register mapping definitions are auto-generated from mapping tables
518 * in regparse.pl.
519 */
520#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
521#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
522#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
523#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
524#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
525#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
526
527#define CPE_DEPTH_Q(__n) \
528 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
529#define CPE_QCTRL_Q(__n) \
530 (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
531#define CPE_PI_PTR_Q(__n) \
532 (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
533#define CPE_CI_PTR_Q(__n) \
534 (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
535#define RME_DEPTH_Q(__n) \
536 (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
537#define RME_QCTRL_Q(__n) \
538 (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
539#define RME_PI_PTR_Q(__n) \
540 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
541#define RME_CI_PTR_Q(__n) \
542 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
543#define HQM_QSET_RXQ_DRBL_P0(__n) \
544 (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
545 (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
546#define HQM_QSET_TXQ_DRBL_P0(__n) \
547 (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
548 (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
549#define HQM_QSET_IB_DRBL_1_P0(__n) \
550 (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
551 (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
552#define HQM_QSET_IB_DRBL_2_P0(__n) \
553 (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
554 (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
555#define HQM_QSET_RXQ_DRBL_P1(__n) \
556 (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
557 (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
558#define HQM_QSET_TXQ_DRBL_P1(__n) \
559 (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
560 (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
561#define HQM_QSET_IB_DRBL_1_P1(__n) \
562 (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
563 (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
564#define HQM_QSET_IB_DRBL_2_P1(__n) \
565 (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
566 (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
567
568#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
569#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
570#define CPE_Q_MASK(__q) ((__q) & 0x3)
571#define RME_Q_MASK(__q) ((__q) & 0x3)
572
573
574/*
575 * PCI MSI-X vector defines
576 */
577enum {
578 BFA_MSIX_CPE_Q0 = 0,
579 BFA_MSIX_CPE_Q1 = 1,
580 BFA_MSIX_CPE_Q2 = 2,
581 BFA_MSIX_CPE_Q3 = 3,
582 BFA_MSIX_RME_Q0 = 4,
583 BFA_MSIX_RME_Q1 = 5,
584 BFA_MSIX_RME_Q2 = 6,
585 BFA_MSIX_RME_Q3 = 7,
586 BFA_MSIX_LPU_ERR = 8,
587 BFA_MSIX_CT_MAX = 9,
588};
589
590/*
591 * And corresponding host interrupt status bit field defines
592 */
593#define __HFN_INT_CPE_Q0 0x00000001U
594#define __HFN_INT_CPE_Q1 0x00000002U
595#define __HFN_INT_CPE_Q2 0x00000004U
596#define __HFN_INT_CPE_Q3 0x00000008U
597#define __HFN_INT_CPE_Q4 0x00000010U
598#define __HFN_INT_CPE_Q5 0x00000020U
599#define __HFN_INT_CPE_Q6 0x00000040U
600#define __HFN_INT_CPE_Q7 0x00000080U
601#define __HFN_INT_RME_Q0 0x00000100U
602#define __HFN_INT_RME_Q1 0x00000200U
603#define __HFN_INT_RME_Q2 0x00000400U
604#define __HFN_INT_RME_Q3 0x00000800U
605#define __HFN_INT_RME_Q4 0x00001000U
606#define __HFN_INT_RME_Q5 0x00002000U
607#define __HFN_INT_RME_Q6 0x00004000U
608#define __HFN_INT_RME_Q7 0x00008000U
609#define __HFN_INT_ERR_EMC 0x00010000U
610#define __HFN_INT_ERR_LPU0 0x00020000U
611#define __HFN_INT_ERR_LPU1 0x00040000U
612#define __HFN_INT_ERR_PSS 0x00080000U
613#define __HFN_INT_MBOX_LPU0 0x00100000U
614#define __HFN_INT_MBOX_LPU1 0x00200000U
615#define __HFN_INT_MBOX1_LPU0 0x00400000U
616#define __HFN_INT_MBOX1_LPU1 0x00800000U
617#define __HFN_INT_LL_HALT 0x01000000U
618#define __HFN_INT_CPE_MASK 0x000000ffU
619#define __HFN_INT_RME_MASK 0x0000ff00U
620
621
622/*
623 * catapult memory map.
624 */
625#define LL_PGN_HQM0 0x0096
626#define LL_PGN_HQM1 0x0097
627#define PSS_SMEM_PAGE_START 0x8000
628#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
629#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
630
631/*
632 * End of catapult memory map
633 */
634
635
636#endif /* __BFI_CTREG_H__ */
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index 19e888a57555..0d9f1fb50db0 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -28,11 +28,17 @@ enum bfi_iocfc_h2i_msgs {
28 BFI_IOCFC_H2I_CFG_REQ = 1, 28 BFI_IOCFC_H2I_CFG_REQ = 1,
29 BFI_IOCFC_H2I_SET_INTR_REQ = 2, 29 BFI_IOCFC_H2I_SET_INTR_REQ = 2,
30 BFI_IOCFC_H2I_UPDATEQ_REQ = 3, 30 BFI_IOCFC_H2I_UPDATEQ_REQ = 3,
31 BFI_IOCFC_H2I_FAA_ENABLE_REQ = 4,
32 BFI_IOCFC_H2I_FAA_DISABLE_REQ = 5,
33 BFI_IOCFC_H2I_FAA_QUERY_REQ = 6,
31}; 34};
32 35
33enum bfi_iocfc_i2h_msgs { 36enum bfi_iocfc_i2h_msgs {
34 BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1), 37 BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
35 BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3), 38 BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3),
39 BFI_IOCFC_I2H_FAA_ENABLE_RSP = BFA_I2HM(4),
40 BFI_IOCFC_I2H_FAA_DISABLE_RSP = BFA_I2HM(5),
41 BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(6),
36}; 42};
37 43
38struct bfi_iocfc_cfg_s { 44struct bfi_iocfc_cfg_s {
@@ -40,6 +46,12 @@ struct bfi_iocfc_cfg_s {
40 u8 sense_buf_len; /* SCSI sense length */ 46 u8 sense_buf_len; /* SCSI sense length */
41 u16 rsvd_1; 47 u16 rsvd_1;
42 u32 endian_sig; /* endian signature of host */ 48 u32 endian_sig; /* endian signature of host */
49 u8 rsvd_2;
50 u8 single_msix_vec;
51 u8 rsvd[2];
52 __be16 num_ioim_reqs;
53 __be16 num_fwtio_reqs;
54
43 55
44 /* 56 /*
45 * Request and response circular queue base addresses, size and 57 * Request and response circular queue base addresses, size and
@@ -54,7 +66,8 @@ struct bfi_iocfc_cfg_s {
54 66
55 union bfi_addr_u stats_addr; /* DMA-able address for stats */ 67 union bfi_addr_u stats_addr; /* DMA-able address for stats */
56 union bfi_addr_u cfgrsp_addr; /* config response dma address */ 68 union bfi_addr_u cfgrsp_addr; /* config response dma address */
57 union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */ 69 union bfi_addr_u ioim_snsbase[BFI_IOIM_SNSBUF_SEGS];
70 /* IO sense buf base addr segments */
58 struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */ 71 struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
59}; 72};
60 73
@@ -68,11 +81,25 @@ struct bfi_iocfc_bootwwns {
68 u8 rsvd[7]; 81 u8 rsvd[7];
69}; 82};
70 83
84/**
85 * Queue configuration response from firmware
86 */
87struct bfi_iocfc_qreg_s {
88 u32 cpe_q_ci_off[BFI_IOC_MAX_CQS];
89 u32 cpe_q_pi_off[BFI_IOC_MAX_CQS];
90 u32 cpe_qctl_off[BFI_IOC_MAX_CQS];
91 u32 rme_q_ci_off[BFI_IOC_MAX_CQS];
92 u32 rme_q_pi_off[BFI_IOC_MAX_CQS];
93 u32 rme_qctl_off[BFI_IOC_MAX_CQS];
94 u8 hw_qid[BFI_IOC_MAX_CQS];
95};
96
71struct bfi_iocfc_cfgrsp_s { 97struct bfi_iocfc_cfgrsp_s {
72 struct bfa_iocfc_fwcfg_s fwcfg; 98 struct bfa_iocfc_fwcfg_s fwcfg;
73 struct bfa_iocfc_intr_attr_s intr_attr; 99 struct bfa_iocfc_intr_attr_s intr_attr;
74 struct bfi_iocfc_bootwwns bootwwns; 100 struct bfi_iocfc_bootwwns bootwwns;
75 struct bfi_pbc_s pbc_cfg; 101 struct bfi_pbc_s pbc_cfg;
102 struct bfi_iocfc_qreg_s qreg;
76}; 103};
77 104
78/* 105/*
@@ -150,6 +177,37 @@ union bfi_iocfc_i2h_msg_u {
150 u32 mboxmsg[BFI_IOC_MSGSZ]; 177 u32 mboxmsg[BFI_IOC_MSGSZ];
151}; 178};
152 179
180/*
181 * BFI_IOCFC_H2I_FAA_ENABLE_REQ BFI_IOCFC_H2I_FAA_DISABLE_REQ message
182 */
183struct bfi_faa_en_dis_s {
184 struct bfi_mhdr_s mh; /* common msg header */
185};
186
187/*
188 * BFI_IOCFC_H2I_FAA_QUERY_REQ message
189 */
190struct bfi_faa_query_s {
191 struct bfi_mhdr_s mh; /* common msg header */
192 u8 faa_status; /* FAA status */
193 u8 addr_source; /* PWWN source */
194 u8 rsvd[2];
195 wwn_t faa; /* Fabric acquired PWWN */
196};
197
198/*
199 * BFI_IOCFC_I2H_FAA_ENABLE_RSP, BFI_IOCFC_I2H_FAA_DISABLE_RSP message
200 */
201struct bfi_faa_en_dis_rsp_s {
202 struct bfi_mhdr_s mh; /* common msg header */
203 u8 status; /* updateq status */
204 u8 rsvd[3];
205};
206
207/*
208 * BFI_IOCFC_I2H_FAA_QUERY_RSP message
209 */
210#define bfi_faa_query_rsp_t struct bfi_faa_query_s
153 211
154enum bfi_fcport_h2i { 212enum bfi_fcport_h2i {
155 BFI_FCPORT_H2I_ENABLE_REQ = (1), 213 BFI_FCPORT_H2I_ENABLE_REQ = (1),
@@ -213,7 +271,8 @@ struct bfi_fcport_enable_req_s {
213struct bfi_fcport_set_svc_params_req_s { 271struct bfi_fcport_set_svc_params_req_s {
214 struct bfi_mhdr_s mh; /* msg header */ 272 struct bfi_mhdr_s mh; /* msg header */
215 __be16 tx_bbcredit; /* Tx credits */ 273 __be16 tx_bbcredit; /* Tx credits */
216 u16 rsvd; 274 u8 bb_scn; /* BB_SC FC credit recovery */
275 u8 rsvd;
217}; 276};
218 277
219/* 278/*
@@ -293,12 +352,12 @@ struct bfi_fcxp_send_req_s {
293 u8 class; /* FC class used for req/rsp */ 352 u8 class; /* FC class used for req/rsp */
294 u8 rsp_timeout; /* timeout in secs, 0-no response */ 353 u8 rsp_timeout; /* timeout in secs, 0-no response */
295 u8 cts; /* continue sequence */ 354 u8 cts; /* continue sequence */
296 u8 lp_tag; /* lport tag */ 355 u8 lp_fwtag; /* lport tag */
297 struct fchs_s fchs; /* request FC header structure */ 356 struct fchs_s fchs; /* request FC header structure */
298 __be32 req_len; /* request payload length */ 357 __be32 req_len; /* request payload length */
299 __be32 rsp_maxlen; /* max response length expected */ 358 __be32 rsp_maxlen; /* max response length expected */
300 struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */ 359 struct bfi_alen_s req_alen; /* request buffer */
301 struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ 360 struct bfi_alen_s rsp_alen; /* response buffer */
302}; 361};
303 362
304/* 363/*
@@ -328,7 +387,7 @@ struct bfi_uf_buf_post_s {
328 struct bfi_mhdr_s mh; /* Common msg header */ 387 struct bfi_mhdr_s mh; /* Common msg header */
329 u16 buf_tag; /* buffer tag */ 388 u16 buf_tag; /* buffer tag */
330 __be16 buf_len; /* total buffer length */ 389 __be16 buf_len; /* total buffer length */
331 struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */ 390 struct bfi_alen_s alen; /* buffer address/len pair */
332}; 391};
333 392
334struct bfi_uf_frm_rcvd_s { 393struct bfi_uf_frm_rcvd_s {
@@ -346,26 +405,27 @@ enum bfi_lps_h2i_msgs {
346}; 405};
347 406
348enum bfi_lps_i2h_msgs { 407enum bfi_lps_i2h_msgs {
349 BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1), 408 BFI_LPS_I2H_LOGIN_RSP = BFA_I2HM(1),
350 BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2), 409 BFI_LPS_I2H_LOGOUT_RSP = BFA_I2HM(2),
351 BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3), 410 BFI_LPS_I2H_CVL_EVENT = BFA_I2HM(3),
352}; 411};
353 412
354struct bfi_lps_login_req_s { 413struct bfi_lps_login_req_s {
355 struct bfi_mhdr_s mh; /* common msg header */ 414 struct bfi_mhdr_s mh; /* common msg header */
356 u8 lp_tag; 415 u8 bfa_tag;
357 u8 alpa; 416 u8 alpa;
358 __be16 pdu_size; 417 __be16 pdu_size;
359 wwn_t pwwn; 418 wwn_t pwwn;
360 wwn_t nwwn; 419 wwn_t nwwn;
361 u8 fdisc; 420 u8 fdisc;
362 u8 auth_en; 421 u8 auth_en;
363 u8 rsvd[2]; 422 u8 lps_role;
423 u8 bb_scn;
364}; 424};
365 425
366struct bfi_lps_login_rsp_s { 426struct bfi_lps_login_rsp_s {
367 struct bfi_mhdr_s mh; /* common msg header */ 427 struct bfi_mhdr_s mh; /* common msg header */
368 u8 lp_tag; 428 u8 fw_tag;
369 u8 status; 429 u8 status;
370 u8 lsrjt_rsn; 430 u8 lsrjt_rsn;
371 u8 lsrjt_expl; 431 u8 lsrjt_expl;
@@ -380,31 +440,33 @@ struct bfi_lps_login_rsp_s {
380 mac_t fcf_mac; 440 mac_t fcf_mac;
381 u8 ext_status; 441 u8 ext_status;
382 u8 brcd_switch; /* attached peer is brcd switch */ 442 u8 brcd_switch; /* attached peer is brcd switch */
443 u8 bb_scn; /* atatched port's bb_scn */
444 u8 bfa_tag;
383}; 445};
384 446
385struct bfi_lps_logout_req_s { 447struct bfi_lps_logout_req_s {
386 struct bfi_mhdr_s mh; /* common msg header */ 448 struct bfi_mhdr_s mh; /* common msg header */
387 u8 lp_tag; 449 u8 fw_tag;
388 u8 rsvd[3]; 450 u8 rsvd[3];
389 wwn_t port_name; 451 wwn_t port_name;
390}; 452};
391 453
392struct bfi_lps_logout_rsp_s { 454struct bfi_lps_logout_rsp_s {
393 struct bfi_mhdr_s mh; /* common msg header */ 455 struct bfi_mhdr_s mh; /* common msg header */
394 u8 lp_tag; 456 u8 bfa_tag;
395 u8 status; 457 u8 status;
396 u8 rsvd[2]; 458 u8 rsvd[2];
397}; 459};
398 460
399struct bfi_lps_cvl_event_s { 461struct bfi_lps_cvl_event_s {
400 struct bfi_mhdr_s mh; /* common msg header */ 462 struct bfi_mhdr_s mh; /* common msg header */
401 u8 lp_tag; 463 u8 bfa_tag;
402 u8 rsvd[3]; 464 u8 rsvd[3];
403}; 465};
404 466
405struct bfi_lps_n2n_pid_req_s { 467struct bfi_lps_n2n_pid_req_s {
406 struct bfi_mhdr_s mh; /* common msg header */ 468 struct bfi_mhdr_s mh; /* common msg header */
407 u8 lp_tag; 469 u8 fw_tag;
408 u32 lp_pid:24; 470 u32 lp_pid:24;
409}; 471};
410 472
@@ -439,7 +501,7 @@ struct bfi_rport_create_req_s {
439 u16 bfa_handle; /* host rport handle */ 501 u16 bfa_handle; /* host rport handle */
440 __be16 max_frmsz; /* max rcv pdu size */ 502 __be16 max_frmsz; /* max rcv pdu size */
441 u32 pid:24, /* remote port ID */ 503 u32 pid:24, /* remote port ID */
442 lp_tag:8; /* local port tag */ 504 lp_fwtag:8; /* local port tag */
443 u32 local_pid:24, /* local port ID */ 505 u32 local_pid:24, /* local port ID */
444 cisc:8; 506 cisc:8;
445 u8 fc_class; /* supported FC classes */ 507 u8 fc_class; /* supported FC classes */
@@ -502,62 +564,63 @@ union bfi_rport_i2h_msg_u {
502 * Initiator mode I-T nexus interface defines. 564 * Initiator mode I-T nexus interface defines.
503 */ 565 */
504 566
505enum bfi_itnim_h2i { 567enum bfi_itn_h2i {
506 BFI_ITNIM_H2I_CREATE_REQ = 1, /* i-t nexus creation */ 568 BFI_ITN_H2I_CREATE_REQ = 1, /* i-t nexus creation */
507 BFI_ITNIM_H2I_DELETE_REQ = 2, /* i-t nexus deletion */ 569 BFI_ITN_H2I_DELETE_REQ = 2, /* i-t nexus deletion */
508}; 570};
509 571
510enum bfi_itnim_i2h { 572enum bfi_itn_i2h {
511 BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1), 573 BFI_ITN_I2H_CREATE_RSP = BFA_I2HM(1),
512 BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2), 574 BFI_ITN_I2H_DELETE_RSP = BFA_I2HM(2),
513 BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3), 575 BFI_ITN_I2H_SLER_EVENT = BFA_I2HM(3),
514}; 576};
515 577
516struct bfi_itnim_create_req_s { 578struct bfi_itn_create_req_s {
517 struct bfi_mhdr_s mh; /* common msg header */ 579 struct bfi_mhdr_s mh; /* common msg header */
518 u16 fw_handle; /* f/w handle for itnim */ 580 u16 fw_handle; /* f/w handle for itnim */
519 u8 class; /* FC class for IO */ 581 u8 class; /* FC class for IO */
520 u8 seq_rec; /* sequence recovery support */ 582 u8 seq_rec; /* sequence recovery support */
521 u8 msg_no; /* seq id of the msg */ 583 u8 msg_no; /* seq id of the msg */
584 u8 role;
522}; 585};
523 586
524struct bfi_itnim_create_rsp_s { 587struct bfi_itn_create_rsp_s {
525 struct bfi_mhdr_s mh; /* common msg header */ 588 struct bfi_mhdr_s mh; /* common msg header */
526 u16 bfa_handle; /* bfa handle for itnim */ 589 u16 bfa_handle; /* bfa handle for itnim */
527 u8 status; /* fcp request status */ 590 u8 status; /* fcp request status */
528 u8 seq_id; /* seq id of the msg */ 591 u8 seq_id; /* seq id of the msg */
529}; 592};
530 593
531struct bfi_itnim_delete_req_s { 594struct bfi_itn_delete_req_s {
532 struct bfi_mhdr_s mh; /* common msg header */ 595 struct bfi_mhdr_s mh; /* common msg header */
533 u16 fw_handle; /* f/w itnim handle */ 596 u16 fw_handle; /* f/w itnim handle */
534 u8 seq_id; /* seq id of the msg */ 597 u8 seq_id; /* seq id of the msg */
535 u8 rsvd; 598 u8 rsvd;
536}; 599};
537 600
538struct bfi_itnim_delete_rsp_s { 601struct bfi_itn_delete_rsp_s {
539 struct bfi_mhdr_s mh; /* common msg header */ 602 struct bfi_mhdr_s mh; /* common msg header */
540 u16 bfa_handle; /* bfa handle for itnim */ 603 u16 bfa_handle; /* bfa handle for itnim */
541 u8 status; /* fcp request status */ 604 u8 status; /* fcp request status */
542 u8 seq_id; /* seq id of the msg */ 605 u8 seq_id; /* seq id of the msg */
543}; 606};
544 607
545struct bfi_itnim_sler_event_s { 608struct bfi_itn_sler_event_s {
546 struct bfi_mhdr_s mh; /* common msg header */ 609 struct bfi_mhdr_s mh; /* common msg header */
547 u16 bfa_handle; /* bfa handle for itnim */ 610 u16 bfa_handle; /* bfa handle for itnim */
548 u16 rsvd; 611 u16 rsvd;
549}; 612};
550 613
551union bfi_itnim_h2i_msg_u { 614union bfi_itn_h2i_msg_u {
552 struct bfi_itnim_create_req_s *create_req; 615 struct bfi_itn_create_req_s *create_req;
553 struct bfi_itnim_delete_req_s *delete_req; 616 struct bfi_itn_delete_req_s *delete_req;
554 struct bfi_msg_s *msg; 617 struct bfi_msg_s *msg;
555}; 618};
556 619
557union bfi_itnim_i2h_msg_u { 620union bfi_itn_i2h_msg_u {
558 struct bfi_itnim_create_rsp_s *create_rsp; 621 struct bfi_itn_create_rsp_s *create_rsp;
559 struct bfi_itnim_delete_rsp_s *delete_rsp; 622 struct bfi_itn_delete_rsp_s *delete_rsp;
560 struct bfi_itnim_sler_event_s *sler_event; 623 struct bfi_itn_sler_event_s *sler_event;
561 struct bfi_msg_s *msg; 624 struct bfi_msg_s *msg;
562}; 625};
563 626
@@ -693,7 +756,6 @@ enum bfi_ioim_status {
693 BFI_IOIM_STS_PATHTOV = 8, 756 BFI_IOIM_STS_PATHTOV = 8,
694}; 757};
695 758
696#define BFI_IOIM_SNSLEN (256)
697/* 759/*
698 * I/O response message 760 * I/O response message
699 */ 761 */
@@ -772,4 +834,27 @@ struct bfi_tskim_rsp_s {
772 834
773#pragma pack() 835#pragma pack()
774 836
837/*
838 * Crossbow PCI MSI-X vector defines
839 */
840enum {
841 BFI_MSIX_CPE_QMIN_CB = 0,
842 BFI_MSIX_CPE_QMAX_CB = 7,
843 BFI_MSIX_RME_QMIN_CB = 8,
844 BFI_MSIX_RME_QMAX_CB = 15,
845 BFI_MSIX_CB_MAX = 22,
846};
847
848/*
849 * Catapult FC PCI MSI-X vector defines
850 */
851enum {
852 BFI_MSIX_LPU_ERR_CT = 0,
853 BFI_MSIX_CPE_QMIN_CT = 1,
854 BFI_MSIX_CPE_QMAX_CT = 4,
855 BFI_MSIX_RME_QMIN_CT = 5,
856 BFI_MSIX_RME_QMAX_CT = 8,
857 BFI_MSIX_CT_MAX = 9,
858};
859
775#endif /* __BFI_MS_H__ */ 860#endif /* __BFI_MS_H__ */
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
new file mode 100644
index 000000000000..d892064b64a8
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
20 */
21
22#ifndef __BFI_REG_H__
23#define __BFI_REG_H__
24
25#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */
26#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */
27#define HOSTFN2_INT_STATUS 0x00014300 /* ct */
28#define HOSTFN3_INT_STATUS 0x00014400 /* ct */
29#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */
30#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */
31#define HOSTFN2_INT_MSK 0x00014304 /* ct */
32#define HOSTFN3_INT_MSK 0x00014404 /* ct */
33
34#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */
35#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */
36#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */
37#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */
38
39#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */
40#define __P_LCLK_PLL_LOCK 0x80000000
41#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
42#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000
43#define __APP_PLL_LCLK_RESET_TIMER_SH 17
44#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
45#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000
46#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000
47#define __APP_PLL_LCLK_CNTLMT0_1_SH 14
48#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
49#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000
50#define __APP_PLL_LCLK_JITLMT0_1_SH 12
51#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
52#define __APP_PLL_LCLK_HREF 0x00000800
53#define __APP_PLL_LCLK_HDIV 0x00000400
54#define __APP_PLL_LCLK_P0_1_MK 0x00000300
55#define __APP_PLL_LCLK_P0_1_SH 8
56#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH)
57#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0
58#define __APP_PLL_LCLK_Z0_2_SH 5
59#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH)
60#define __APP_PLL_LCLK_RSEL200500 0x00000010
61#define __APP_PLL_LCLK_ENARST 0x00000008
62#define __APP_PLL_LCLK_BYPASS 0x00000004
63#define __APP_PLL_LCLK_LRESETN 0x00000002
64#define __APP_PLL_LCLK_ENABLE 0x00000001
65#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */
66#define __P_SCLK_PLL_LOCK 0x80000000
67#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000
68#define __APP_PLL_SCLK_RESET_TIMER_SH 17
69#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
70#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000
71#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000
72#define __APP_PLL_SCLK_CNTLMT0_1_SH 14
73#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
74#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000
75#define __APP_PLL_SCLK_JITLMT0_1_SH 12
76#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
77#define __APP_PLL_SCLK_HREF 0x00000800
78#define __APP_PLL_SCLK_HDIV 0x00000400
79#define __APP_PLL_SCLK_P0_1_MK 0x00000300
80#define __APP_PLL_SCLK_P0_1_SH 8
81#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH)
82#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0
83#define __APP_PLL_SCLK_Z0_2_SH 5
84#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH)
85#define __APP_PLL_SCLK_RSEL200500 0x00000010
86#define __APP_PLL_SCLK_ENARST 0x00000008
87#define __APP_PLL_SCLK_BYPASS 0x00000004
88#define __APP_PLL_SCLK_LRESETN 0x00000002
89#define __APP_PLL_SCLK_ENABLE 0x00000001
90#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */
91#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */
92#define __ENABLE_MAC_1 0x00200000 /* ct */
93#define __ENABLE_MAC_0 0x00100000 /* ct */
94
95#define HOST_SEM0_REG 0x00014230 /* cb/ct */
96#define HOST_SEM1_REG 0x00014234 /* cb/ct */
97#define HOST_SEM2_REG 0x00014238 /* cb/ct */
98#define HOST_SEM3_REG 0x0001423c /* cb/ct */
99#define HOST_SEM4_REG 0x00014610 /* cb/ct */
100#define HOST_SEM5_REG 0x00014614 /* cb/ct */
101#define HOST_SEM6_REG 0x00014618 /* cb/ct */
102#define HOST_SEM7_REG 0x0001461c /* cb/ct */
103#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */
104#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */
105#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */
106#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */
107#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */
108#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */
109#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */
110#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */
111
112#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */
113#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */
114#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */
115#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */
116#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */
117#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */
118#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */
119#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */
120#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */
121#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */
122#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */
123#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */
124#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */
125#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */
126#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */
127#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */
128
129#define PSS_CTL_REG 0x00018800 /* cb/ct */
130#define __PSS_I2C_CLK_DIV_MK 0x007f0000
131#define __PSS_I2C_CLK_DIV_SH 16
132#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
133#define __PSS_LMEM_INIT_DONE 0x00001000
134#define __PSS_LMEM_RESET 0x00000200
135#define __PSS_LMEM_INIT_EN 0x00000100
136#define __PSS_LPU1_RESET 0x00000002
137#define __PSS_LPU0_RESET 0x00000001
138#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */
139#define ERR_SET_REG 0x00018818 /* cb/ct */
140#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */
141#define __PSS_GPIO_OUT_REG 0x00000fff
142#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */
143#define __PSS_GPIO_OE_REG 0x000000ff
144
145#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */
146#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */
147#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */
148#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */
149#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */
150#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */
151#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */
152#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */
153
154#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */
155#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */
156#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */
157#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */
158
159#define MBIST_CTL_REG 0x00014220 /* ct */
160#define __EDRAM_BISTR_START 0x00000004
161#define MBIST_STAT_REG 0x00014224 /* ct */
162#define ETH_MAC_SER_REG 0x00014288 /* ct */
163#define __APP_EMS_CKBUFAMPIN 0x00000020
164#define __APP_EMS_REFCLKSEL 0x00000010
165#define __APP_EMS_CMLCKSEL 0x00000008
166#define __APP_EMS_REFCKBUFEN2 0x00000004
167#define __APP_EMS_REFCKBUFEN1 0x00000002
168#define __APP_EMS_CHANNEL_SEL 0x00000001
169#define FNC_PERS_REG 0x00014604 /* ct */
170#define __F3_FUNCTION_ACTIVE 0x80000000
171#define __F3_FUNCTION_MODE 0x40000000
172#define __F3_PORT_MAP_MK 0x30000000
173#define __F3_PORT_MAP_SH 28
174#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
175#define __F3_VM_MODE 0x08000000
176#define __F3_INTX_STATUS_MK 0x07000000
177#define __F3_INTX_STATUS_SH 24
178#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
179#define __F2_FUNCTION_ACTIVE 0x00800000
180#define __F2_FUNCTION_MODE 0x00400000
181#define __F2_PORT_MAP_MK 0x00300000
182#define __F2_PORT_MAP_SH 20
183#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
184#define __F2_VM_MODE 0x00080000
185#define __F2_INTX_STATUS_MK 0x00070000
186#define __F2_INTX_STATUS_SH 16
187#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
188#define __F1_FUNCTION_ACTIVE 0x00008000
189#define __F1_FUNCTION_MODE 0x00004000
190#define __F1_PORT_MAP_MK 0x00003000
191#define __F1_PORT_MAP_SH 12
192#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
193#define __F1_VM_MODE 0x00000800
194#define __F1_INTX_STATUS_MK 0x00000700
195#define __F1_INTX_STATUS_SH 8
196#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
197#define __F0_FUNCTION_ACTIVE 0x00000080
198#define __F0_FUNCTION_MODE 0x00000040
199#define __F0_PORT_MAP_MK 0x00000030
200#define __F0_PORT_MAP_SH 4
201#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
202#define __F0_VM_MODE 0x00000008
203#define __F0_INTX_STATUS 0x00000007
204enum {
205 __F0_INTX_STATUS_MSIX = 0x0,
206 __F0_INTX_STATUS_INTA = 0x1,
207 __F0_INTX_STATUS_INTB = 0x2,
208 __F0_INTX_STATUS_INTC = 0x3,
209 __F0_INTX_STATUS_INTD = 0x4,
210};
211
212#define OP_MODE 0x0001460c /* ct */
213#define __APP_ETH_CLK_LOWSPEED 0x00000004
214#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
215#define __GLOBAL_FCOE_MODE 0x00000001
216#define FW_INIT_HALT_P0 0x000191ac /* ct */
217#define __FW_INIT_HALT_P 0x00000001
218#define FW_INIT_HALT_P1 0x000191bc /* ct */
219#define PMM_1T_RESET_REG_P0 0x0002381c /* ct */
220#define __PMM_1T_RESET_P 0x00000001
221#define PMM_1T_RESET_REG_P1 0x00023c1c /* ct */
222
223/**
224 * Catapult-2 specific defines
225 */
226#define CT2_PCI_CPQ_BASE 0x00030000
227#define CT2_PCI_APP_BASE 0x00030100
228#define CT2_PCI_ETH_BASE 0x00030400
229
230/*
231 * APP block registers
232 */
233#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00)
234#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04)
235#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08)
236#define __PME_STATUS_ 0x00200000
237#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000
238#define __PF_VF_BAR_SIZE_MODE__SH 19
239#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
240#define __FC_LL_PORT_MAP__MK 0x00060000
241#define __FC_LL_PORT_MAP__SH 17
242#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH)
243#define __PF_VF_ACTIVE_ 0x00010000
244#define __PF_VF_CFG_RDY_ 0x00008000
245#define __PF_VF_ENABLE_ 0x00004000
246#define __PF_DRIVER_ACTIVE_ 0x00002000
247#define __PF_PME_SEND_ENABLE_ 0x00001000
248#define __PF_EXROM_OFFSET__MK 0x00000ff0
249#define __PF_EXROM_OFFSET__SH 4
250#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH)
251#define __FC_LL_MODE_ 0x00000008
252#define __PF_INTX_PIN_ 0x00000007
253#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C)
254#define __PF_NUM_QUEUES1__MK 0xff000000
255#define __PF_NUM_QUEUES1__SH 24
256#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH)
257#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000
258#define __PF_VF_QUE_OFFSET1__SH 16
259#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH)
260#define __PF_VF_NUM_QUEUES__MK 0x0000ff00
261#define __PF_VF_NUM_QUEUES__SH 8
262#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH)
263#define __PF_VF_QUE_OFFSET_ 0x000000ff
264#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18)
265#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38)
266
267/*
268 * Catapult-2 CPQ block registers
269 */
270#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00)
271#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20)
272#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40)
273#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60)
274#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80)
275#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84)
276#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88)
277#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c)
278#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90)
279#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94)
280#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98)
281#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C)
282#define CT2_HOST_SEM0_REG 0x000148f0
283#define CT2_HOST_SEM1_REG 0x000148f4
284#define CT2_HOST_SEM2_REG 0x000148f8
285#define CT2_HOST_SEM3_REG 0x000148fc
286#define CT2_HOST_SEM4_REG 0x00014900
287#define CT2_HOST_SEM5_REG 0x00014904
288#define CT2_HOST_SEM6_REG 0x00014908
289#define CT2_HOST_SEM7_REG 0x0001490c
290#define CT2_HOST_SEM0_INFO_REG 0x000148b0
291#define CT2_HOST_SEM1_INFO_REG 0x000148b4
292#define CT2_HOST_SEM2_INFO_REG 0x000148b8
293#define CT2_HOST_SEM3_INFO_REG 0x000148bc
294#define CT2_HOST_SEM4_INFO_REG 0x000148c0
295#define CT2_HOST_SEM5_INFO_REG 0x000148c4
296#define CT2_HOST_SEM6_INFO_REG 0x000148c8
297#define CT2_HOST_SEM7_INFO_REG 0x000148cc
298
299#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808
300#define __APP_LPUCLK_HALFSPEED 0x40000000
301#define __APP_PLL_LCLK_LOAD 0x20000000
302#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000
303#define __APP_PLL_LCLK_FBCNT_SH 21
304#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
305enum {
306 __APP_PLL_LCLK_FBCNT_425_MHZ = 6,
307 __APP_PLL_LCLK_FBCNT_468_MHZ = 4,
308};
309#define __APP_PLL_LCLK_EXTFB 0x00000800
310#define __APP_PLL_LCLK_ENOUTS 0x00000400
311#define __APP_PLL_LCLK_RATE 0x00000010
312#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c
313#define __P_SCLK_PLL_LOCK 0x80000000
314#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000
315#define __APP_PLL_SCLK_CLK_DIV2 0x20000000
316#define __APP_PLL_SCLK_LOAD 0x10000000
317#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000
318#define __APP_PLL_SCLK_FBCNT_SH 20
319#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
320enum {
321 __APP_PLL_SCLK_FBCNT_NORM = 6,
322 __APP_PLL_SCLK_FBCNT_10G_FC = 10,
323};
324#define __APP_PLL_SCLK_EXTFB 0x00000800
325#define __APP_PLL_SCLK_ENOUTS 0x00000400
326#define __APP_PLL_SCLK_RATE 0x00000010
327#define CT2_PCIE_MISC_REG 0x00014804
328#define __ETH_CLK_ENABLE_PORT1 0x00000010
329#define CT2_CHIP_MISC_PRG 0x000148a4
330#define __ETH_CLK_ENABLE_PORT0 0x00004000
331#define __APP_LPU_SPEED 0x00000002
332#define CT2_MBIST_STAT_REG 0x00014818
333#define CT2_MBIST_CTL_REG 0x0001481c
334#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c
335#define __PMM_1T_PNDB_P 0x00000002
336#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
337#define CT2_WGN_STATUS 0x00014990
338#define __WGN_READY 0x00000400
339#define __GLBL_PF_VF_CFG_RDY 0x00000200
340#define CT2_NFC_CSR_SET_REG 0x00027424
341#define __HALT_NFC_CONTROLLER 0x00000002
342#define __NFC_CONTROLLER_HALTED 0x00001000
343
344#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
345#define __CSI_MAC_RESET 0x00000010
346#define __CSI_MAC_AHB_RESET 0x00000008
347#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4
348#define CT2_CSI_MAC_CONTROL_REG(__n) \
349 (CT2_CSI_MAC0_CONTROL_REG + \
350 (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
351
352/*
353 * Name semaphore registers based on usage
354 */
355#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
356#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
357#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
358#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
359#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
360#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
361
362/*
363 * CT2 semaphore register locations changed
364 */
365#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG
366#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG
367#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG
368#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG
369#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG
370#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG
371
372#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
373#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
374
375/*
376 * And corresponding host interrupt status bit field defines
377 */
378#define __HFN_INT_CPE_Q0 0x00000001U
379#define __HFN_INT_CPE_Q1 0x00000002U
380#define __HFN_INT_CPE_Q2 0x00000004U
381#define __HFN_INT_CPE_Q3 0x00000008U
382#define __HFN_INT_CPE_Q4 0x00000010U
383#define __HFN_INT_CPE_Q5 0x00000020U
384#define __HFN_INT_CPE_Q6 0x00000040U
385#define __HFN_INT_CPE_Q7 0x00000080U
386#define __HFN_INT_RME_Q0 0x00000100U
387#define __HFN_INT_RME_Q1 0x00000200U
388#define __HFN_INT_RME_Q2 0x00000400U
389#define __HFN_INT_RME_Q3 0x00000800U
390#define __HFN_INT_RME_Q4 0x00001000U
391#define __HFN_INT_RME_Q5 0x00002000U
392#define __HFN_INT_RME_Q6 0x00004000U
393#define __HFN_INT_RME_Q7 0x00008000U
394#define __HFN_INT_ERR_EMC 0x00010000U
395#define __HFN_INT_ERR_LPU0 0x00020000U
396#define __HFN_INT_ERR_LPU1 0x00040000U
397#define __HFN_INT_ERR_PSS 0x00080000U
398#define __HFN_INT_MBOX_LPU0 0x00100000U
399#define __HFN_INT_MBOX_LPU1 0x00200000U
400#define __HFN_INT_MBOX1_LPU0 0x00400000U
401#define __HFN_INT_MBOX1_LPU1 0x00800000U
402#define __HFN_INT_LL_HALT 0x01000000U
403#define __HFN_INT_CPE_MASK 0x000000ffU
404#define __HFN_INT_RME_MASK 0x0000ff00U
405#define __HFN_INT_ERR_MASK \
406 (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
407 __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
408#define __HFN_INT_FN0_MASK \
409 (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
410 __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
411 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
412#define __HFN_INT_FN1_MASK \
413 (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
414 __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
415 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
416
417/*
418 * Host interrupt status defines for catapult-2
419 */
420#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U
421#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U
422#define __HFN_INT_ERR_PSS_CT2 0x00040000U
423#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
424#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
425#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
426#define __HFN_INT_ERR_WGN_CT2 0x00400000U
427#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U
428#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U
429#define __HFN_INT_ERR_MASK_CT2 \
430 (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
431 __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
432 __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
433 __HFN_INT_ERR_LEHTX_CT2)
434#define __HFN_INT_FN0_MASK_CT2 \
435 (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
436 __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
437 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
438#define __HFN_INT_FN1_MASK_CT2 \
439 (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
440 __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
441 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
442
443/*
444 * asic memory map.
445 */
446#define PSS_SMEM_PAGE_START 0x8000
447#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
448#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
449
450#endif /* __BFI_REG_H__ */
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 907672e86063..d924236e1b91 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -152,7 +152,6 @@ struct bnx2fc_percpu_s {
152 spinlock_t fp_work_lock; 152 spinlock_t fp_work_lock;
153}; 153};
154 154
155
156struct bnx2fc_hba { 155struct bnx2fc_hba {
157 struct list_head link; 156 struct list_head link;
158 struct cnic_dev *cnic; 157 struct cnic_dev *cnic;
@@ -179,6 +178,7 @@ struct bnx2fc_hba {
179 #define BNX2FC_CTLR_INIT_DONE 1 178 #define BNX2FC_CTLR_INIT_DONE 1
180 #define BNX2FC_CREATE_DONE 2 179 #define BNX2FC_CREATE_DONE 2
181 struct fcoe_ctlr ctlr; 180 struct fcoe_ctlr ctlr;
181 struct list_head vports;
182 u8 vlan_enabled; 182 u8 vlan_enabled;
183 int vlan_id; 183 int vlan_id;
184 u32 next_conn_id; 184 u32 next_conn_id;
@@ -232,6 +232,11 @@ struct bnx2fc_hba {
232 232
233#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr) 233#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
234 234
235struct bnx2fc_lport {
236 struct list_head list;
237 struct fc_lport *lport;
238};
239
235struct bnx2fc_cmd_mgr { 240struct bnx2fc_cmd_mgr {
236 struct bnx2fc_hba *hba; 241 struct bnx2fc_hba *hba;
237 u16 next_idx; 242 u16 next_idx;
@@ -428,6 +433,7 @@ struct bnx2fc_work {
428struct bnx2fc_unsol_els { 433struct bnx2fc_unsol_els {
429 struct fc_lport *lport; 434 struct fc_lport *lport;
430 struct fc_frame *fp; 435 struct fc_frame *fp;
436 struct bnx2fc_hba *hba;
431 struct work_struct unsol_els_work; 437 struct work_struct unsol_els_work;
432}; 438};
433 439
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9eebaebdaa78..a97aff3a0662 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -679,6 +679,9 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
679 case SPEED_1000: 679 case SPEED_1000:
680 lport->link_speed = FC_PORTSPEED_1GBIT; 680 lport->link_speed = FC_PORTSPEED_1GBIT;
681 break; 681 break;
682 case SPEED_2500:
683 lport->link_speed = FC_PORTSPEED_2GBIT;
684 break;
682 case SPEED_10000: 685 case SPEED_10000:
683 lport->link_speed = FC_PORTSPEED_10GBIT; 686 lport->link_speed = FC_PORTSPEED_10GBIT;
684 break; 687 break;
@@ -1231,6 +1234,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
1231 hba->ctlr.get_src_addr = bnx2fc_get_src_mac; 1234 hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
1232 set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done); 1235 set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
1233 1236
1237 INIT_LIST_HEAD(&hba->vports);
1234 rc = bnx2fc_netdev_setup(hba); 1238 rc = bnx2fc_netdev_setup(hba);
1235 if (rc) 1239 if (rc)
1236 goto setup_err; 1240 goto setup_err;
@@ -1267,8 +1271,15 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1267 struct fcoe_port *port; 1271 struct fcoe_port *port;
1268 struct Scsi_Host *shost; 1272 struct Scsi_Host *shost;
1269 struct fc_vport *vport = dev_to_vport(parent); 1273 struct fc_vport *vport = dev_to_vport(parent);
1274 struct bnx2fc_lport *blport;
1270 int rc = 0; 1275 int rc = 0;
1271 1276
1277 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
1278 if (!blport) {
1279 BNX2FC_HBA_DBG(hba->ctlr.lp, "Unable to alloc bnx2fc_lport\n");
1280 return NULL;
1281 }
1282
1272 /* Allocate Scsi_Host structure */ 1283 /* Allocate Scsi_Host structure */
1273 if (!npiv) 1284 if (!npiv)
1274 lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); 1285 lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
@@ -1277,7 +1288,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1277 1288
1278 if (!lport) { 1289 if (!lport) {
1279 printk(KERN_ERR PFX "could not allocate scsi host structure\n"); 1290 printk(KERN_ERR PFX "could not allocate scsi host structure\n");
1280 return NULL; 1291 goto free_blport;
1281 } 1292 }
1282 shost = lport->host; 1293 shost = lport->host;
1283 port = lport_priv(lport); 1294 port = lport_priv(lport);
@@ -1333,12 +1344,20 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1333 } 1344 }
1334 1345
1335 bnx2fc_interface_get(hba); 1346 bnx2fc_interface_get(hba);
1347
1348 spin_lock_bh(&hba->hba_lock);
1349 blport->lport = lport;
1350 list_add_tail(&blport->list, &hba->vports);
1351 spin_unlock_bh(&hba->hba_lock);
1352
1336 return lport; 1353 return lport;
1337 1354
1338shost_err: 1355shost_err:
1339 scsi_remove_host(shost); 1356 scsi_remove_host(shost);
1340lp_config_err: 1357lp_config_err:
1341 scsi_host_put(lport->host); 1358 scsi_host_put(lport->host);
1359free_blport:
1360 kfree(blport);
1342 return NULL; 1361 return NULL;
1343} 1362}
1344 1363
@@ -1354,6 +1373,7 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1354{ 1373{
1355 struct fcoe_port *port = lport_priv(lport); 1374 struct fcoe_port *port = lport_priv(lport);
1356 struct bnx2fc_hba *hba = port->priv; 1375 struct bnx2fc_hba *hba = port->priv;
1376 struct bnx2fc_lport *blport, *tmp;
1357 1377
1358 BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n"); 1378 BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
1359 /* Stop the transmit retry timer */ 1379 /* Stop the transmit retry timer */
@@ -1378,6 +1398,15 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1378 /* Free memory used by statistical counters */ 1398 /* Free memory used by statistical counters */
1379 fc_lport_free_stats(lport); 1399 fc_lport_free_stats(lport);
1380 1400
1401 spin_lock_bh(&hba->hba_lock);
1402 list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
1403 if (blport->lport == lport) {
1404 list_del(&blport->list);
1405 kfree(blport);
1406 }
1407 }
1408 spin_unlock_bh(&hba->hba_lock);
1409
1381 /* Release Scsi_Host */ 1410 /* Release Scsi_Host */
1382 scsi_host_put(lport->host); 1411 scsi_host_put(lport->host);
1383 1412
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index d8e8a825560d..09bdd9b88d1a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -486,16 +486,36 @@ int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
486 return rc; 486 return rc;
487} 487}
488 488
489static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
490{
491 struct bnx2fc_lport *blport;
492
493 spin_lock_bh(&hba->hba_lock);
494 list_for_each_entry(blport, &hba->vports, list) {
495 if (blport->lport == lport) {
496 spin_unlock_bh(&hba->hba_lock);
497 return true;
498 }
499 }
500 spin_unlock_bh(&hba->hba_lock);
501 return false;
502
503}
504
505
489static void bnx2fc_unsol_els_work(struct work_struct *work) 506static void bnx2fc_unsol_els_work(struct work_struct *work)
490{ 507{
491 struct bnx2fc_unsol_els *unsol_els; 508 struct bnx2fc_unsol_els *unsol_els;
492 struct fc_lport *lport; 509 struct fc_lport *lport;
510 struct bnx2fc_hba *hba;
493 struct fc_frame *fp; 511 struct fc_frame *fp;
494 512
495 unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); 513 unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
496 lport = unsol_els->lport; 514 lport = unsol_els->lport;
497 fp = unsol_els->fp; 515 fp = unsol_els->fp;
498 fc_exch_recv(lport, fp); 516 hba = unsol_els->hba;
517 if (is_valid_lport(hba, lport))
518 fc_exch_recv(lport, fp);
499 kfree(unsol_els); 519 kfree(unsol_els);
500} 520}
501 521
@@ -505,6 +525,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
505{ 525{
506 struct fcoe_port *port = tgt->port; 526 struct fcoe_port *port = tgt->port;
507 struct fc_lport *lport = port->lport; 527 struct fc_lport *lport = port->lport;
528 struct bnx2fc_hba *hba = port->priv;
508 struct bnx2fc_unsol_els *unsol_els; 529 struct bnx2fc_unsol_els *unsol_els;
509 struct fc_frame_header *fh; 530 struct fc_frame_header *fh;
510 struct fc_frame *fp; 531 struct fc_frame *fp;
@@ -565,6 +586,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
565 fr_eof(fp) = FC_EOF_T; 586 fr_eof(fp) = FC_EOF_T;
566 fr_crc(fp) = cpu_to_le32(~crc); 587 fr_crc(fp) = cpu_to_le32(~crc);
567 unsol_els->lport = lport; 588 unsol_els->lport = lport;
589 unsol_els->hba = hba;
568 unsol_els->fp = fp; 590 unsol_els->fp = fp;
569 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); 591 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
570 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); 592 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 5dc4205ed8af..45eba6d609c9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1743,7 +1743,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1743 printk(KERN_ERR PFX "SCp.ptr is NULL\n"); 1743 printk(KERN_ERR PFX "SCp.ptr is NULL\n");
1744 return; 1744 return;
1745 } 1745 }
1746 io_req->sc_cmd = NULL;
1747 1746
1748 if (io_req->on_active_queue) { 1747 if (io_req->on_active_queue) {
1749 list_del_init(&io_req->link); 1748 list_del_init(&io_req->link);
@@ -1763,6 +1762,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1763 } 1762 }
1764 1763
1765 bnx2fc_unmap_sg_list(io_req); 1764 bnx2fc_unmap_sg_list(io_req);
1765 io_req->sc_cmd = NULL;
1766 1766
1767 switch (io_req->fcp_status) { 1767 switch (io_req->fcp_status) {
1768 case FC_GOOD: 1768 case FC_GOOD:
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 15673cc786ff..57515f1f1690 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,6 +1,6 @@
1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI 1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
2 * 2 *
3 * Copyright (c) 2006 - 2010 Broadcom Corporation 3 * Copyright (c) 2006 - 2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index 71890a063cd3..72118db89a20 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,6 +1,6 @@
1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. 1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
2 * 2 *
3 * Copyright (c) 2006 - 2010 Broadcom Corporation 3 * Copyright (c) 2006 - 2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index e7cb7ecf6847..dc5700765db4 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2006 - 2010 Broadcom Corporation 3 * Copyright (c) 2006 - 2011 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie 5 * Copyright (c) 2007, 2008 Mike Christie
6 * 6 *
@@ -22,11 +22,14 @@
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/delay.h>
25#include <linux/sched.h> 26#include <linux/sched.h>
26#include <linux/in.h> 27#include <linux/in.h>
27#include <linux/kfifo.h> 28#include <linux/kfifo.h>
28#include <linux/netdevice.h> 29#include <linux/netdevice.h>
29#include <linux/completion.h> 30#include <linux/completion.h>
31#include <linux/kthread.h>
32#include <linux/cpu.h>
30 33
31#include <scsi/scsi_cmnd.h> 34#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_device.h> 35#include <scsi/scsi_device.h>
@@ -202,10 +205,13 @@ struct io_bdt {
202/** 205/**
203 * bnx2i_cmd - iscsi command structure 206 * bnx2i_cmd - iscsi command structure
204 * 207 *
208 * @hdr: iSCSI header
209 * @conn: iscsi_conn pointer
205 * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd 210 * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd
206 * @sg: SG list 211 * @sg: SG list
207 * @io_tbl: buffer descriptor (BD) table 212 * @io_tbl: buffer descriptor (BD) table
208 * @bd_tbl_dma: buffer descriptor (BD) table's dma address 213 * @bd_tbl_dma: buffer descriptor (BD) table's dma address
214 * @req: bnx2i specific command request struct
209 */ 215 */
210struct bnx2i_cmd { 216struct bnx2i_cmd {
211 struct iscsi_hdr hdr; 217 struct iscsi_hdr hdr;
@@ -229,6 +235,7 @@ struct bnx2i_cmd {
229 * @gen_pdu: login/nopout/logout pdu resources 235 * @gen_pdu: login/nopout/logout pdu resources
230 * @violation_notified: bit mask used to track iscsi error/warning messages 236 * @violation_notified: bit mask used to track iscsi error/warning messages
231 * already printed out 237 * already printed out
238 * @work_cnt: keeps track of the number of outstanding work
232 * 239 *
233 * iSCSI connection structure 240 * iSCSI connection structure
234 */ 241 */
@@ -252,6 +259,8 @@ struct bnx2i_conn {
252 */ 259 */
253 struct generic_pdu_resc gen_pdu; 260 struct generic_pdu_resc gen_pdu;
254 u64 violation_notified; 261 u64 violation_notified;
262
263 atomic_t work_cnt;
255}; 264};
256 265
257 266
@@ -661,7 +670,6 @@ enum {
661 * @hba: adapter to which this connection belongs 670 * @hba: adapter to which this connection belongs
662 * @conn: iscsi connection this EP is linked to 671 * @conn: iscsi connection this EP is linked to
663 * @cls_ep: associated iSCSI endpoint pointer 672 * @cls_ep: associated iSCSI endpoint pointer
664 * @sess: iscsi session this EP is linked to
665 * @cm_sk: cnic sock struct 673 * @cm_sk: cnic sock struct
666 * @hba_age: age to detect if 'iscsid' issues ep_disconnect() 674 * @hba_age: age to detect if 'iscsid' issues ep_disconnect()
667 * after HBA reset is completed by bnx2i/cnic/bnx2 675 * after HBA reset is completed by bnx2i/cnic/bnx2
@@ -687,7 +695,7 @@ struct bnx2i_endpoint {
687 u32 hba_age; 695 u32 hba_age;
688 u32 state; 696 u32 state;
689 unsigned long timestamp; 697 unsigned long timestamp;
690 int num_active_cmds; 698 atomic_t num_active_cmds;
691 u32 ec_shift; 699 u32 ec_shift;
692 700
693 struct qp_info qp; 701 struct qp_info qp;
@@ -700,6 +708,19 @@ struct bnx2i_endpoint {
700}; 708};
701 709
702 710
711struct bnx2i_work {
712 struct list_head list;
713 struct iscsi_session *session;
714 struct bnx2i_conn *bnx2i_conn;
715 struct cqe cqe;
716};
717
718struct bnx2i_percpu_s {
719 struct task_struct *iothread;
720 struct list_head work_list;
721 spinlock_t p_work_lock;
722};
723
703 724
704/* Global variables */ 725/* Global variables */
705extern unsigned int error_mask1, error_mask2; 726extern unsigned int error_mask1, error_mask2;
@@ -783,7 +804,7 @@ extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
783 struct bnx2i_hba *hba, u32 iscsi_cid); 804 struct bnx2i_hba *hba, u32 iscsi_cid);
784 805
785extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep); 806extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
786extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action); 807extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
787 808
788extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep); 809extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep);
789 810
@@ -793,4 +814,8 @@ extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
793extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn); 814extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
794extern void bnx2i_print_recv_state(struct bnx2i_conn *conn); 815extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
795 816
817extern int bnx2i_percpu_io_thread(void *arg);
818extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
819 struct bnx2i_conn *bnx2i_conn,
820 struct cqe *cqe);
796#endif 821#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 372d30c099cc..030a96c646c3 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2006 - 2010 Broadcom Corporation 3 * Copyright (c) 2006 - 2011 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie 5 * Copyright (c) 2007, 2008 Mike Christie
6 * 6 *
@@ -17,6 +17,8 @@
17#include <scsi/libiscsi.h> 17#include <scsi/libiscsi.h>
18#include "bnx2i.h" 18#include "bnx2i.h"
19 19
20DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
21
20/** 22/**
21 * bnx2i_get_cid_num - get cid from ep 23 * bnx2i_get_cid_num - get cid from ep
22 * @ep: endpoint pointer 24 * @ep: endpoint pointer
@@ -131,16 +133,16 @@ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
131 * the driver. EQ event is generated CQ index is hit or at least 1 CQ is 133 * the driver. EQ event is generated CQ index is hit or at least 1 CQ is
132 * outstanding and on chip timer expires 134 * outstanding and on chip timer expires
133 */ 135 */
134void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) 136int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
135{ 137{
136 struct bnx2i_5771x_cq_db *cq_db; 138 struct bnx2i_5771x_cq_db *cq_db;
137 u16 cq_index; 139 u16 cq_index;
138 u16 next_index; 140 u16 next_index = 0;
139 u32 num_active_cmds; 141 u32 num_active_cmds;
140 142
141 /* Coalesce CQ entries only on 10G devices */ 143 /* Coalesce CQ entries only on 10G devices */
142 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 144 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
143 return; 145 return 0;
144 146
145 /* Do not update CQ DB multiple times before firmware writes 147 /* Do not update CQ DB multiple times before firmware writes
146 * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious 148 * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
@@ -150,16 +152,17 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
150 152
151 if (action != CNIC_ARM_CQE_FP) 153 if (action != CNIC_ARM_CQE_FP)
152 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF) 154 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
153 return; 155 return 0;
154 156
155 if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) { 157 if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
156 num_active_cmds = ep->num_active_cmds; 158 num_active_cmds = atomic_read(&ep->num_active_cmds);
157 if (num_active_cmds <= event_coal_min) 159 if (num_active_cmds <= event_coal_min)
158 next_index = 1; 160 next_index = 1;
159 else 161 else {
160 next_index = event_coal_min + 162 next_index = num_active_cmds >> ep->ec_shift;
161 ((num_active_cmds - event_coal_min) >> 163 if (next_index > num_active_cmds - event_coal_min)
162 ep->ec_shift); 164 next_index = num_active_cmds - event_coal_min;
165 }
163 if (!next_index) 166 if (!next_index)
164 next_index = 1; 167 next_index = 1;
165 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; 168 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -170,6 +173,7 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
170 173
171 cq_db->sqn[0] = cq_index; 174 cq_db->sqn[0] = cq_index;
172 } 175 }
176 return next_index;
173} 177}
174 178
175 179
@@ -265,7 +269,7 @@ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
265 struct bnx2i_5771x_sq_rq_db *sq_db; 269 struct bnx2i_5771x_sq_rq_db *sq_db;
266 struct bnx2i_endpoint *ep = bnx2i_conn->ep; 270 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
267 271
268 ep->num_active_cmds++; 272 atomic_inc(&ep->num_active_cmds);
269 wmb(); /* flush SQ WQE memory before the doorbell is rung */ 273 wmb(); /* flush SQ WQE memory before the doorbell is rung */
270 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 274 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
271 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; 275 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
@@ -430,7 +434,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
430 default: 434 default:
431 tmfabort_wqe->ref_itt = RESERVED_ITT; 435 tmfabort_wqe->ref_itt = RESERVED_ITT;
432 } 436 }
433 memcpy(scsi_lun, tmfabort_hdr->lun, sizeof(struct scsi_lun)); 437 memcpy(scsi_lun, &tmfabort_hdr->lun, sizeof(struct scsi_lun));
434 tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]); 438 tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
435 tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]); 439 tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
436 440
@@ -547,7 +551,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
547 551
548 nopout_wqe->op_code = nopout_hdr->opcode; 552 nopout_wqe->op_code = nopout_hdr->opcode;
549 nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; 553 nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
550 memcpy(nopout_wqe->lun, nopout_hdr->lun, 8); 554 memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
551 555
552 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 556 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
553 u32 tmp = nopout_wqe->lun[0]; 557 u32 tmp = nopout_wqe->lun[0];
@@ -1331,14 +1335,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1331 1335
1332/** 1336/**
1333 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion. 1337 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
1334 * @conn: iscsi connection 1338 * @session: iscsi session
1339 * @bnx2i_conn: bnx2i connection
1335 * @cqe: pointer to newly DMA'ed CQE entry for processing 1340 * @cqe: pointer to newly DMA'ed CQE entry for processing
1336 * 1341 *
1337 * process SCSI CMD Response CQE & complete the request to SCSI-ML 1342 * process SCSI CMD Response CQE & complete the request to SCSI-ML
1338 */ 1343 */
1339static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, 1344int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1340 struct bnx2i_conn *bnx2i_conn, 1345 struct bnx2i_conn *bnx2i_conn,
1341 struct cqe *cqe) 1346 struct cqe *cqe)
1342{ 1347{
1343 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1348 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1344 struct bnx2i_cmd_response *resp_cqe; 1349 struct bnx2i_cmd_response *resp_cqe;
@@ -1348,7 +1353,7 @@ static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1348 u32 datalen = 0; 1353 u32 datalen = 0;
1349 1354
1350 resp_cqe = (struct bnx2i_cmd_response *)cqe; 1355 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1351 spin_lock(&session->lock); 1356 spin_lock_bh(&session->lock);
1352 task = iscsi_itt_to_task(conn, 1357 task = iscsi_itt_to_task(conn,
1353 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX); 1358 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
1354 if (!task) 1359 if (!task)
@@ -1409,7 +1414,7 @@ done:
1409 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, 1414 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
1410 conn->data, datalen); 1415 conn->data, datalen);
1411fail: 1416fail:
1412 spin_unlock(&session->lock); 1417 spin_unlock_bh(&session->lock);
1413 return 0; 1418 return 0;
1414} 1419}
1415 1420
@@ -1711,7 +1716,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
1711 hdr->flags = ISCSI_FLAG_CMD_FINAL; 1716 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1712 hdr->itt = task->hdr->itt; 1717 hdr->itt = task->hdr->itt;
1713 hdr->ttt = cpu_to_be32(nop_in->ttt); 1718 hdr->ttt = cpu_to_be32(nop_in->ttt);
1714 memcpy(hdr->lun, nop_in->lun, 8); 1719 memcpy(&hdr->lun, nop_in->lun, 8);
1715 } 1720 }
1716done: 1721done:
1717 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1722 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -1754,7 +1759,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
1754 resp_hdr->opcode = async_cqe->op_code; 1759 resp_hdr->opcode = async_cqe->op_code;
1755 resp_hdr->flags = 0x80; 1760 resp_hdr->flags = 0x80;
1756 1761
1757 memcpy(resp_hdr->lun, async_cqe->lun, 8); 1762 memcpy(&resp_hdr->lun, async_cqe->lun, 8);
1758 resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn); 1763 resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
1759 resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn); 1764 resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
1760 1765
@@ -1836,21 +1841,136 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
1836} 1841}
1837 1842
1838 1843
1844/**
1845 * bnx2i_percpu_io_thread - thread per cpu for ios
1846 *
1847 * @arg: ptr to bnx2i_percpu_info structure
1848 */
1849int bnx2i_percpu_io_thread(void *arg)
1850{
1851 struct bnx2i_percpu_s *p = arg;
1852 struct bnx2i_work *work, *tmp;
1853 LIST_HEAD(work_list);
1854
1855 set_user_nice(current, -20);
1856
1857 while (!kthread_should_stop()) {
1858 spin_lock_bh(&p->p_work_lock);
1859 while (!list_empty(&p->work_list)) {
1860 list_splice_init(&p->work_list, &work_list);
1861 spin_unlock_bh(&p->p_work_lock);
1862
1863 list_for_each_entry_safe(work, tmp, &work_list, list) {
1864 list_del_init(&work->list);
1865 /* work allocated in the bh, freed here */
1866 bnx2i_process_scsi_cmd_resp(work->session,
1867 work->bnx2i_conn,
1868 &work->cqe);
1869 atomic_dec(&work->bnx2i_conn->work_cnt);
1870 kfree(work);
1871 }
1872 spin_lock_bh(&p->p_work_lock);
1873 }
1874 set_current_state(TASK_INTERRUPTIBLE);
1875 spin_unlock_bh(&p->p_work_lock);
1876 schedule();
1877 }
1878 __set_current_state(TASK_RUNNING);
1879
1880 return 0;
1881}
1882
1883
1884/**
1885 * bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread
1886 * @bnx2i_conn: bnx2i connection
1887 *
1888 * this function is called by generic KCQ handler to queue all pending cmd
1889 * completion CQEs
1890 *
1891 * The implementation is to queue the cmd response based on the
1892 * last recorded command for the given connection. The
1893 * cpu_id gets recorded upon task_xmit. No out-of-order completion!
1894 */
1895static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
1896 struct bnx2i_conn *bnx2i_conn,
1897 struct bnx2i_nop_in_msg *cqe)
1898{
1899 struct bnx2i_work *bnx2i_work = NULL;
1900 struct bnx2i_percpu_s *p = NULL;
1901 struct iscsi_task *task;
1902 struct scsi_cmnd *sc;
1903 int rc = 0;
1904 int cpu;
1905
1906 spin_lock(&session->lock);
1907 task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
1908 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
1909 if (!task) {
1910 spin_unlock(&session->lock);
1911 return -EINVAL;
1912 }
1913 sc = task->sc;
1914 spin_unlock(&session->lock);
1915
1916 if (!blk_rq_cpu_valid(sc->request))
1917 cpu = smp_processor_id();
1918 else
1919 cpu = sc->request->cpu;
1920
1921 p = &per_cpu(bnx2i_percpu, cpu);
1922 spin_lock(&p->p_work_lock);
1923 if (unlikely(!p->iothread)) {
1924 rc = -EINVAL;
1925 goto err;
1926 }
1927 /* Alloc and copy to the cqe */
1928 bnx2i_work = kzalloc(sizeof(struct bnx2i_work), GFP_ATOMIC);
1929 if (bnx2i_work) {
1930 INIT_LIST_HEAD(&bnx2i_work->list);
1931 bnx2i_work->session = session;
1932 bnx2i_work->bnx2i_conn = bnx2i_conn;
1933 memcpy(&bnx2i_work->cqe, cqe, sizeof(struct cqe));
1934 list_add_tail(&bnx2i_work->list, &p->work_list);
1935 atomic_inc(&bnx2i_conn->work_cnt);
1936 wake_up_process(p->iothread);
1937 spin_unlock(&p->p_work_lock);
1938 goto done;
1939 } else
1940 rc = -ENOMEM;
1941err:
1942 spin_unlock(&p->p_work_lock);
1943 bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, (struct cqe *)cqe);
1944done:
1945 return rc;
1946}
1947
1839 1948
1840/** 1949/**
1841 * bnx2i_process_new_cqes - process newly DMA'ed CQE's 1950 * bnx2i_process_new_cqes - process newly DMA'ed CQE's
1842 * @bnx2i_conn: iscsi connection 1951 * @bnx2i_conn: bnx2i connection
1843 * 1952 *
1844 * this function is called by generic KCQ handler to process all pending CQE's 1953 * this function is called by generic KCQ handler to process all pending CQE's
1845 */ 1954 */
1846static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) 1955static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1847{ 1956{
1848 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; 1957 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1849 struct iscsi_session *session = conn->session; 1958 struct iscsi_session *session = conn->session;
1850 struct qp_info *qp = &bnx2i_conn->ep->qp; 1959 struct qp_info *qp;
1851 struct bnx2i_nop_in_msg *nopin; 1960 struct bnx2i_nop_in_msg *nopin;
1852 int tgt_async_msg; 1961 int tgt_async_msg;
1962 int cqe_cnt = 0;
1853 1963
1964 if (bnx2i_conn->ep == NULL)
1965 return 0;
1966
1967 qp = &bnx2i_conn->ep->qp;
1968
1969 if (!qp->cq_virt) {
1970 printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
1971 bnx2i_conn->hba->netdev->name);
1972 goto out;
1973 }
1854 while (1) { 1974 while (1) {
1855 nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe; 1975 nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
1856 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) 1976 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
@@ -1873,8 +1993,9 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1873 switch (nopin->op_code) { 1993 switch (nopin->op_code) {
1874 case ISCSI_OP_SCSI_CMD_RSP: 1994 case ISCSI_OP_SCSI_CMD_RSP:
1875 case ISCSI_OP_SCSI_DATA_IN: 1995 case ISCSI_OP_SCSI_DATA_IN:
1876 bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, 1996 /* Run the kthread engine only for data cmds
1877 qp->cq_cons_qe); 1997 All other cmds will be completed in this bh! */
1998 bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
1878 break; 1999 break;
1879 case ISCSI_OP_LOGIN_RSP: 2000 case ISCSI_OP_LOGIN_RSP:
1880 bnx2i_process_login_resp(session, bnx2i_conn, 2001 bnx2i_process_login_resp(session, bnx2i_conn,
@@ -1918,13 +2039,21 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1918 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", 2039 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
1919 nopin->op_code); 2040 nopin->op_code);
1920 } 2041 }
1921 if (!tgt_async_msg) 2042 if (!tgt_async_msg) {
1922 bnx2i_conn->ep->num_active_cmds--; 2043 if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
2044 printk(KERN_ALERT "bnx2i (%s): no active cmd! "
2045 "op 0x%x\n",
2046 bnx2i_conn->hba->netdev->name,
2047 nopin->op_code);
2048 else
2049 atomic_dec(&bnx2i_conn->ep->num_active_cmds);
2050 }
1923cqe_out: 2051cqe_out:
1924 /* clear out in production version only, till beta keep opcode 2052 /* clear out in production version only, till beta keep opcode
1925 * field intact, will be helpful in debugging (context dump) 2053 * field intact, will be helpful in debugging (context dump)
1926 * nopin->op_code = 0; 2054 * nopin->op_code = 0;
1927 */ 2055 */
2056 cqe_cnt++;
1928 qp->cqe_exp_seq_sn++; 2057 qp->cqe_exp_seq_sn++;
1929 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) 2058 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
1930 qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN; 2059 qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
@@ -1937,6 +2066,8 @@ cqe_out:
1937 qp->cq_cons_idx++; 2066 qp->cq_cons_idx++;
1938 } 2067 }
1939 } 2068 }
2069out:
2070 return cqe_cnt;
1940} 2071}
1941 2072
1942/** 2073/**
@@ -1952,6 +2083,7 @@ static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
1952{ 2083{
1953 struct bnx2i_conn *bnx2i_conn; 2084 struct bnx2i_conn *bnx2i_conn;
1954 u32 iscsi_cid; 2085 u32 iscsi_cid;
2086 int nxt_idx;
1955 2087
1956 iscsi_cid = new_cqe_kcqe->iscsi_conn_id; 2088 iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
1957 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); 2089 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
@@ -1964,9 +2096,12 @@ static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
1964 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); 2096 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
1965 return; 2097 return;
1966 } 2098 }
2099
1967 bnx2i_process_new_cqes(bnx2i_conn); 2100 bnx2i_process_new_cqes(bnx2i_conn);
1968 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP); 2101 nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep,
1969 bnx2i_process_new_cqes(bnx2i_conn); 2102 CNIC_ARM_CQE_FP);
2103 if (nxt_idx && nxt_idx == bnx2i_process_new_cqes(bnx2i_conn))
2104 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
1970} 2105}
1971 2106
1972 2107
@@ -2312,7 +2447,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
2312 printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid " 2447 printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
2313 "opcode\n", hba->netdev->name); 2448 "opcode\n", hba->netdev->name);
2314 else if (ofld_kcqe->completion_status == 2449 else if (ofld_kcqe->completion_status ==
2315 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY) 2450 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
2316 /* error status code valid only for 5771x chipset */ 2451 /* error status code valid only for 5771x chipset */
2317 ep->state = EP_STATE_OFLD_FAILED_CID_BUSY; 2452 ep->state = EP_STATE_OFLD_FAILED_CID_BUSY;
2318 else 2453 else
@@ -2517,7 +2652,7 @@ static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
2517 2652
2518 2653
2519static int bnx2i_send_nl_mesg(void *context, u32 msg_type, 2654static int bnx2i_send_nl_mesg(void *context, u32 msg_type,
2520 char *buf, u16 buflen) 2655 char *buf, u16 buflen)
2521{ 2656{
2522 struct bnx2i_hba *hba = context; 2657 struct bnx2i_hba *hba = context;
2523 int rc; 2658 int rc;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 6973413e91ec..1a947f1b9729 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2006 - 2010 Broadcom Corporation 3 * Copyright (c) 2006 - 2011 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie 5 * Copyright (c) 2007, 2008 Mike Christie
6 * 6 *
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
18static u32 adapter_count; 18static u32 adapter_count;
19 19
20#define DRV_MODULE_NAME "bnx2i" 20#define DRV_MODULE_NAME "bnx2i"
21#define DRV_MODULE_VERSION "2.6.2.3" 21#define DRV_MODULE_VERSION "2.7.0.3"
22#define DRV_MODULE_RELDATE "Dec 31, 2010" 22#define DRV_MODULE_RELDATE "Jun 15, 2011"
23 23
24static char version[] __devinitdata = 24static char version[] __devinitdata =
25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -40,7 +40,7 @@ unsigned int event_coal_min = 24;
40module_param(event_coal_min, int, 0664); 40module_param(event_coal_min, int, 0664);
41MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands"); 41MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
42 42
43unsigned int event_coal_div = 1; 43unsigned int event_coal_div = 2;
44module_param(event_coal_div, int, 0664); 44module_param(event_coal_div, int, 0664);
45MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); 45MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
46 46
@@ -66,6 +66,15 @@ MODULE_PARM_DESC(rq_size, "Configure RQ size");
66 66
67u64 iscsi_error_mask = 0x00; 67u64 iscsi_error_mask = 0x00;
68 68
69DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
70
71static int bnx2i_cpu_callback(struct notifier_block *nfb,
72 unsigned long action, void *hcpu);
73/* notification function for CPU hotplug events */
74static struct notifier_block bnx2i_cpu_notifier = {
75 .notifier_call = bnx2i_cpu_callback,
76};
77
69 78
70/** 79/**
71 * bnx2i_identify_device - identifies NetXtreme II device type 80 * bnx2i_identify_device - identifies NetXtreme II device type
@@ -172,21 +181,14 @@ void bnx2i_start(void *handle)
172 struct bnx2i_hba *hba = handle; 181 struct bnx2i_hba *hba = handle;
173 int i = HZ; 182 int i = HZ;
174 183
175 if (!hba->cnic->max_iscsi_conn) { 184 /*
176 printk(KERN_ALERT "bnx2i: dev %s does not support " 185 * We should never register devices that don't support iSCSI
177 "iSCSI\n", hba->netdev->name); 186 * (see bnx2i_init_one), so something is wrong if we try to
187 * start a iSCSI adapter on hardware with 0 supported iSCSI
188 * connections
189 */
190 BUG_ON(!hba->cnic->max_iscsi_conn);
178 191
179 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
180 mutex_lock(&bnx2i_dev_lock);
181 list_del_init(&hba->link);
182 adapter_count--;
183 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
184 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
185 mutex_unlock(&bnx2i_dev_lock);
186 bnx2i_free_hba(hba);
187 }
188 return;
189 }
190 bnx2i_send_fw_iscsi_init_msg(hba); 192 bnx2i_send_fw_iscsi_init_msg(hba);
191 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) 193 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
192 msleep(BNX2I_INIT_POLL_TIME); 194 msleep(BNX2I_INIT_POLL_TIME);
@@ -290,6 +292,13 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
290 int rc; 292 int rc;
291 293
292 mutex_lock(&bnx2i_dev_lock); 294 mutex_lock(&bnx2i_dev_lock);
295 if (!cnic->max_iscsi_conn) {
296 printk(KERN_ALERT "bnx2i: dev %s does not support "
297 "iSCSI\n", hba->netdev->name);
298 rc = -EOPNOTSUPP;
299 goto out;
300 }
301
293 hba->cnic = cnic; 302 hba->cnic = cnic;
294 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); 303 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
295 if (!rc) { 304 if (!rc) {
@@ -307,6 +316,7 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
307 else 316 else
308 printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc); 317 printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc);
309 318
319out:
310 mutex_unlock(&bnx2i_dev_lock); 320 mutex_unlock(&bnx2i_dev_lock);
311 321
312 return rc; 322 return rc;
@@ -371,6 +381,91 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
371 381
372 382
373/** 383/**
384 * bnx2i_percpu_thread_create - Create a receive thread for an
385 * online CPU
386 *
387 * @cpu: cpu index for the online cpu
388 */
389static void bnx2i_percpu_thread_create(unsigned int cpu)
390{
391 struct bnx2i_percpu_s *p;
392 struct task_struct *thread;
393
394 p = &per_cpu(bnx2i_percpu, cpu);
395
396 thread = kthread_create(bnx2i_percpu_io_thread, (void *)p,
397 "bnx2i_thread/%d", cpu);
398 /* bind thread to the cpu */
399 if (likely(!IS_ERR(thread))) {
400 kthread_bind(thread, cpu);
401 p->iothread = thread;
402 wake_up_process(thread);
403 }
404}
405
406
407static void bnx2i_percpu_thread_destroy(unsigned int cpu)
408{
409 struct bnx2i_percpu_s *p;
410 struct task_struct *thread;
411 struct bnx2i_work *work, *tmp;
412
413 /* Prevent any new work from being queued for this CPU */
414 p = &per_cpu(bnx2i_percpu, cpu);
415 spin_lock_bh(&p->p_work_lock);
416 thread = p->iothread;
417 p->iothread = NULL;
418
419 /* Free all work in the list */
420 list_for_each_entry_safe(work, tmp, &p->work_list, list) {
421 list_del_init(&work->list);
422 bnx2i_process_scsi_cmd_resp(work->session,
423 work->bnx2i_conn, &work->cqe);
424 kfree(work);
425 }
426
427 spin_unlock_bh(&p->p_work_lock);
428 if (thread)
429 kthread_stop(thread);
430}
431
432
433/**
434 * bnx2i_cpu_callback - Handler for CPU hotplug events
435 *
436 * @nfb: The callback data block
437 * @action: The event triggering the callback
438 * @hcpu: The index of the CPU that the event is for
439 *
440 * This creates or destroys per-CPU data for iSCSI
441 *
442 * Returns NOTIFY_OK always.
443 */
444static int bnx2i_cpu_callback(struct notifier_block *nfb,
445 unsigned long action, void *hcpu)
446{
447 unsigned cpu = (unsigned long)hcpu;
448
449 switch (action) {
450 case CPU_ONLINE:
451 case CPU_ONLINE_FROZEN:
452 printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n",
453 cpu);
454 bnx2i_percpu_thread_create(cpu);
455 break;
456 case CPU_DEAD:
457 case CPU_DEAD_FROZEN:
458 printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu);
459 bnx2i_percpu_thread_destroy(cpu);
460 break;
461 default:
462 break;
463 }
464 return NOTIFY_OK;
465}
466
467
468/**
374 * bnx2i_mod_init - module init entry point 469 * bnx2i_mod_init - module init entry point
375 * 470 *
376 * initialize any driver wide global data structures such as endpoint pool, 471 * initialize any driver wide global data structures such as endpoint pool,
@@ -380,6 +475,8 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
380static int __init bnx2i_mod_init(void) 475static int __init bnx2i_mod_init(void)
381{ 476{
382 int err; 477 int err;
478 unsigned cpu = 0;
479 struct bnx2i_percpu_s *p;
383 480
384 printk(KERN_INFO "%s", version); 481 printk(KERN_INFO "%s", version);
385 482
@@ -402,6 +499,20 @@ static int __init bnx2i_mod_init(void)
402 goto unreg_xport; 499 goto unreg_xport;
403 } 500 }
404 501
502 /* Create percpu kernel threads to handle iSCSI I/O completions */
503 for_each_possible_cpu(cpu) {
504 p = &per_cpu(bnx2i_percpu, cpu);
505 INIT_LIST_HEAD(&p->work_list);
506 spin_lock_init(&p->p_work_lock);
507 p->iothread = NULL;
508 }
509
510 for_each_online_cpu(cpu)
511 bnx2i_percpu_thread_create(cpu);
512
513 /* Initialize per CPU interrupt thread */
514 register_hotcpu_notifier(&bnx2i_cpu_notifier);
515
405 return 0; 516 return 0;
406 517
407unreg_xport: 518unreg_xport:
@@ -422,6 +533,7 @@ out:
422static void __exit bnx2i_mod_exit(void) 533static void __exit bnx2i_mod_exit(void)
423{ 534{
424 struct bnx2i_hba *hba; 535 struct bnx2i_hba *hba;
536 unsigned cpu = 0;
425 537
426 mutex_lock(&bnx2i_dev_lock); 538 mutex_lock(&bnx2i_dev_lock);
427 while (!list_empty(&adapter_list)) { 539 while (!list_empty(&adapter_list)) {
@@ -439,6 +551,11 @@ static void __exit bnx2i_mod_exit(void)
439 } 551 }
440 mutex_unlock(&bnx2i_dev_lock); 552 mutex_unlock(&bnx2i_dev_lock);
441 553
554 unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
555
556 for_each_online_cpu(cpu)
557 bnx2i_percpu_thread_destroy(cpu);
558
442 iscsi_unregister_transport(&bnx2i_iscsi_transport); 559 iscsi_unregister_transport(&bnx2i_iscsi_transport);
443 cnic_unregister_driver(CNIC_ULP_ISCSI); 560 cnic_unregister_driver(CNIC_ULP_ISCSI);
444} 561}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 041928b23cb0..5c55a75ae597 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. 2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
3 * 3 *
4 * Copyright (c) 2006 - 2010 Broadcom Corporation 4 * Copyright (c) 2006 - 2011 Broadcom Corporation
5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6 * Copyright (c) 2007, 2008 Mike Christie 6 * Copyright (c) 2007, 2008 Mike Christie
7 * 7 *
@@ -27,6 +27,7 @@ static struct scsi_host_template bnx2i_host_template;
27 */ 27 */
28static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */ 28static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
29 29
30DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
30 31
31static int bnx2i_adapter_ready(struct bnx2i_hba *hba) 32static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
32{ 33{
@@ -1214,7 +1215,8 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
1214 struct bnx2i_cmd *cmd = task->dd_data; 1215 struct bnx2i_cmd *cmd = task->dd_data;
1215 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; 1216 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1216 1217
1217 if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes) 1218 if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
1219 hba->max_sqes)
1218 return -ENOMEM; 1220 return -ENOMEM;
1219 1221
1220 /* 1222 /*
@@ -1354,6 +1356,9 @@ bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1354 bnx2i_conn = conn->dd_data; 1356 bnx2i_conn = conn->dd_data;
1355 bnx2i_conn->cls_conn = cls_conn; 1357 bnx2i_conn->cls_conn = cls_conn;
1356 bnx2i_conn->hba = hba; 1358 bnx2i_conn->hba = hba;
1359
1360 atomic_set(&bnx2i_conn->work_cnt, 0);
1361
1357 /* 'ep' ptr will be assigned in bind() call */ 1362 /* 'ep' ptr will be assigned in bind() call */
1358 bnx2i_conn->ep = NULL; 1363 bnx2i_conn->ep = NULL;
1359 init_completion(&bnx2i_conn->cmd_cleanup_cmpl); 1364 init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
@@ -1457,11 +1462,34 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1457 struct bnx2i_conn *bnx2i_conn = conn->dd_data; 1462 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1458 struct Scsi_Host *shost; 1463 struct Scsi_Host *shost;
1459 struct bnx2i_hba *hba; 1464 struct bnx2i_hba *hba;
1465 struct bnx2i_work *work, *tmp;
1466 unsigned cpu = 0;
1467 struct bnx2i_percpu_s *p;
1460 1468
1461 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); 1469 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1462 hba = iscsi_host_priv(shost); 1470 hba = iscsi_host_priv(shost);
1463 1471
1464 bnx2i_conn_free_login_resources(hba, bnx2i_conn); 1472 bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1473
1474 if (atomic_read(&bnx2i_conn->work_cnt)) {
1475 for_each_online_cpu(cpu) {
1476 p = &per_cpu(bnx2i_percpu, cpu);
1477 spin_lock_bh(&p->p_work_lock);
1478 list_for_each_entry_safe(work, tmp,
1479 &p->work_list, list) {
1480 if (work->session == conn->session &&
1481 work->bnx2i_conn == bnx2i_conn) {
1482 list_del_init(&work->list);
1483 kfree(work);
1484 if (!atomic_dec_and_test(
1485 &bnx2i_conn->work_cnt))
1486 break;
1487 }
1488 }
1489 spin_unlock_bh(&p->p_work_lock);
1490 }
1491 }
1492
1465 iscsi_conn_teardown(cls_conn); 1493 iscsi_conn_teardown(cls_conn);
1466} 1494}
1467 1495
@@ -1769,7 +1797,7 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1769 } 1797 }
1770 bnx2i_ep = ep->dd_data; 1798 bnx2i_ep = ep->dd_data;
1771 1799
1772 bnx2i_ep->num_active_cmds = 0; 1800 atomic_set(&bnx2i_ep->num_active_cmds, 0);
1773 iscsi_cid = bnx2i_alloc_iscsi_cid(hba); 1801 iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1774 if (iscsi_cid == -1) { 1802 if (iscsi_cid == -1) {
1775 printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate " 1803 printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
@@ -2163,9 +2191,9 @@ static struct scsi_host_template bnx2i_host_template = {
2163 .eh_device_reset_handler = iscsi_eh_device_reset, 2191 .eh_device_reset_handler = iscsi_eh_device_reset,
2164 .eh_target_reset_handler = iscsi_eh_recover_target, 2192 .eh_target_reset_handler = iscsi_eh_recover_target,
2165 .change_queue_depth = iscsi_change_queue_depth, 2193 .change_queue_depth = iscsi_change_queue_depth,
2166 .can_queue = 1024, 2194 .can_queue = 2048,
2167 .max_sectors = 127, 2195 .max_sectors = 127,
2168 .cmd_per_lun = 24, 2196 .cmd_per_lun = 128,
2169 .this_id = -1, 2197 .this_id = -1,
2170 .use_clustering = ENABLE_CLUSTERING, 2198 .use_clustering = ENABLE_CLUSTERING,
2171 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, 2199 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index 9174196d9033..83a77f7244d2 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,6 +1,6 @@
1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2004 - 2010 Broadcom Corporation 3 * Copyright (c) 2004 - 2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index abc7b122e050..bd22041e2789 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1245,7 +1245,7 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
1245 struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi; 1245 struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
1246 struct ulp_iscsi_info uinfo; 1246 struct ulp_iscsi_info uinfo;
1247 unsigned int pgsz_factor[4]; 1247 unsigned int pgsz_factor[4];
1248 int err; 1248 int i, err;
1249 1249
1250 if (ddp) { 1250 if (ddp) {
1251 kref_get(&ddp->refcnt); 1251 kref_get(&ddp->refcnt);
@@ -1271,6 +1271,8 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
1271 1271
1272 uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; 1272 uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
1273 cxgbi_ddp_page_size_factor(pgsz_factor); 1273 cxgbi_ddp_page_size_factor(pgsz_factor);
1274 for (i = 0; i < 4; i++)
1275 uinfo.pgsz_factor[i] = pgsz_factor[i];
1274 uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT); 1276 uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);
1275 1277
1276 err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); 1278 err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 155d7b9bdeae..204fa8d4b4ab 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -99,7 +99,8 @@ static void fcoe_destroy_work(struct work_struct *);
99static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, 99static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
100 unsigned int); 100 unsigned int);
101static int fcoe_ddp_done(struct fc_lport *, u16); 101static int fcoe_ddp_done(struct fc_lport *, u16);
102 102static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
103 unsigned int);
103static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); 104static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
104 105
105static bool fcoe_match(struct net_device *netdev); 106static bool fcoe_match(struct net_device *netdev);
@@ -143,6 +144,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
143 .frame_send = fcoe_xmit, 144 .frame_send = fcoe_xmit,
144 .ddp_setup = fcoe_ddp_setup, 145 .ddp_setup = fcoe_ddp_setup,
145 .ddp_done = fcoe_ddp_done, 146 .ddp_done = fcoe_ddp_done,
147 .ddp_target = fcoe_ddp_target,
146 .elsct_send = fcoe_elsct_send, 148 .elsct_send = fcoe_elsct_send,
147 .get_lesb = fcoe_get_lesb, 149 .get_lesb = fcoe_get_lesb,
148 .lport_set_port_id = fcoe_set_port_id, 150 .lport_set_port_id = fcoe_set_port_id,
@@ -429,21 +431,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
429 struct fcoe_ctlr *fip = &fcoe->ctlr; 431 struct fcoe_ctlr *fip = &fcoe->ctlr;
430 u8 flogi_maddr[ETH_ALEN]; 432 u8 flogi_maddr[ETH_ALEN];
431 const struct net_device_ops *ops; 433 const struct net_device_ops *ops;
432 struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
433
434 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
435
436 /* Logout of the fabric */
437 fc_fabric_logoff(fcoe->ctlr.lp);
438
439 /* Cleanup the fc_lport */
440 fc_lport_destroy(fcoe->ctlr.lp);
441
442 /* Stop the transmit retry timer */
443 del_timer_sync(&port->timer);
444
445 /* Free existing transmit skbs */
446 fcoe_clean_pending_queue(fcoe->ctlr.lp);
447 434
448 /* 435 /*
449 * Don't listen for Ethernet packets anymore. 436 * Don't listen for Ethernet packets anymore.
@@ -466,9 +453,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
466 } else 453 } else
467 dev_mc_del(netdev, FIP_ALL_ENODE_MACS); 454 dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
468 455
469 if (!is_zero_ether_addr(port->data_src_addr))
470 dev_uc_del(netdev, port->data_src_addr);
471
472 /* Tell the LLD we are done w/ FCoE */ 456 /* Tell the LLD we are done w/ FCoE */
473 ops = netdev->netdev_ops; 457 ops = netdev->netdev_ops;
474 if (ops->ndo_fcoe_disable) { 458 if (ops->ndo_fcoe_disable) {
@@ -476,6 +460,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
476 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" 460 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
477 " specific feature for LLD.\n"); 461 " specific feature for LLD.\n");
478 } 462 }
463
464 /* Release the self-reference taken during fcoe_interface_create() */
479 fcoe_interface_put(fcoe); 465 fcoe_interface_put(fcoe);
480} 466}
481 467
@@ -749,12 +735,27 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
749 * The offload EM that this routine is associated with will handle any 735 * The offload EM that this routine is associated with will handle any
750 * packets that are for SCSI read requests. 736 * packets that are for SCSI read requests.
751 * 737 *
738 * This has been enhanced to work when FCoE stack is operating in target
739 * mode.
740 *
752 * Returns: True for read types I/O, otherwise returns false. 741 * Returns: True for read types I/O, otherwise returns false.
753 */ 742 */
754bool fcoe_oem_match(struct fc_frame *fp) 743bool fcoe_oem_match(struct fc_frame *fp)
755{ 744{
756 return fc_fcp_is_read(fr_fsp(fp)) && 745 struct fc_frame_header *fh = fc_frame_header_get(fp);
757 (fr_fsp(fp)->data_len > fcoe_ddp_min); 746 struct fcp_cmnd *fcp;
747
748 if (fc_fcp_is_read(fr_fsp(fp)) &&
749 (fr_fsp(fp)->data_len > fcoe_ddp_min))
750 return true;
751 else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
752 fcp = fc_frame_payload_get(fp, sizeof(*fcp));
753 if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
754 fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
755 (fcp->fc_flags & FCP_CFL_WRDATA))
756 return true;
757 }
758 return false;
758} 759}
759 760
760/** 761/**
@@ -844,6 +845,32 @@ skip_oem:
844 */ 845 */
845static void fcoe_if_destroy(struct fc_lport *lport) 846static void fcoe_if_destroy(struct fc_lport *lport)
846{ 847{
848 struct fcoe_port *port = lport_priv(lport);
849 struct fcoe_interface *fcoe = port->priv;
850 struct net_device *netdev = fcoe->netdev;
851
852 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
853
854 /* Logout of the fabric */
855 fc_fabric_logoff(lport);
856
857 /* Cleanup the fc_lport */
858 fc_lport_destroy(lport);
859
860 /* Stop the transmit retry timer */
861 del_timer_sync(&port->timer);
862
863 /* Free existing transmit skbs */
864 fcoe_clean_pending_queue(lport);
865
866 rtnl_lock();
867 if (!is_zero_ether_addr(port->data_src_addr))
868 dev_uc_del(netdev, port->data_src_addr);
869 rtnl_unlock();
870
871 /* Release reference held in fcoe_if_create() */
872 fcoe_interface_put(fcoe);
873
847 /* Free queued packets for the per-CPU receive threads */ 874 /* Free queued packets for the per-CPU receive threads */
848 fcoe_percpu_clean(lport); 875 fcoe_percpu_clean(lport);
849 876
@@ -887,6 +914,28 @@ static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
887} 914}
888 915
889/** 916/**
917 * fcoe_ddp_target() - Call a LLD's ddp_target through the net device
918 * @lport: The local port to setup DDP for
919 * @xid: The exchange ID for this DDP transfer
920 * @sgl: The scatterlist describing this transfer
921 * @sgc: The number of sg items
922 *
923 * Returns: 0 if the DDP context was not configured
924 */
925static int fcoe_ddp_target(struct fc_lport *lport, u16 xid,
926 struct scatterlist *sgl, unsigned int sgc)
927{
928 struct net_device *netdev = fcoe_netdev(lport);
929
930 if (netdev->netdev_ops->ndo_fcoe_ddp_target)
931 return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
932 sgl, sgc);
933
934 return 0;
935}
936
937
938/**
890 * fcoe_ddp_done() - Call a LLD's ddp_done through the net device 939 * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
891 * @lport: The local port to complete DDP on 940 * @lport: The local port to complete DDP on
892 * @xid: The exchange ID for this DDP transfer 941 * @xid: The exchange ID for this DDP transfer
@@ -1206,6 +1255,36 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
1206} 1255}
1207 1256
1208/** 1257/**
1258 * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
1259 * command.
1260 * @curr_cpu: CPU which received request
1261 *
1262 * This routine selects next CPU based on cpumask.
1263 *
1264 * Returns: int (CPU number). Caller to verify if returned CPU is online or not.
1265 */
1266static unsigned int fcoe_select_cpu(unsigned int curr_cpu)
1267{
1268 static unsigned int selected_cpu;
1269
1270 if (num_online_cpus() == 1)
1271 return curr_cpu;
1272 /*
1273 * Doing following check, to skip "curr_cpu (smp_processor_id)"
1274 * from selection of CPU is intentional. This is to avoid same CPU
1275 * doing post-processing of command. "curr_cpu" to just receive
1276 * incoming request in case where rx_id is UNKNOWN and all other
1277 * CPU to actually process the command(s)
1278 */
1279 do {
1280 selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1281 if (selected_cpu >= nr_cpu_ids)
1282 selected_cpu = cpumask_first(cpu_online_mask);
1283 } while (selected_cpu == curr_cpu);
1284 return selected_cpu;
1285}
1286
1287/**
1209 * fcoe_rcv() - Receive packets from a net device 1288 * fcoe_rcv() - Receive packets from a net device
1210 * @skb: The received packet 1289 * @skb: The received packet
1211 * @netdev: The net device that the packet was received on 1290 * @netdev: The net device that the packet was received on
@@ -1281,9 +1360,20 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1281 */ 1360 */
1282 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX) 1361 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1283 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; 1362 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1284 else 1363 else {
1285 cpu = smp_processor_id(); 1364 cpu = smp_processor_id();
1286 1365
1366 if ((fh->fh_type == FC_TYPE_FCP) &&
1367 (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
1368 do {
1369 cpu = fcoe_select_cpu(cpu);
1370 } while (!cpu_online(cpu));
1371 } else if ((fh->fh_type == FC_TYPE_FCP) &&
1372 (ntohs(fh->fh_rx_id) != FC_XID_UNKNOWN)) {
1373 cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
1374 } else
1375 cpu = smp_processor_id();
1376 }
1287 fps = &per_cpu(fcoe_percpu, cpu); 1377 fps = &per_cpu(fcoe_percpu, cpu);
1288 spin_lock_bh(&fps->fcoe_rx_list.lock); 1378 spin_lock_bh(&fps->fcoe_rx_list.lock);
1289 if (unlikely(!fps->thread)) { 1379 if (unlikely(!fps->thread)) {
@@ -1733,7 +1823,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1733 case NETDEV_UNREGISTER: 1823 case NETDEV_UNREGISTER:
1734 list_del(&fcoe->list); 1824 list_del(&fcoe->list);
1735 port = lport_priv(fcoe->ctlr.lp); 1825 port = lport_priv(fcoe->ctlr.lp);
1736 fcoe_interface_cleanup(fcoe);
1737 queue_work(fcoe_wq, &port->destroy_work); 1826 queue_work(fcoe_wq, &port->destroy_work);
1738 goto out; 1827 goto out;
1739 break; 1828 break;
@@ -1827,22 +1916,22 @@ static int fcoe_destroy(struct net_device *netdev)
1827{ 1916{
1828 struct fcoe_interface *fcoe; 1917 struct fcoe_interface *fcoe;
1829 struct fc_lport *lport; 1918 struct fc_lport *lport;
1919 struct fcoe_port *port;
1830 int rc = 0; 1920 int rc = 0;
1831 1921
1832 mutex_lock(&fcoe_config_mutex); 1922 mutex_lock(&fcoe_config_mutex);
1833 rtnl_lock(); 1923 rtnl_lock();
1834 fcoe = fcoe_hostlist_lookup_port(netdev); 1924 fcoe = fcoe_hostlist_lookup_port(netdev);
1835 if (!fcoe) { 1925 if (!fcoe) {
1836 rtnl_unlock();
1837 rc = -ENODEV; 1926 rc = -ENODEV;
1838 goto out_nodev; 1927 goto out_nodev;
1839 } 1928 }
1840 lport = fcoe->ctlr.lp; 1929 lport = fcoe->ctlr.lp;
1930 port = lport_priv(lport);
1841 list_del(&fcoe->list); 1931 list_del(&fcoe->list);
1842 fcoe_interface_cleanup(fcoe); 1932 queue_work(fcoe_wq, &port->destroy_work);
1843 rtnl_unlock();
1844 fcoe_if_destroy(lport);
1845out_nodev: 1933out_nodev:
1934 rtnl_unlock();
1846 mutex_unlock(&fcoe_config_mutex); 1935 mutex_unlock(&fcoe_config_mutex);
1847 return rc; 1936 return rc;
1848} 1937}
@@ -1854,10 +1943,25 @@ out_nodev:
1854static void fcoe_destroy_work(struct work_struct *work) 1943static void fcoe_destroy_work(struct work_struct *work)
1855{ 1944{
1856 struct fcoe_port *port; 1945 struct fcoe_port *port;
1946 struct fcoe_interface *fcoe;
1947 int npiv = 0;
1857 1948
1858 port = container_of(work, struct fcoe_port, destroy_work); 1949 port = container_of(work, struct fcoe_port, destroy_work);
1859 mutex_lock(&fcoe_config_mutex); 1950 mutex_lock(&fcoe_config_mutex);
1951
1952 /* set if this is an NPIV port */
1953 npiv = port->lport->vport ? 1 : 0;
1954
1955 fcoe = port->priv;
1860 fcoe_if_destroy(port->lport); 1956 fcoe_if_destroy(port->lport);
1957
1958 /* Do not tear down the fcoe interface for NPIV port */
1959 if (!npiv) {
1960 rtnl_lock();
1961 fcoe_interface_cleanup(fcoe);
1962 rtnl_unlock();
1963 }
1964
1861 mutex_unlock(&fcoe_config_mutex); 1965 mutex_unlock(&fcoe_config_mutex);
1862} 1966}
1863 1967
@@ -1886,7 +1990,7 @@ static bool fcoe_match(struct net_device *netdev)
1886 */ 1990 */
1887static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) 1991static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
1888{ 1992{
1889 int rc; 1993 int rc = 0;
1890 struct fcoe_interface *fcoe; 1994 struct fcoe_interface *fcoe;
1891 struct fc_lport *lport; 1995 struct fc_lport *lport;
1892 1996
@@ -1911,7 +2015,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
1911 netdev->name); 2015 netdev->name);
1912 rc = -EIO; 2016 rc = -EIO;
1913 fcoe_interface_cleanup(fcoe); 2017 fcoe_interface_cleanup(fcoe);
1914 goto out_free; 2018 goto out_nodev;
1915 } 2019 }
1916 2020
1917 /* Make this the "master" N_Port */ 2021 /* Make this the "master" N_Port */
@@ -1926,17 +2030,6 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
1926 if (!fcoe_link_ok(lport)) 2030 if (!fcoe_link_ok(lport))
1927 fcoe_ctlr_link_up(&fcoe->ctlr); 2031 fcoe_ctlr_link_up(&fcoe->ctlr);
1928 2032
1929 /*
1930 * Release from init in fcoe_interface_create(), on success lport
1931 * should be holding a reference taken in fcoe_if_create().
1932 */
1933 fcoe_interface_put(fcoe);
1934 rtnl_unlock();
1935 mutex_unlock(&fcoe_config_mutex);
1936
1937 return 0;
1938out_free:
1939 fcoe_interface_put(fcoe);
1940out_nodev: 2033out_nodev:
1941 rtnl_unlock(); 2034 rtnl_unlock();
1942 mutex_unlock(&fcoe_config_mutex); 2035 mutex_unlock(&fcoe_config_mutex);
@@ -2218,7 +2311,6 @@ static void __exit fcoe_exit(void)
2218 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { 2311 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2219 list_del(&fcoe->list); 2312 list_del(&fcoe->list);
2220 port = lport_priv(fcoe->ctlr.lp); 2313 port = lport_priv(fcoe->ctlr.lp);
2221 fcoe_interface_cleanup(fcoe);
2222 queue_work(fcoe_wq, &port->destroy_work); 2314 queue_work(fcoe_wq, &port->destroy_work);
2223 } 2315 }
2224 rtnl_unlock(); 2316 rtnl_unlock();
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 671cde9d4060..95a5ba29320d 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -37,7 +37,7 @@
37 37
38#define DRV_NAME "fnic" 38#define DRV_NAME "fnic"
39#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 39#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
40#define DRV_VERSION "1.5.0.1" 40#define DRV_VERSION "1.5.0.2"
41#define PFX DRV_NAME ": " 41#define PFX DRV_NAME ": "
42#define DFX DRV_NAME "%d: " 42#define DFX DRV_NAME "%d: "
43 43
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index bb63f1a1f808..fc98eb61e760 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -388,17 +388,6 @@ static void fnic_iounmap(struct fnic *fnic)
388 iounmap(fnic->bar0.vaddr); 388 iounmap(fnic->bar0.vaddr);
389} 389}
390 390
391/*
392 * Allocate element for mempools requiring GFP_DMA flag.
393 * Otherwise, checks in kmem_flagcheck() hit BUG_ON().
394 */
395static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
396{
397 struct kmem_cache *mem = pool_data;
398
399 return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
400}
401
402/** 391/**
403 * fnic_get_mac() - get assigned data MAC address for FIP code. 392 * fnic_get_mac() - get assigned data MAC address for FIP code.
404 * @lport: local port. 393 * @lport: local port.
@@ -603,14 +592,12 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
603 if (!fnic->io_req_pool) 592 if (!fnic->io_req_pool)
604 goto err_out_free_resources; 593 goto err_out_free_resources;
605 594
606 pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, 595 pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
607 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
608 if (!pool) 596 if (!pool)
609 goto err_out_free_ioreq_pool; 597 goto err_out_free_ioreq_pool;
610 fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; 598 fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
611 599
612 pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, 600 pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
613 fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
614 if (!pool) 601 if (!pool)
615 goto err_out_free_dflt_pool; 602 goto err_out_free_dflt_pool;
616 fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; 603 fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
@@ -876,7 +863,7 @@ static int __init fnic_init_module(void)
876 len = sizeof(struct fnic_dflt_sgl_list); 863 len = sizeof(struct fnic_dflt_sgl_list);
877 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create 864 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
878 ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, 865 ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
879 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, 866 SLAB_HWCACHE_ALIGN,
880 NULL); 867 NULL);
881 if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) { 868 if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
882 printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n"); 869 printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
@@ -888,7 +875,7 @@ static int __init fnic_init_module(void)
888 len = sizeof(struct fnic_sgl_list); 875 len = sizeof(struct fnic_sgl_list);
889 fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create 876 fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
890 ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, 877 ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
891 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, 878 SLAB_HWCACHE_ALIGN,
892 NULL); 879 NULL);
893 if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { 880 if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
894 printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); 881 printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 538b31c2cf58..c40ce52ed7c6 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -406,7 +406,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
406 if (sg_count) { 406 if (sg_count) {
407 io_req->sgl_list = 407 io_req->sgl_list =
408 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], 408 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
409 GFP_ATOMIC | GFP_DMA); 409 GFP_ATOMIC);
410 if (!io_req->sgl_list) { 410 if (!io_req->sgl_list) {
411 ret = SCSI_MLQUEUE_HOST_BUSY; 411 ret = SCSI_MLQUEUE_HOST_BUSY;
412 scsi_dma_unmap(sc); 412 scsi_dma_unmap(sc);
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index df6bff7366cf..89700cbca16e 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -64,7 +64,8 @@ static void iscsi_boot_kobj_release(struct kobject *kobj)
64 struct iscsi_boot_kobj *boot_kobj = 64 struct iscsi_boot_kobj *boot_kobj =
65 container_of(kobj, struct iscsi_boot_kobj, kobj); 65 container_of(kobj, struct iscsi_boot_kobj, kobj);
66 66
67 kfree(boot_kobj->data); 67 if (boot_kobj->release)
68 boot_kobj->release(boot_kobj->data);
68 kfree(boot_kobj); 69 kfree(boot_kobj);
69} 70}
70 71
@@ -305,7 +306,8 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
305 struct attribute_group *attr_group, 306 struct attribute_group *attr_group,
306 const char *name, int index, void *data, 307 const char *name, int index, void *data,
307 ssize_t (*show) (void *data, int type, char *buf), 308 ssize_t (*show) (void *data, int type, char *buf),
308 mode_t (*is_visible) (void *data, int type)) 309 mode_t (*is_visible) (void *data, int type),
310 void (*release) (void *data))
309{ 311{
310 struct iscsi_boot_kobj *boot_kobj; 312 struct iscsi_boot_kobj *boot_kobj;
311 313
@@ -323,6 +325,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
323 boot_kobj->data = data; 325 boot_kobj->data = data;
324 boot_kobj->show = show; 326 boot_kobj->show = show;
325 boot_kobj->is_visible = is_visible; 327 boot_kobj->is_visible = is_visible;
328 boot_kobj->release = release;
326 329
327 if (sysfs_create_group(&boot_kobj->kobj, attr_group)) { 330 if (sysfs_create_group(&boot_kobj->kobj, attr_group)) {
328 /* 331 /*
@@ -331,7 +334,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
331 * the boot kobj was not setup and the normal release 334 * the boot kobj was not setup and the normal release
332 * path is not being run. 335 * path is not being run.
333 */ 336 */
334 boot_kobj->data = NULL; 337 boot_kobj->release = NULL;
335 kobject_put(&boot_kobj->kobj); 338 kobject_put(&boot_kobj->kobj);
336 return NULL; 339 return NULL;
337 } 340 }
@@ -357,6 +360,7 @@ static void iscsi_boot_remove_kobj(struct iscsi_boot_kobj *boot_kobj)
357 * @data: driver specific data for target 360 * @data: driver specific data for target
358 * @show: attr show function 361 * @show: attr show function
359 * @is_visible: attr visibility function 362 * @is_visible: attr visibility function
363 * @release: release function
360 * 364 *
361 * Note: The boot sysfs lib will free the data passed in for the caller 365 * Note: The boot sysfs lib will free the data passed in for the caller
362 * when all refs to the target kobject have been released. 366 * when all refs to the target kobject have been released.
@@ -365,10 +369,12 @@ struct iscsi_boot_kobj *
365iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, 369iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
366 void *data, 370 void *data,
367 ssize_t (*show) (void *data, int type, char *buf), 371 ssize_t (*show) (void *data, int type, char *buf),
368 mode_t (*is_visible) (void *data, int type)) 372 mode_t (*is_visible) (void *data, int type),
373 void (*release) (void *data))
369{ 374{
370 return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group, 375 return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group,
371 "target%d", index, data, show, is_visible); 376 "target%d", index, data, show, is_visible,
377 release);
372} 378}
373EXPORT_SYMBOL_GPL(iscsi_boot_create_target); 379EXPORT_SYMBOL_GPL(iscsi_boot_create_target);
374 380
@@ -379,6 +385,7 @@ EXPORT_SYMBOL_GPL(iscsi_boot_create_target);
379 * @data: driver specific data 385 * @data: driver specific data
380 * @show: attr show function 386 * @show: attr show function
381 * @is_visible: attr visibility function 387 * @is_visible: attr visibility function
388 * @release: release function
382 * 389 *
383 * Note: The boot sysfs lib will free the data passed in for the caller 390 * Note: The boot sysfs lib will free the data passed in for the caller
384 * when all refs to the initiator kobject have been released. 391 * when all refs to the initiator kobject have been released.
@@ -387,12 +394,13 @@ struct iscsi_boot_kobj *
387iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, 394iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
388 void *data, 395 void *data,
389 ssize_t (*show) (void *data, int type, char *buf), 396 ssize_t (*show) (void *data, int type, char *buf),
390 mode_t (*is_visible) (void *data, int type)) 397 mode_t (*is_visible) (void *data, int type),
398 void (*release) (void *data))
391{ 399{
392 return iscsi_boot_create_kobj(boot_kset, 400 return iscsi_boot_create_kobj(boot_kset,
393 &iscsi_boot_initiator_attr_group, 401 &iscsi_boot_initiator_attr_group,
394 "initiator", index, data, show, 402 "initiator", index, data, show,
395 is_visible); 403 is_visible, release);
396} 404}
397EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator); 405EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator);
398 406
@@ -403,6 +411,7 @@ EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator);
403 * @data: driver specific data 411 * @data: driver specific data
404 * @show: attr show function 412 * @show: attr show function
405 * @is_visible: attr visibility function 413 * @is_visible: attr visibility function
414 * @release: release function
406 * 415 *
407 * Note: The boot sysfs lib will free the data passed in for the caller 416 * Note: The boot sysfs lib will free the data passed in for the caller
408 * when all refs to the ethernet kobject have been released. 417 * when all refs to the ethernet kobject have been released.
@@ -411,12 +420,13 @@ struct iscsi_boot_kobj *
411iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, 420iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
412 void *data, 421 void *data,
413 ssize_t (*show) (void *data, int type, char *buf), 422 ssize_t (*show) (void *data, int type, char *buf),
414 mode_t (*is_visible) (void *data, int type)) 423 mode_t (*is_visible) (void *data, int type),
424 void (*release) (void *data))
415{ 425{
416 return iscsi_boot_create_kobj(boot_kset, 426 return iscsi_boot_create_kobj(boot_kset,
417 &iscsi_boot_ethernet_attr_group, 427 &iscsi_boot_ethernet_attr_group,
418 "ethernet%d", index, data, show, 428 "ethernet%d", index, data, show,
419 is_visible); 429 is_visible, release);
420} 430}
421EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet); 431EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet);
422 432
@@ -472,6 +482,9 @@ void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset)
472{ 482{
473 struct iscsi_boot_kobj *boot_kobj, *tmp_kobj; 483 struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
474 484
485 if (!boot_kset)
486 return;
487
475 list_for_each_entry_safe(boot_kobj, tmp_kobj, 488 list_for_each_entry_safe(boot_kobj, tmp_kobj,
476 &boot_kset->kobj_list, list) 489 &boot_kset->kobj_list, list)
477 iscsi_boot_remove_kobj(boot_kobj); 490 iscsi_boot_remove_kobj(boot_kobj);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 3df985305f69..7724414588fa 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -107,10 +107,12 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
107 * If the socket is in CLOSE or CLOSE_WAIT we should 107 * If the socket is in CLOSE or CLOSE_WAIT we should
108 * not close the connection if there is still some 108 * not close the connection if there is still some
109 * data pending. 109 * data pending.
110 *
111 * Must be called with sk_callback_lock.
110 */ 112 */
111static inline int iscsi_sw_sk_state_check(struct sock *sk) 113static inline int iscsi_sw_sk_state_check(struct sock *sk)
112{ 114{
113 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data; 115 struct iscsi_conn *conn = sk->sk_user_data;
114 116
115 if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && 117 if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
116 !atomic_read(&sk->sk_rmem_alloc)) { 118 !atomic_read(&sk->sk_rmem_alloc)) {
@@ -123,11 +125,17 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
123 125
124static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) 126static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
125{ 127{
126 struct iscsi_conn *conn = sk->sk_user_data; 128 struct iscsi_conn *conn;
127 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 129 struct iscsi_tcp_conn *tcp_conn;
128 read_descriptor_t rd_desc; 130 read_descriptor_t rd_desc;
129 131
130 read_lock(&sk->sk_callback_lock); 132 read_lock(&sk->sk_callback_lock);
133 conn = sk->sk_user_data;
134 if (!conn) {
135 read_unlock(&sk->sk_callback_lock);
136 return;
137 }
138 tcp_conn = conn->dd_data;
131 139
132 /* 140 /*
133 * Use rd_desc to pass 'conn' to iscsi_tcp_recv. 141 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
@@ -141,11 +149,10 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
141 149
142 iscsi_sw_sk_state_check(sk); 150 iscsi_sw_sk_state_check(sk);
143 151
144 read_unlock(&sk->sk_callback_lock);
145
146 /* If we had to (atomically) map a highmem page, 152 /* If we had to (atomically) map a highmem page,
147 * unmap it now. */ 153 * unmap it now. */
148 iscsi_tcp_segment_unmap(&tcp_conn->in.segment); 154 iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
155 read_unlock(&sk->sk_callback_lock);
149} 156}
150 157
151static void iscsi_sw_tcp_state_change(struct sock *sk) 158static void iscsi_sw_tcp_state_change(struct sock *sk)
@@ -157,8 +164,11 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
157 void (*old_state_change)(struct sock *); 164 void (*old_state_change)(struct sock *);
158 165
159 read_lock(&sk->sk_callback_lock); 166 read_lock(&sk->sk_callback_lock);
160 167 conn = sk->sk_user_data;
161 conn = (struct iscsi_conn*)sk->sk_user_data; 168 if (!conn) {
169 read_unlock(&sk->sk_callback_lock);
170 return;
171 }
162 session = conn->session; 172 session = conn->session;
163 173
164 iscsi_sw_sk_state_check(sk); 174 iscsi_sw_sk_state_check(sk);
@@ -178,11 +188,25 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
178 **/ 188 **/
179static void iscsi_sw_tcp_write_space(struct sock *sk) 189static void iscsi_sw_tcp_write_space(struct sock *sk)
180{ 190{
181 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data; 191 struct iscsi_conn *conn;
182 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 192 struct iscsi_tcp_conn *tcp_conn;
183 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; 193 struct iscsi_sw_tcp_conn *tcp_sw_conn;
194 void (*old_write_space)(struct sock *);
195
196 read_lock_bh(&sk->sk_callback_lock);
197 conn = sk->sk_user_data;
198 if (!conn) {
199 read_unlock_bh(&sk->sk_callback_lock);
200 return;
201 }
202
203 tcp_conn = conn->dd_data;
204 tcp_sw_conn = tcp_conn->dd_data;
205 old_write_space = tcp_sw_conn->old_write_space;
206 read_unlock_bh(&sk->sk_callback_lock);
207
208 old_write_space(sk);
184 209
185 tcp_sw_conn->old_write_space(sk);
186 ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n"); 210 ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
187 iscsi_conn_queue_work(conn); 211 iscsi_conn_queue_work(conn);
188} 212}
@@ -592,20 +616,17 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
592 /* userspace may have goofed up and not bound us */ 616 /* userspace may have goofed up and not bound us */
593 if (!sock) 617 if (!sock)
594 return; 618 return;
595 /*
596 * Make sure our recv side is stopped.
597 * Older tools called conn stop before ep_disconnect
598 * so IO could still be coming in.
599 */
600 write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
601 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
602 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
603 619
604 sock->sk->sk_err = EIO; 620 sock->sk->sk_err = EIO;
605 wake_up_interruptible(sk_sleep(sock->sk)); 621 wake_up_interruptible(sk_sleep(sock->sk));
606 622
607 iscsi_conn_stop(cls_conn, flag); 623 /* stop xmit side */
624 iscsi_suspend_tx(conn);
625
626 /* stop recv side and release socket */
608 iscsi_sw_tcp_release_conn(conn); 627 iscsi_sw_tcp_release_conn(conn);
628
629 iscsi_conn_stop(cls_conn, flag);
609} 630}
610 631
611static int 632static int
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 3b8a6451ea28..f5a0665b6773 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -965,8 +965,30 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
965 sp = &ep->seq; 965 sp = &ep->seq;
966 if (sp->id != fh->fh_seq_id) { 966 if (sp->id != fh->fh_seq_id) {
967 atomic_inc(&mp->stats.seq_not_found); 967 atomic_inc(&mp->stats.seq_not_found);
968 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */ 968 if (f_ctl & FC_FC_END_SEQ) {
969 goto rel; 969 /*
970 * Update sequence_id based on incoming last
971 * frame of sequence exchange. This is needed
972 * for FCoE target where DDP has been used
973 * on target where, stack is indicated only
974 * about last frame's (payload _header) header.
975 * Whereas "seq_id" which is part of
976 * frame_header is allocated by initiator
977 * which is totally different from "seq_id"
978 * allocated when XFER_RDY was sent by target.
979 * To avoid false -ve which results into not
980 * sending RSP, hence write request on other
981 * end never finishes.
982 */
983 spin_lock_bh(&ep->ex_lock);
984 sp->ssb_stat |= SSB_ST_RESP;
985 sp->id = fh->fh_seq_id;
986 spin_unlock_bh(&ep->ex_lock);
987 } else {
988 /* sequence/exch should exist */
989 reject = FC_RJT_SEQ_ID;
990 goto rel;
991 }
970 } 992 }
971 } 993 }
972 WARN_ON(ep != fc_seq_exch(sp)); 994 WARN_ON(ep != fc_seq_exch(sp));
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 389ab80aef0a..e008b1673507 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1025,6 +1025,8 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
1025 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); 1025 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
1026 } 1026 }
1027 fc_lport_state_enter(lport, LPORT_ST_RESET); 1027 fc_lport_state_enter(lport, LPORT_ST_RESET);
1028 fc_host_post_event(lport->host, fc_get_event_number(),
1029 FCH_EVT_LIPRESET, 0);
1028 fc_vports_linkchange(lport); 1030 fc_vports_linkchange(lport);
1029 fc_lport_reset_locked(lport); 1031 fc_lport_reset_locked(lport);
1030 if (lport->link_up) 1032 if (lport->link_up)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 01e13a2eb93a..760db7619446 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -789,6 +789,20 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
789 789
790 switch (rdata->rp_state) { 790 switch (rdata->rp_state) {
791 case RPORT_ST_INIT: 791 case RPORT_ST_INIT:
792 /*
793 * If received the FLOGI request on RPORT which is INIT state
794 * (means not transition to FLOGI either fc_rport timeout
795 * function didn;t trigger or this end hasn;t received
796 * beacon yet from other end. In that case only, allow RPORT
797 * state machine to continue, otherwise fall through which
798 * causes the code to send reject response.
799 * NOTE; Not checking for FIP->state such as VNMP_UP or
800 * VNMP_CLAIM because if FIP state is not one of those,
801 * RPORT wouldn;t have created and 'rport_lookup' would have
802 * failed anyway in that case.
803 */
804 if (lport->point_to_multipoint)
805 break;
792 case RPORT_ST_DELETE: 806 case RPORT_ST_DELETE:
793 mutex_unlock(&rdata->rp_mutex); 807 mutex_unlock(&rdata->rp_mutex);
794 rjt_data.reason = ELS_RJT_FIP; 808 rjt_data.reason = ELS_RJT_FIP;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 0c550d5b9133..d7a4120034a2 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -169,7 +169,7 @@ void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t
169 hdr->datasn = cpu_to_be32(r2t->datasn); 169 hdr->datasn = cpu_to_be32(r2t->datasn);
170 r2t->datasn++; 170 r2t->datasn++;
171 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 171 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
172 memcpy(hdr->lun, task->lun, sizeof(hdr->lun)); 172 hdr->lun = task->lun;
173 hdr->itt = task->hdr_itt; 173 hdr->itt = task->hdr_itt;
174 hdr->exp_statsn = r2t->exp_statsn; 174 hdr->exp_statsn = r2t->exp_statsn;
175 hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent); 175 hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
@@ -296,7 +296,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
296 /* 296 /*
297 * Allow PDUs for unrelated LUNs 297 * Allow PDUs for unrelated LUNs
298 */ 298 */
299 hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun); 299 hdr_lun = scsilun_to_int(&tmf->lun);
300 if (hdr_lun != task->sc->device->lun) 300 if (hdr_lun != task->sc->device->lun)
301 return 0; 301 return 0;
302 /* fall through */ 302 /* fall through */
@@ -389,8 +389,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
389 return rc; 389 return rc;
390 hdr->opcode = ISCSI_OP_SCSI_CMD; 390 hdr->opcode = ISCSI_OP_SCSI_CMD;
391 hdr->flags = ISCSI_ATTR_SIMPLE; 391 hdr->flags = ISCSI_ATTR_SIMPLE;
392 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 392 int_to_scsilun(sc->device->lun, &hdr->lun);
393 memcpy(task->lun, hdr->lun, sizeof(task->lun)); 393 task->lun = hdr->lun;
394 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 394 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
395 cmd_len = sc->cmd_len; 395 cmd_len = sc->cmd_len;
396 if (cmd_len < ISCSI_CDB_SIZE) 396 if (cmd_len < ISCSI_CDB_SIZE)
@@ -968,7 +968,7 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
968 hdr.flags = ISCSI_FLAG_CMD_FINAL; 968 hdr.flags = ISCSI_FLAG_CMD_FINAL;
969 969
970 if (rhdr) { 970 if (rhdr) {
971 memcpy(hdr.lun, rhdr->lun, 8); 971 hdr.lun = rhdr->lun;
972 hdr.ttt = rhdr->ttt; 972 hdr.ttt = rhdr->ttt;
973 hdr.itt = RESERVED_ITT; 973 hdr.itt = RESERVED_ITT;
974 } else 974 } else
@@ -2092,7 +2092,7 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
2092 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; 2092 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2093 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; 2093 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
2094 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2094 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2095 memcpy(hdr->lun, task->lun, sizeof(hdr->lun)); 2095 hdr->lun = task->lun;
2096 hdr->rtt = task->hdr_itt; 2096 hdr->rtt = task->hdr_itt;
2097 hdr->refcmdsn = task->cmdsn; 2097 hdr->refcmdsn = task->cmdsn;
2098} 2098}
@@ -2233,7 +2233,7 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2233 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; 2233 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
2234 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK; 2234 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
2235 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2235 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2236 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 2236 int_to_scsilun(sc->device->lun, &hdr->lun);
2237 hdr->rtt = RESERVED_ITT; 2237 hdr->rtt = RESERVED_ITT;
2238} 2238}
2239 2239
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index e98ae33f1295..09b232fd9a1b 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -1084,7 +1084,8 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
1084 struct iscsi_cls_conn *cls_conn; 1084 struct iscsi_cls_conn *cls_conn;
1085 struct iscsi_tcp_conn *tcp_conn; 1085 struct iscsi_tcp_conn *tcp_conn;
1086 1086
1087 cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx); 1087 cls_conn = iscsi_conn_setup(cls_session,
1088 sizeof(*tcp_conn) + dd_data_size, conn_idx);
1088 if (!cls_conn) 1089 if (!cls_conn)
1089 return NULL; 1090 return NULL;
1090 conn = cls_conn->dd_data; 1091 conn = cls_conn->dd_data;
@@ -1096,22 +1097,13 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
1096 1097
1097 tcp_conn = conn->dd_data; 1098 tcp_conn = conn->dd_data;
1098 tcp_conn->iscsi_conn = conn; 1099 tcp_conn->iscsi_conn = conn;
1099 1100 tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn);
1100 tcp_conn->dd_data = kzalloc(dd_data_size, GFP_KERNEL);
1101 if (!tcp_conn->dd_data) {
1102 iscsi_conn_teardown(cls_conn);
1103 return NULL;
1104 }
1105 return cls_conn; 1101 return cls_conn;
1106} 1102}
1107EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup); 1103EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
1108 1104
1109void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn) 1105void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
1110{ 1106{
1111 struct iscsi_conn *conn = cls_conn->dd_data;
1112 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1113
1114 kfree(tcp_conn->dd_data);
1115 iscsi_conn_teardown(cls_conn); 1107 iscsi_conn_teardown(cls_conn);
1116} 1108}
1117EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown); 1109EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index ffe82d169b40..30b25c5fdd7e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1147,7 +1147,8 @@ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
1147{ 1147{
1148 char mybuf[64]; 1148 char mybuf[64];
1149 char *pbuf, *step_str; 1149 char *pbuf, *step_str;
1150 int bsize, i; 1150 int i;
1151 size_t bsize;
1151 1152
1152 /* Protect copy from user */ 1153 /* Protect copy from user */
1153 if (!access_ok(VERIFY_READ, buf, nbytes)) 1154 if (!access_ok(VERIFY_READ, buf, nbytes))
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index bf2a1c516293..af3a6af97cc7 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -215,13 +215,6 @@ static int __init mac_scsi_setup(char *str) {
215__setup("mac5380=", mac_scsi_setup); 215__setup("mac5380=", mac_scsi_setup);
216 216
217/* 217/*
218 * If you want to find the instance with (k)gdb ...
219 */
220#if NDEBUG
221static struct Scsi_Host *default_instance;
222#endif
223
224/*
225 * Function : int macscsi_detect(struct scsi_host_template * tpnt) 218 * Function : int macscsi_detect(struct scsi_host_template * tpnt)
226 * 219 *
227 * Purpose : initializes mac NCR5380 driver based on the 220 * Purpose : initializes mac NCR5380 driver based on the
@@ -233,7 +226,7 @@ static struct Scsi_Host *default_instance;
233 * 226 *
234 */ 227 */
235 228
236int macscsi_detect(struct scsi_host_template * tpnt) 229int __init macscsi_detect(struct scsi_host_template * tpnt)
237{ 230{
238 static int called = 0; 231 static int called = 0;
239 int flags = 0; 232 int flags = 0;
@@ -268,10 +261,7 @@ int macscsi_detect(struct scsi_host_template * tpnt)
268 /* Once we support multiple 5380s (e.g. DuoDock) we'll do 261 /* Once we support multiple 5380s (e.g. DuoDock) we'll do
269 something different here */ 262 something different here */
270 instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); 263 instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
271#if NDEBUG 264
272 default_instance = instance;
273#endif
274
275 if (macintosh_config->ident == MAC_MODEL_IIFX) { 265 if (macintosh_config->ident == MAC_MODEL_IIFX) {
276 mac_scsi_regp = via1+0x8000; 266 mac_scsi_regp = via1+0x8000;
277 mac_scsi_drq = via1+0xE000; 267 mac_scsi_drq = via1+0xE000;
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index a3e60385787f..3105d5e8d908 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.17 11 * mpi2.h Version: 02.00.18
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -64,6 +64,8 @@
64 * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. 64 * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
65 * Added alternative defines for the SGE Direction bit. 65 * Added alternative defines for the SGE Direction bit.
66 * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. 66 * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
67 * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
68 * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
67 * -------------------------------------------------------------------------- 69 * --------------------------------------------------------------------------
68 */ 70 */
69 71
@@ -89,7 +91,7 @@
89#define MPI2_VERSION_02_00 (0x0200) 91#define MPI2_VERSION_02_00 (0x0200)
90 92
91/* versioning for this MPI header set */ 93/* versioning for this MPI header set */
92#define MPI2_HEADER_VERSION_UNIT (0x11) 94#define MPI2_HEADER_VERSION_UNIT (0x12)
93#define MPI2_HEADER_VERSION_DEV (0x00) 95#define MPI2_HEADER_VERSION_DEV (0x00)
94#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 96#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
95#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 97#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -1060,10 +1062,14 @@ typedef struct _MPI2_IEEE_SGE_UNION
1060 1062
1061#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) 1063#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
1062#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) 1064#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
1065 /* IEEE Simple Element only */
1063#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) 1066#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
1067 /* IEEE Simple Element only */
1064#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) 1068#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
1065#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) 1069#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
1066 1070 /* IEEE Simple Element only */
1071#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (0x03)
1072 /* IEEE Chain Element only */
1067 1073
1068/**************************************************************************** 1074/****************************************************************************
1069* IEEE SGE operation Macros 1075* IEEE SGE operation Macros
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index f5b9c766e28f..61475a6480e3 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.16 9 * mpi2_cnfg.h Version: 02.00.17
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -127,6 +127,13 @@
127 * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. 127 * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
128 * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) 128 * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
129 * defines. 129 * defines.
130 * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
131 * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
132 * the Pinout field.
133 * Added BoardTemperature and BoardTemperatureUnits fields
134 * to MPI2_CONFIG_PAGE_IO_UNIT_7.
135 * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
136 * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
130 * -------------------------------------------------------------------------- 137 * --------------------------------------------------------------------------
131 */ 138 */
132 139
@@ -210,6 +217,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
210#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) 217#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
211#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) 218#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
212#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) 219#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
220#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
213 221
214 222
215/***************************************************************************** 223/*****************************************************************************
@@ -612,23 +620,31 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO
612 U32 Pinout; /* 0x00 */ 620 U32 Pinout; /* 0x00 */
613 U8 Connector[16]; /* 0x04 */ 621 U8 Connector[16]; /* 0x04 */
614 U8 Location; /* 0x14 */ 622 U8 Location; /* 0x14 */
615 U8 Reserved1; /* 0x15 */ 623 U8 ReceptacleID; /* 0x15 */
616 U16 Slot; /* 0x16 */ 624 U16 Slot; /* 0x16 */
617 U32 Reserved2; /* 0x18 */ 625 U32 Reserved2; /* 0x18 */
618} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO, 626} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
619 Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t; 627 Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t;
620 628
621/* defines for the Pinout field */ 629/* defines for the Pinout field */
622#define MPI2_MANPAGE7_PINOUT_SFF_8484_L4 (0x00080000) 630#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00)
623#define MPI2_MANPAGE7_PINOUT_SFF_8484_L3 (0x00040000) 631#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
624#define MPI2_MANPAGE7_PINOUT_SFF_8484_L2 (0x00020000) 632
625#define MPI2_MANPAGE7_PINOUT_SFF_8484_L1 (0x00010000) 633#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF)
626#define MPI2_MANPAGE7_PINOUT_SFF_8470_L4 (0x00000800) 634#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00)
627#define MPI2_MANPAGE7_PINOUT_SFF_8470_L3 (0x00000400) 635#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01)
628#define MPI2_MANPAGE7_PINOUT_SFF_8470_L2 (0x00000200) 636#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02)
629#define MPI2_MANPAGE7_PINOUT_SFF_8470_L1 (0x00000100) 637#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03)
630#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x00000002) 638#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04)
631#define MPI2_MANPAGE7_PINOUT_CONNECTION_UNKNOWN (0x00000001) 639#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05)
640#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06)
641#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07)
642#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08)
643#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09)
644#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A)
645#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
646#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
647#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
632 648
633/* defines for the Location field */ 649/* defines for the Location field */
634#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01) 650#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
@@ -662,7 +678,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
662 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7, 678 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7,
663 Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t; 679 Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t;
664 680
665#define MPI2_MANUFACTURING7_PAGEVERSION (0x00) 681#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
666 682
667/* defines for the Flags field */ 683/* defines for the Flags field */
668#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) 684#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
@@ -849,11 +865,13 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
849 U16 IOCTemperature; /* 0x10 */ 865 U16 IOCTemperature; /* 0x10 */
850 U8 IOCTemperatureUnits; /* 0x12 */ 866 U8 IOCTemperatureUnits; /* 0x12 */
851 U8 IOCSpeed; /* 0x13 */ 867 U8 IOCSpeed; /* 0x13 */
852 U32 Reserved3; /* 0x14 */ 868 U16 BoardTemperature; /* 0x14 */
869 U8 BoardTemperatureUnits; /* 0x16 */
870 U8 Reserved3; /* 0x17 */
853} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, 871} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
854 Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; 872 Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
855 873
856#define MPI2_IOUNITPAGE7_PAGEVERSION (0x01) 874#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02)
857 875
858/* defines for IO Unit Page 7 PCIeWidth field */ 876/* defines for IO Unit Page 7 PCIeWidth field */
859#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) 877#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
@@ -881,7 +899,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
881#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) 899#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008)
882#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) 900#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004)
883 901
884
885/* defines for IO Unit Page 7 IOCTemperatureUnits field */ 902/* defines for IO Unit Page 7 IOCTemperatureUnits field */
886#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) 903#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
887#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) 904#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
@@ -893,6 +910,11 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
893#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) 910#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04)
894#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) 911#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08)
895 912
913/* defines for IO Unit Page 7 BoardTemperatureUnits field */
914#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00)
915#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
916#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
917
896 918
897 919
898/**************************************************************************** 920/****************************************************************************
@@ -2799,5 +2821,25 @@ typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
2799#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) 2821#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
2800 2822
2801 2823
2824/****************************************************************************
2825* Extended Manufacturing Config Pages
2826****************************************************************************/
2827
2828/*
2829 * Generic structure to use for product-specific extended manufacturing pages
2830 * (currently Extended Manufacturing Page 40 through Extended Manufacturing
2831 * Page 60).
2832 */
2833
2834typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
2835 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
2836 U32 ProductSpecificInfo; /* 0x08 */
2837} MPI2_CONFIG_PAGE_EXT_MAN_PS,
2838 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
2839 Mpi2ExtManufacturingPagePS_t,
2840 MPI2_POINTER pMpi2ExtManufacturingPagePS_t;
2841
2842/* PageVersion should be provided by product-specific code */
2843
2802#endif 2844#endif
2803 2845
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 165454d52591..de90162413c2 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
6 * Title: MPI SCSI initiator mode messages and structures 6 * Title: MPI SCSI initiator mode messages and structures
7 * Creation Date: June 23, 2006 7 * Creation Date: June 23, 2006
8 * 8 *
9 * mpi2_init.h Version: 02.00.10 9 * mpi2_init.h Version: 02.00.11
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -33,6 +33,7 @@
33 * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. 33 * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
34 * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. 34 * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
35 * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. 35 * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
36 * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
36 * -------------------------------------------------------------------------- 37 * --------------------------------------------------------------------------
37 */ 38 */
38 39
@@ -139,6 +140,9 @@ typedef struct _MPI2_SCSI_IO_REQUEST
139#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4) 140#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
140#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0) 141#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
141 142
143/* number of SGLOffset fields */
144#define MPI2_SCSIIO_NUM_SGLOFFSETS (4)
145
142/* SCSI IO IoFlags bits */ 146/* SCSI IO IoFlags bits */
143 147
144/* Large CDB Address Space */ 148/* Large CDB Address Space */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 761cbdb8a033..1f0c190d336e 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006 7 * Creation Date: October 11, 2006
8 * 8 *
9 * mpi2_ioc.h Version: 02.00.15 9 * mpi2_ioc.h Version: 02.00.16
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -103,6 +103,7 @@
103 * defines. 103 * defines.
104 * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. 104 * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
105 * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. 105 * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
106 * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
106 * -------------------------------------------------------------------------- 107 * --------------------------------------------------------------------------
107 */ 108 */
108 109
@@ -1032,6 +1033,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST
1032#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) 1033#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
1033#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) 1034#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
1034#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) 1035#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
1036#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
1035 1037
1036/* FWDownload TransactionContext Element */ 1038/* FWDownload TransactionContext Element */
1037typedef struct _MPI2_FW_DOWNLOAD_TCSGE 1039typedef struct _MPI2_FW_DOWNLOAD_TCSGE
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index efa0255491c2..83035bd1c489 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -94,7 +94,7 @@ module_param(diag_buffer_enable, int, 0);
94MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " 94MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
95 "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); 95 "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
96 96
97int mpt2sas_fwfault_debug; 97static int mpt2sas_fwfault_debug;
98MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " 98MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
99 "and halt firmware - (default=0)"); 99 "and halt firmware - (default=0)");
100 100
@@ -857,7 +857,7 @@ _base_interrupt(int irq, void *bus_id)
857 completed_cmds = 0; 857 completed_cmds = 0;
858 cb_idx = 0xFF; 858 cb_idx = 0xFF;
859 do { 859 do {
860 rd.word = rpf->Words; 860 rd.word = le64_to_cpu(rpf->Words);
861 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) 861 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
862 goto out; 862 goto out;
863 reply = 0; 863 reply = 0;
@@ -906,7 +906,7 @@ _base_interrupt(int irq, void *bus_id)
906 906
907 next: 907 next:
908 908
909 rpf->Words = ULLONG_MAX; 909 rpf->Words = cpu_to_le64(ULLONG_MAX);
910 ioc->reply_post_host_index = (ioc->reply_post_host_index == 910 ioc->reply_post_host_index = (ioc->reply_post_host_index ==
911 (ioc->reply_post_queue_depth - 1)) ? 0 : 911 (ioc->reply_post_queue_depth - 1)) ? 0 :
912 ioc->reply_post_host_index + 1; 912 ioc->reply_post_host_index + 1;
@@ -1740,9 +1740,11 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
1740static void 1740static void
1741_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) 1741_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
1742{ 1742{
1743 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL && 1743 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1744 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) { 1744 return;
1745 1745
1746 switch (ioc->pdev->device) {
1747 case MPI2_MFGPAGE_DEVID_SAS2008:
1746 switch (ioc->pdev->subsystem_device) { 1748 switch (ioc->pdev->subsystem_device) {
1747 case MPT2SAS_INTEL_RMS2LL080_SSDID: 1749 case MPT2SAS_INTEL_RMS2LL080_SSDID:
1748 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1750 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
@@ -1752,7 +1754,20 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
1752 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1754 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1753 MPT2SAS_INTEL_RMS2LL040_BRANDING); 1755 MPT2SAS_INTEL_RMS2LL040_BRANDING);
1754 break; 1756 break;
1757 default:
1758 break;
1759 }
1760 case MPI2_MFGPAGE_DEVID_SAS2308_2:
1761 switch (ioc->pdev->subsystem_device) {
1762 case MPT2SAS_INTEL_RS25GB008_SSDID:
1763 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1764 MPT2SAS_INTEL_RS25GB008_BRANDING);
1765 break;
1766 default:
1767 break;
1755 } 1768 }
1769 default:
1770 break;
1756 } 1771 }
1757} 1772}
1758 1773
@@ -1817,7 +1832,9 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1817 char desc[16]; 1832 char desc[16];
1818 u8 revision; 1833 u8 revision;
1819 u32 iounit_pg1_flags; 1834 u32 iounit_pg1_flags;
1835 u32 bios_version;
1820 1836
1837 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1821 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); 1838 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
1822 strncpy(desc, ioc->manu_pg0.ChipName, 16); 1839 strncpy(desc, ioc->manu_pg0.ChipName, 16);
1823 printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), " 1840 printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
@@ -1828,10 +1845,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1828 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 1845 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
1829 ioc->facts.FWVersion.Word & 0x000000FF, 1846 ioc->facts.FWVersion.Word & 0x000000FF,
1830 revision, 1847 revision,
1831 (ioc->bios_pg3.BiosVersion & 0xFF000000) >> 24, 1848 (bios_version & 0xFF000000) >> 24,
1832 (ioc->bios_pg3.BiosVersion & 0x00FF0000) >> 16, 1849 (bios_version & 0x00FF0000) >> 16,
1833 (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8, 1850 (bios_version & 0x0000FF00) >> 8,
1834 ioc->bios_pg3.BiosVersion & 0x000000FF); 1851 bios_version & 0x000000FF);
1835 1852
1836 _base_display_dell_branding(ioc); 1853 _base_display_dell_branding(ioc);
1837 _base_display_intel_branding(ioc); 1854 _base_display_intel_branding(ioc);
@@ -2150,7 +2167,7 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2150static int 2167static int
2151_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) 2168_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2152{ 2169{
2153 Mpi2IOCFactsReply_t *facts; 2170 struct mpt2sas_facts *facts;
2154 u32 queue_size, queue_diff; 2171 u32 queue_size, queue_diff;
2155 u16 max_sge_elements; 2172 u16 max_sge_elements;
2156 u16 num_of_reply_frames; 2173 u16 num_of_reply_frames;
@@ -2783,7 +2800,7 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
2783 int i; 2800 int i;
2784 u8 failed; 2801 u8 failed;
2785 u16 dummy; 2802 u16 dummy;
2786 u32 *mfp; 2803 __le32 *mfp;
2787 2804
2788 /* make sure doorbell is not in use */ 2805 /* make sure doorbell is not in use */
2789 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { 2806 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
@@ -2871,7 +2888,7 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
2871 writel(0, &ioc->chip->HostInterruptStatus); 2888 writel(0, &ioc->chip->HostInterruptStatus);
2872 2889
2873 if (ioc->logging_level & MPT_DEBUG_INIT) { 2890 if (ioc->logging_level & MPT_DEBUG_INIT) {
2874 mfp = (u32 *)reply; 2891 mfp = (__le32 *)reply;
2875 printk(KERN_INFO "\toffset:data\n"); 2892 printk(KERN_INFO "\toffset:data\n");
2876 for (i = 0; i < reply_bytes/4; i++) 2893 for (i = 0; i < reply_bytes/4; i++)
2877 printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, 2894 printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
@@ -3097,7 +3114,8 @@ static int
3097_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag) 3114_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
3098{ 3115{
3099 Mpi2PortFactsRequest_t mpi_request; 3116 Mpi2PortFactsRequest_t mpi_request;
3100 Mpi2PortFactsReply_t mpi_reply, *pfacts; 3117 Mpi2PortFactsReply_t mpi_reply;
3118 struct mpt2sas_port_facts *pfacts;
3101 int mpi_reply_sz, mpi_request_sz, r; 3119 int mpi_reply_sz, mpi_request_sz, r;
3102 3120
3103 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3121 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
@@ -3139,7 +3157,8 @@ static int
3139_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) 3157_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3140{ 3158{
3141 Mpi2IOCFactsRequest_t mpi_request; 3159 Mpi2IOCFactsRequest_t mpi_request;
3142 Mpi2IOCFactsReply_t mpi_reply, *facts; 3160 Mpi2IOCFactsReply_t mpi_reply;
3161 struct mpt2sas_facts *facts;
3143 int mpi_reply_sz, mpi_request_sz, r; 3162 int mpi_reply_sz, mpi_request_sz, r;
3144 3163
3145 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3164 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
@@ -3225,17 +3244,6 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3225 mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); 3244 mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3226 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 3245 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3227 3246
3228 /* In MPI Revision I (0xA), the SystemReplyFrameSize(offset 0x18) was
3229 * removed and made reserved. For those with older firmware will need
3230 * this fix. It was decided that the Reply and Request frame sizes are
3231 * the same.
3232 */
3233 if ((ioc->facts.HeaderVersion >> 8) < 0xA) {
3234 mpi_request.Reserved7 = cpu_to_le16(ioc->reply_sz);
3235/* mpi_request.SystemReplyFrameSize =
3236 * cpu_to_le16(ioc->reply_sz);
3237 */
3238 }
3239 3247
3240 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); 3248 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3241 mpi_request.ReplyDescriptorPostQueueDepth = 3249 mpi_request.ReplyDescriptorPostQueueDepth =
@@ -3243,25 +3251,17 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3243 mpi_request.ReplyFreeQueueDepth = 3251 mpi_request.ReplyFreeQueueDepth =
3244 cpu_to_le16(ioc->reply_free_queue_depth); 3252 cpu_to_le16(ioc->reply_free_queue_depth);
3245 3253
3246#if BITS_PER_LONG > 32
3247 mpi_request.SenseBufferAddressHigh = 3254 mpi_request.SenseBufferAddressHigh =
3248 cpu_to_le32(ioc->sense_dma >> 32); 3255 cpu_to_le32((u64)ioc->sense_dma >> 32);
3249 mpi_request.SystemReplyAddressHigh = 3256 mpi_request.SystemReplyAddressHigh =
3250 cpu_to_le32(ioc->reply_dma >> 32); 3257 cpu_to_le32((u64)ioc->reply_dma >> 32);
3251 mpi_request.SystemRequestFrameBaseAddress = 3258 mpi_request.SystemRequestFrameBaseAddress =
3252 cpu_to_le64(ioc->request_dma); 3259 cpu_to_le64((u64)ioc->request_dma);
3253 mpi_request.ReplyFreeQueueAddress = 3260 mpi_request.ReplyFreeQueueAddress =
3254 cpu_to_le64(ioc->reply_free_dma); 3261 cpu_to_le64((u64)ioc->reply_free_dma);
3255 mpi_request.ReplyDescriptorPostQueueAddress = 3262 mpi_request.ReplyDescriptorPostQueueAddress =
3256 cpu_to_le64(ioc->reply_post_free_dma); 3263 cpu_to_le64((u64)ioc->reply_post_free_dma);
3257#else 3264
3258 mpi_request.SystemRequestFrameBaseAddress =
3259 cpu_to_le32(ioc->request_dma);
3260 mpi_request.ReplyFreeQueueAddress =
3261 cpu_to_le32(ioc->reply_free_dma);
3262 mpi_request.ReplyDescriptorPostQueueAddress =
3263 cpu_to_le32(ioc->reply_post_free_dma);
3264#endif
3265 3265
3266 /* This time stamp specifies number of milliseconds 3266 /* This time stamp specifies number of milliseconds
3267 * since epoch ~ midnight January 1, 1970. 3267 * since epoch ~ midnight January 1, 1970.
@@ -3271,10 +3271,10 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3271 (current_time.tv_usec / 1000)); 3271 (current_time.tv_usec / 1000));
3272 3272
3273 if (ioc->logging_level & MPT_DEBUG_INIT) { 3273 if (ioc->logging_level & MPT_DEBUG_INIT) {
3274 u32 *mfp; 3274 __le32 *mfp;
3275 int i; 3275 int i;
3276 3276
3277 mfp = (u32 *)&mpi_request; 3277 mfp = (__le32 *)&mpi_request;
3278 printk(KERN_INFO "\toffset:data\n"); 3278 printk(KERN_INFO "\toffset:data\n");
3279 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) 3279 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3280 printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4, 3280 printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
@@ -3759,7 +3759,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3759 3759
3760 /* initialize Reply Post Free Queue */ 3760 /* initialize Reply Post Free Queue */
3761 for (i = 0; i < ioc->reply_post_queue_depth; i++) 3761 for (i = 0; i < ioc->reply_post_queue_depth; i++)
3762 ioc->reply_post_free[i].Words = ULLONG_MAX; 3762 ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX);
3763 3763
3764 r = _base_send_ioc_init(ioc, sleep_flag); 3764 r = _base_send_ioc_init(ioc, sleep_flag);
3765 if (r) 3765 if (r)
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index dcc289c25459..8d5be2120c63 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "08.100.00.02" 72#define MPT2SAS_DRIVER_VERSION "09.100.00.00"
73#define MPT2SAS_MAJOR_VERSION 08 73#define MPT2SAS_MAJOR_VERSION 09
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 02 76#define MPT2SAS_RELEASE_VERSION 00
77 77
78/* 78/*
79 * Set MPT2SAS_SG_DEPTH value based on user input. 79 * Set MPT2SAS_SG_DEPTH value based on user input.
@@ -161,12 +161,15 @@
161 "Intel Integrated RAID Module RMS2LL080" 161 "Intel Integrated RAID Module RMS2LL080"
162#define MPT2SAS_INTEL_RMS2LL040_BRANDING \ 162#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
163 "Intel Integrated RAID Module RMS2LL040" 163 "Intel Integrated RAID Module RMS2LL040"
164#define MPT2SAS_INTEL_RS25GB008_BRANDING \
165 "Intel(R) RAID Controller RS25GB008"
164 166
165/* 167/*
166 * Intel HBA SSDIDs 168 * Intel HBA SSDIDs
167 */ 169 */
168#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E 170#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
169#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F 171#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
172#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
170 173
171 174
172/* 175/*
@@ -541,6 +544,63 @@ struct _tr_list {
541 544
542typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); 545typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
543 546
547/* IOC Facts and Port Facts converted from little endian to cpu */
548union mpi2_version_union {
549 MPI2_VERSION_STRUCT Struct;
550 u32 Word;
551};
552
553struct mpt2sas_facts {
554 u16 MsgVersion;
555 u16 HeaderVersion;
556 u8 IOCNumber;
557 u8 VP_ID;
558 u8 VF_ID;
559 u16 IOCExceptions;
560 u16 IOCStatus;
561 u32 IOCLogInfo;
562 u8 MaxChainDepth;
563 u8 WhoInit;
564 u8 NumberOfPorts;
565 u8 MaxMSIxVectors;
566 u16 RequestCredit;
567 u16 ProductID;
568 u32 IOCCapabilities;
569 union mpi2_version_union FWVersion;
570 u16 IOCRequestFrameSize;
571 u16 Reserved3;
572 u16 MaxInitiators;
573 u16 MaxTargets;
574 u16 MaxSasExpanders;
575 u16 MaxEnclosures;
576 u16 ProtocolFlags;
577 u16 HighPriorityCredit;
578 u16 MaxReplyDescriptorPostQueueDepth;
579 u8 ReplyFrameSize;
580 u8 MaxVolumes;
581 u16 MaxDevHandle;
582 u16 MaxPersistentEntries;
583 u16 MinDevHandle;
584};
585
586struct mpt2sas_port_facts {
587 u8 PortNumber;
588 u8 VP_ID;
589 u8 VF_ID;
590 u8 PortType;
591 u16 MaxPostedCmdBuffers;
592};
593
594/**
595 * enum mutex_type - task management mutex type
596 * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it
597 * @TM_MUTEX_ON: mutex is required
598 */
599enum mutex_type {
600 TM_MUTEX_OFF = 0,
601 TM_MUTEX_ON = 1,
602};
603
544/** 604/**
545 * struct MPT2SAS_ADAPTER - per adapter struct 605 * struct MPT2SAS_ADAPTER - per adapter struct
546 * @list: ioc_list 606 * @list: ioc_list
@@ -703,6 +763,7 @@ struct MPT2SAS_ADAPTER {
703 /* misc flags */ 763 /* misc flags */
704 int aen_event_read_flag; 764 int aen_event_read_flag;
705 u8 broadcast_aen_busy; 765 u8 broadcast_aen_busy;
766 u16 broadcast_aen_pending;
706 u8 shost_recovery; 767 u8 shost_recovery;
707 768
708 struct mutex reset_in_progress_mutex; 769 struct mutex reset_in_progress_mutex;
@@ -749,8 +810,8 @@ struct MPT2SAS_ADAPTER {
749 u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; 810 u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
750 811
751 /* static config pages */ 812 /* static config pages */
752 Mpi2IOCFactsReply_t facts; 813 struct mpt2sas_facts facts;
753 Mpi2PortFactsReply_t *pfacts; 814 struct mpt2sas_port_facts *pfacts;
754 Mpi2ManufacturingPage0_t manu_pg0; 815 Mpi2ManufacturingPage0_t manu_pg0;
755 Mpi2BiosPage2_t bios_pg2; 816 Mpi2BiosPage2_t bios_pg2;
756 Mpi2BiosPage3_t bios_pg3; 817 Mpi2BiosPage3_t bios_pg3;
@@ -840,7 +901,7 @@ struct MPT2SAS_ADAPTER {
840 901
841 /* reply free queue */ 902 /* reply free queue */
842 u16 reply_free_queue_depth; 903 u16 reply_free_queue_depth;
843 u32 *reply_free; 904 __le32 *reply_free;
844 dma_addr_t reply_free_dma; 905 dma_addr_t reply_free_dma;
845 struct dma_pool *reply_free_dma_pool; 906 struct dma_pool *reply_free_dma_pool;
846 u32 reply_free_host_index; 907 u32 reply_free_host_index;
@@ -932,8 +993,8 @@ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
932u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, 993u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
933 u32 reply); 994 u32 reply);
934int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, 995int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
935 uint channel, uint id, uint lun, u8 type, u16 smid_task, 996 uint channel, uint id, uint lun, u8 type, u16 smid_task,
936 ulong timeout, struct scsi_cmnd *scmd); 997 ulong timeout, unsigned long serial_number, enum mutex_type m_type);
937void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); 998void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
938void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); 999void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
939void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); 1000void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 437c2d94c45a..38ed0260959d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -994,7 +994,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
994 mpt2sas_scsih_issue_tm(ioc, 994 mpt2sas_scsih_issue_tm(ioc,
995 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 995 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
996 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10, 996 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
997 NULL); 997 0, TM_MUTEX_ON);
998 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 998 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
999 } else 999 } else
1000 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1000 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
@@ -2706,13 +2706,13 @@ static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
2706 _ctl_ioc_reset_count_show, NULL); 2706 _ctl_ioc_reset_count_show, NULL);
2707 2707
2708struct DIAG_BUFFER_START { 2708struct DIAG_BUFFER_START {
2709 u32 Size; 2709 __le32 Size;
2710 u32 DiagVersion; 2710 __le32 DiagVersion;
2711 u8 BufferType; 2711 u8 BufferType;
2712 u8 Reserved[3]; 2712 u8 Reserved[3];
2713 u32 Reserved1; 2713 __le32 Reserved1;
2714 u32 Reserved2; 2714 __le32 Reserved2;
2715 u32 Reserved3; 2715 __le32 Reserved3;
2716}; 2716};
2717/** 2717/**
2718 * _ctl_host_trace_buffer_size_show - host buffer size (trace only) 2718 * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 3dcddfeb6f4c..9731f8e661bf 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -164,7 +164,7 @@ static inline void
164_debug_dump_mf(void *mpi_request, int sz) 164_debug_dump_mf(void *mpi_request, int sz)
165{ 165{
166 int i; 166 int i;
167 u32 *mfp = (u32 *)mpi_request; 167 __le32 *mfp = (__le32 *)mpi_request;
168 168
169 printk(KERN_INFO "mf:\n\t"); 169 printk(KERN_INFO "mf:\n\t");
170 for (i = 0; i < sz; i++) { 170 for (i = 0; i < sz; i++) {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index a7dbc6825f5f..939f283d0c28 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -94,6 +94,10 @@ static u32 logging_level;
94MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info " 94MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
95 "(default=0)"); 95 "(default=0)");
96 96
97static ushort max_sectors = 0xFFFF;
98module_param(max_sectors, ushort, 0);
99MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192");
100
97/* scsi-mid layer global parmeter is max_report_luns, which is 511 */ 101/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
98#define MPT2SAS_MAX_LUN (16895) 102#define MPT2SAS_MAX_LUN (16895)
99static int max_lun = MPT2SAS_MAX_LUN; 103static int max_lun = MPT2SAS_MAX_LUN;
@@ -1956,7 +1960,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
1956 case MPI2_RAID_VOL_TYPE_RAID1E: 1960 case MPI2_RAID_VOL_TYPE_RAID1E:
1957 qdepth = MPT2SAS_RAID_QUEUE_DEPTH; 1961 qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
1958 if (ioc->manu_pg10.OEMIdentifier && 1962 if (ioc->manu_pg10.OEMIdentifier &&
1959 (ioc->manu_pg10.GenericFlags0 & 1963 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
1960 MFG10_GF0_R10_DISPLAY) && 1964 MFG10_GF0_R10_DISPLAY) &&
1961 !(raid_device->num_pds % 2)) 1965 !(raid_device->num_pds % 2))
1962 r_level = "RAID10"; 1966 r_level = "RAID10";
@@ -2236,6 +2240,8 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2236 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2240 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2237 * @smid_task: smid assigned to the task 2241 * @smid_task: smid assigned to the task
2238 * @timeout: timeout in seconds 2242 * @timeout: timeout in seconds
2243 * @serial_number: the serial_number from scmd
2244 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
2239 * Context: user 2245 * Context: user
2240 * 2246 *
2241 * A generic API for sending task management requests to firmware. 2247 * A generic API for sending task management requests to firmware.
@@ -2247,17 +2253,18 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2247int 2253int
2248mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, 2254mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2249 uint id, uint lun, u8 type, u16 smid_task, ulong timeout, 2255 uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
2250 struct scsi_cmnd *scmd) 2256 unsigned long serial_number, enum mutex_type m_type)
2251{ 2257{
2252 Mpi2SCSITaskManagementRequest_t *mpi_request; 2258 Mpi2SCSITaskManagementRequest_t *mpi_request;
2253 Mpi2SCSITaskManagementReply_t *mpi_reply; 2259 Mpi2SCSITaskManagementReply_t *mpi_reply;
2254 u16 smid = 0; 2260 u16 smid = 0;
2255 u32 ioc_state; 2261 u32 ioc_state;
2256 unsigned long timeleft; 2262 unsigned long timeleft;
2257 struct scsi_cmnd *scmd_lookup; 2263 struct scsiio_tracker *scsi_lookup = NULL;
2258 int rc; 2264 int rc;
2259 2265
2260 mutex_lock(&ioc->tm_cmds.mutex); 2266 if (m_type == TM_MUTEX_ON)
2267 mutex_lock(&ioc->tm_cmds.mutex);
2261 if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) { 2268 if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
2262 printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n", 2269 printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
2263 __func__, ioc->name); 2270 __func__, ioc->name);
@@ -2277,18 +2284,18 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2277 if (ioc_state & MPI2_DOORBELL_USED) { 2284 if (ioc_state & MPI2_DOORBELL_USED) {
2278 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell " 2285 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
2279 "active!\n", ioc->name)); 2286 "active!\n", ioc->name));
2280 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2287 rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2281 FORCE_BIG_HAMMER); 2288 FORCE_BIG_HAMMER);
2282 rc = SUCCESS; 2289 rc = (!rc) ? SUCCESS : FAILED;
2283 goto err_out; 2290 goto err_out;
2284 } 2291 }
2285 2292
2286 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 2293 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2287 mpt2sas_base_fault_info(ioc, ioc_state & 2294 mpt2sas_base_fault_info(ioc, ioc_state &
2288 MPI2_DOORBELL_DATA_MASK); 2295 MPI2_DOORBELL_DATA_MASK);
2289 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2296 rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2290 FORCE_BIG_HAMMER); 2297 FORCE_BIG_HAMMER);
2291 rc = SUCCESS; 2298 rc = (!rc) ? SUCCESS : FAILED;
2292 goto err_out; 2299 goto err_out;
2293 } 2300 }
2294 2301
@@ -2300,6 +2307,9 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2300 goto err_out; 2307 goto err_out;
2301 } 2308 }
2302 2309
2310 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
2311 scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
2312
2303 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x)," 2313 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
2304 " task_type(0x%02x), smid(%d)\n", ioc->name, handle, type, 2314 " task_type(0x%02x), smid(%d)\n", ioc->name, handle, type,
2305 smid_task)); 2315 smid_task));
@@ -2307,6 +2317,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2307 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 2317 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2308 ioc->tm_cmds.smid = smid; 2318 ioc->tm_cmds.smid = smid;
2309 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); 2319 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2320 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2310 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 2321 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2311 mpi_request->DevHandle = cpu_to_le16(handle); 2322 mpi_request->DevHandle = cpu_to_le16(handle);
2312 mpi_request->TaskType = type; 2323 mpi_request->TaskType = type;
@@ -2322,9 +2333,9 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2322 _debug_dump_mf(mpi_request, 2333 _debug_dump_mf(mpi_request,
2323 sizeof(Mpi2SCSITaskManagementRequest_t)/4); 2334 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2324 if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) { 2335 if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) {
2325 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2336 rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2326 FORCE_BIG_HAMMER); 2337 FORCE_BIG_HAMMER);
2327 rc = SUCCESS; 2338 rc = (!rc) ? SUCCESS : FAILED;
2328 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 2339 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
2329 mpt2sas_scsih_clear_tm_flag(ioc, handle); 2340 mpt2sas_scsih_clear_tm_flag(ioc, handle);
2330 goto err_out; 2341 goto err_out;
@@ -2346,20 +2357,12 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2346 } 2357 }
2347 } 2358 }
2348 2359
2349 /* sanity check:
2350 * Check to see the commands were terminated.
2351 * This is only needed for eh callbacks, hence the scmd check.
2352 */
2353 rc = FAILED;
2354 if (scmd == NULL)
2355 goto bypass_sanity_checks;
2356 switch (type) { 2360 switch (type) {
2357 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: 2361 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
2358 scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task); 2362 rc = SUCCESS;
2359 if (scmd_lookup) 2363 if (scsi_lookup->scmd == NULL)
2360 rc = FAILED; 2364 break;
2361 else 2365 rc = FAILED;
2362 rc = SUCCESS;
2363 break; 2366 break;
2364 2367
2365 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 2368 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
@@ -2369,24 +2372,31 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2369 rc = SUCCESS; 2372 rc = SUCCESS;
2370 break; 2373 break;
2371 2374
2375 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2372 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 2376 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2373 if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)) 2377 if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
2374 rc = FAILED; 2378 rc = FAILED;
2375 else 2379 else
2376 rc = SUCCESS; 2380 rc = SUCCESS;
2377 break; 2381 break;
2382 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
2383 rc = SUCCESS;
2384 break;
2385 default:
2386 rc = FAILED;
2387 break;
2378 } 2388 }
2379 2389
2380 bypass_sanity_checks:
2381
2382 mpt2sas_scsih_clear_tm_flag(ioc, handle); 2390 mpt2sas_scsih_clear_tm_flag(ioc, handle);
2383 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 2391 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
2384 mutex_unlock(&ioc->tm_cmds.mutex); 2392 if (m_type == TM_MUTEX_ON)
2393 mutex_unlock(&ioc->tm_cmds.mutex);
2385 2394
2386 return rc; 2395 return rc;
2387 2396
2388 err_out: 2397 err_out:
2389 mutex_unlock(&ioc->tm_cmds.mutex); 2398 if (m_type == TM_MUTEX_ON)
2399 mutex_unlock(&ioc->tm_cmds.mutex);
2390 return rc; 2400 return rc;
2391} 2401}
2392 2402
@@ -2496,7 +2506,8 @@ _scsih_abort(struct scsi_cmnd *scmd)
2496 handle = sas_device_priv_data->sas_target->handle; 2506 handle = sas_device_priv_data->sas_target->handle;
2497 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, 2507 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
2498 scmd->device->id, scmd->device->lun, 2508 scmd->device->id, scmd->device->lun,
2499 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd); 2509 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
2510 scmd->serial_number, TM_MUTEX_ON);
2500 2511
2501 out: 2512 out:
2502 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", 2513 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2557,7 +2568,8 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
2557 2568
2558 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, 2569 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
2559 scmd->device->id, scmd->device->lun, 2570 scmd->device->id, scmd->device->lun,
2560 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd); 2571 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
2572 TM_MUTEX_ON);
2561 2573
2562 out: 2574 out:
2563 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", 2575 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2617,7 +2629,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
2617 2629
2618 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, 2630 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
2619 scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 2631 scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
2620 30, scmd); 2632 30, 0, TM_MUTEX_ON);
2621 2633
2622 out: 2634 out:
2623 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", 2635 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -2750,6 +2762,31 @@ _scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
2750} 2762}
2751 2763
2752/** 2764/**
2765 * _scsih_ublock_io_all_device - unblock every device
2766 * @ioc: per adapter object
2767 *
2768 * change the device state from block to running
2769 */
2770static void
2771_scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
2772{
2773 struct MPT2SAS_DEVICE *sas_device_priv_data;
2774 struct scsi_device *sdev;
2775
2776 shost_for_each_device(sdev, ioc->shost) {
2777 sas_device_priv_data = sdev->hostdata;
2778 if (!sas_device_priv_data)
2779 continue;
2780 if (!sas_device_priv_data->block)
2781 continue;
2782 sas_device_priv_data->block = 0;
2783 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, "
2784 "handle(0x%04x)\n",
2785 sas_device_priv_data->sas_target->handle));
2786 scsi_internal_device_unblock(sdev);
2787 }
2788}
2789/**
2753 * _scsih_ublock_io_device - set the device state to SDEV_RUNNING 2790 * _scsih_ublock_io_device - set the device state to SDEV_RUNNING
2754 * @ioc: per adapter object 2791 * @ioc: per adapter object
2755 * @handle: device handle 2792 * @handle: device handle
@@ -2779,6 +2816,34 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2779} 2816}
2780 2817
2781/** 2818/**
2819 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
2820 * @ioc: per adapter object
2821 * @handle: device handle
2822 *
2823 * During device pull we need to appropiately set the sdev state.
2824 */
2825static void
2826_scsih_block_io_all_device(struct MPT2SAS_ADAPTER *ioc)
2827{
2828 struct MPT2SAS_DEVICE *sas_device_priv_data;
2829 struct scsi_device *sdev;
2830
2831 shost_for_each_device(sdev, ioc->shost) {
2832 sas_device_priv_data = sdev->hostdata;
2833 if (!sas_device_priv_data)
2834 continue;
2835 if (sas_device_priv_data->block)
2836 continue;
2837 sas_device_priv_data->block = 1;
2838 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_blocked, "
2839 "handle(0x%04x)\n",
2840 sas_device_priv_data->sas_target->handle));
2841 scsi_internal_device_block(sdev);
2842 }
2843}
2844
2845
2846/**
2782 * _scsih_block_io_device - set the device state to SDEV_BLOCK 2847 * _scsih_block_io_device - set the device state to SDEV_BLOCK
2783 * @ioc: per adapter object 2848 * @ioc: per adapter object
2784 * @handle: device handle 2849 * @handle: device handle
@@ -3698,7 +3763,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3698 return 0; 3763 return 0;
3699 } 3764 }
3700 3765
3701 if (ioc->pci_error_recovery) { 3766 if (ioc->pci_error_recovery || ioc->remove_host) {
3702 scmd->result = DID_NO_CONNECT << 16; 3767 scmd->result = DID_NO_CONNECT << 16;
3703 scmd->scsi_done(scmd); 3768 scmd->scsi_done(scmd);
3704 return 0; 3769 return 0;
@@ -4598,7 +4663,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4598 Mpi2SasEnclosurePage0_t enclosure_pg0; 4663 Mpi2SasEnclosurePage0_t enclosure_pg0;
4599 u32 ioc_status; 4664 u32 ioc_status;
4600 u16 parent_handle; 4665 u16 parent_handle;
4601 __le64 sas_address, sas_address_parent = 0; 4666 u64 sas_address, sas_address_parent = 0;
4602 int i; 4667 int i;
4603 unsigned long flags; 4668 unsigned long flags;
4604 struct _sas_port *mpt2sas_port = NULL; 4669 struct _sas_port *mpt2sas_port = NULL;
@@ -5380,9 +5445,10 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
5380 break; 5445 break;
5381 } 5446 }
5382 printk(MPT2SAS_INFO_FMT "device status change: (%s)\n" 5447 printk(MPT2SAS_INFO_FMT "device status change: (%s)\n"
5383 "\thandle(0x%04x), sas address(0x%016llx)", ioc->name, 5448 "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
5384 reason_str, le16_to_cpu(event_data->DevHandle), 5449 ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
5385 (unsigned long long)le64_to_cpu(event_data->SASAddress)); 5450 (unsigned long long)le64_to_cpu(event_data->SASAddress),
5451 le16_to_cpu(event_data->TaskTag));
5386 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) 5452 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
5387 printk(MPT2SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name, 5453 printk(MPT2SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
5388 event_data->ASC, event_data->ASCQ); 5454 event_data->ASC, event_data->ASCQ);
@@ -5404,7 +5470,7 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
5404{ 5470{
5405 struct MPT2SAS_TARGET *target_priv_data; 5471 struct MPT2SAS_TARGET *target_priv_data;
5406 struct _sas_device *sas_device; 5472 struct _sas_device *sas_device;
5407 __le64 sas_address; 5473 u64 sas_address;
5408 unsigned long flags; 5474 unsigned long flags;
5409 Mpi2EventDataSasDeviceStatusChange_t *event_data = 5475 Mpi2EventDataSasDeviceStatusChange_t *event_data =
5410 fw_event->event_data; 5476 fw_event->event_data;
@@ -5522,25 +5588,38 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
5522 u32 termination_count; 5588 u32 termination_count;
5523 u32 query_count; 5589 u32 query_count;
5524 Mpi2SCSITaskManagementReply_t *mpi_reply; 5590 Mpi2SCSITaskManagementReply_t *mpi_reply;
5525#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
5526 Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; 5591 Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
5527#endif
5528 u16 ioc_status; 5592 u16 ioc_status;
5529 unsigned long flags; 5593 unsigned long flags;
5530 int r; 5594 int r;
5595 u8 max_retries = 0;
5596 u8 task_abort_retries;
5531 5597
5532 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primitive: " 5598 mutex_lock(&ioc->tm_cmds.mutex);
5533 "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, 5599 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: phy number(%d), "
5534 event_data->PortWidth)); 5600 "width(%d)\n", ioc->name, __func__, event_data->PhyNum,
5535 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, 5601 event_data->PortWidth));
5536 __func__)); 5602
5603 _scsih_block_io_all_device(ioc);
5537 5604
5538 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 5605 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5539 ioc->broadcast_aen_busy = 0; 5606 mpi_reply = ioc->tm_cmds.reply;
5607broadcast_aen_retry:
5608
5609 /* sanity checks for retrying this loop */
5610 if (max_retries++ == 5) {
5611 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: giving up\n",
5612 ioc->name, __func__));
5613 goto out;
5614 } else if (max_retries > 1)
5615 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %d retry\n",
5616 ioc->name, __func__, max_retries - 1));
5617
5540 termination_count = 0; 5618 termination_count = 0;
5541 query_count = 0; 5619 query_count = 0;
5542 mpi_reply = ioc->tm_cmds.reply;
5543 for (smid = 1; smid <= ioc->scsiio_depth; smid++) { 5620 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
5621 if (ioc->ioc_reset_in_progress_status)
5622 goto out;
5544 scmd = _scsih_scsi_lookup_get(ioc, smid); 5623 scmd = _scsih_scsi_lookup_get(ioc, smid);
5545 if (!scmd) 5624 if (!scmd)
5546 continue; 5625 continue;
@@ -5561,34 +5640,90 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
5561 lun = sas_device_priv_data->lun; 5640 lun = sas_device_priv_data->lun;
5562 query_count++; 5641 query_count++;
5563 5642
5643 if (ioc->ioc_reset_in_progress_status)
5644 goto out;
5645
5564 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 5646 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5565 mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, 5647 r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
5566 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL); 5648 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
5567 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 5649 TM_MUTEX_OFF);
5650 if (r == FAILED) {
5651 sdev_printk(KERN_WARNING, sdev,
5652 "mpt2sas_scsih_issue_tm: FAILED when sending "
5653 "QUERY_TASK: scmd(%p)\n", scmd);
5654 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5655 goto broadcast_aen_retry;
5656 }
5568 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) 5657 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
5569 & MPI2_IOCSTATUS_MASK; 5658 & MPI2_IOCSTATUS_MASK;
5570 if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) && 5659 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5571 (mpi_reply->ResponseCode == 5660 sdev_printk(KERN_WARNING, sdev, "query task: FAILED "
5661 "with IOCSTATUS(0x%04x), scmd(%p)\n", ioc_status,
5662 scmd);
5663 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5664 goto broadcast_aen_retry;
5665 }
5666
5667 /* see if IO is still owned by IOC and target */
5668 if (mpi_reply->ResponseCode ==
5572 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || 5669 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
5573 mpi_reply->ResponseCode == 5670 mpi_reply->ResponseCode ==
5574 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) { 5671 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
5575 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 5672 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5576 continue; 5673 continue;
5577 } 5674 }
5675 task_abort_retries = 0;
5676 tm_retry:
5677 if (task_abort_retries++ == 60) {
5678 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
5679 "%s: ABORT_TASK: giving up\n", ioc->name,
5680 __func__));
5681 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5682 goto broadcast_aen_retry;
5683 }
5684
5685 if (ioc->ioc_reset_in_progress_status)
5686 goto out_no_lock;
5687
5578 r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, 5688 r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
5579 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, 5689 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
5580 scmd); 5690 scmd->serial_number, TM_MUTEX_OFF);
5581 if (r == FAILED) 5691 if (r == FAILED) {
5582 sdev_printk(KERN_WARNING, sdev, "task abort: FAILED " 5692 sdev_printk(KERN_WARNING, sdev,
5693 "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : "
5583 "scmd(%p)\n", scmd); 5694 "scmd(%p)\n", scmd);
5695 goto tm_retry;
5696 }
5697
5698 if (task_abort_retries > 1)
5699 sdev_printk(KERN_WARNING, sdev,
5700 "mpt2sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
5701 " scmd(%p)\n",
5702 task_abort_retries - 1, scmd);
5703
5584 termination_count += le32_to_cpu(mpi_reply->TerminationCount); 5704 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
5585 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 5705 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5586 } 5706 }
5707
5708 if (ioc->broadcast_aen_pending) {
5709 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: loop back due to"
5710 " pending AEN\n", ioc->name, __func__));
5711 ioc->broadcast_aen_pending = 0;
5712 goto broadcast_aen_retry;
5713 }
5714
5715 out:
5587 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 5716 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5717 out_no_lock:
5588 5718
5589 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT 5719 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
5590 "%s - exit, query_count = %d termination_count = %d\n", 5720 "%s - exit, query_count = %d termination_count = %d\n",
5591 ioc->name, __func__, query_count, termination_count)); 5721 ioc->name, __func__, query_count, termination_count));
5722
5723 ioc->broadcast_aen_busy = 0;
5724 if (!ioc->ioc_reset_in_progress_status)
5725 _scsih_ublock_io_all_device(ioc);
5726 mutex_unlock(&ioc->tm_cmds.mutex);
5592} 5727}
5593 5728
5594/** 5729/**
@@ -6566,7 +6701,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
6566 Mpi2ExpanderPage0_t expander_pg0; 6701 Mpi2ExpanderPage0_t expander_pg0;
6567 Mpi2ConfigReply_t mpi_reply; 6702 Mpi2ConfigReply_t mpi_reply;
6568 u16 ioc_status; 6703 u16 ioc_status;
6569 __le64 sas_address; 6704 u64 sas_address;
6570 u16 handle; 6705 u16 handle;
6571 6706
6572 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__); 6707 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
@@ -6862,10 +6997,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
6862 mpi_reply->EventData; 6997 mpi_reply->EventData;
6863 6998
6864 if (baen_data->Primitive != 6999 if (baen_data->Primitive !=
6865 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT || 7000 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
6866 ioc->broadcast_aen_busy)
6867 return 1; 7001 return 1;
6868 ioc->broadcast_aen_busy = 1; 7002
7003 if (ioc->broadcast_aen_busy) {
7004 ioc->broadcast_aen_pending++;
7005 return 1;
7006 } else
7007 ioc->broadcast_aen_busy = 1;
6869 break; 7008 break;
6870 } 7009 }
6871 7010
@@ -7211,7 +7350,6 @@ _scsih_remove(struct pci_dev *pdev)
7211 } 7350 }
7212 7351
7213 sas_remove_host(shost); 7352 sas_remove_host(shost);
7214 _scsih_shutdown(pdev);
7215 list_del(&ioc->list); 7353 list_del(&ioc->list);
7216 scsi_remove_host(shost); 7354 scsi_remove_host(shost);
7217 scsi_host_put(shost); 7355 scsi_host_put(shost);
@@ -7436,6 +7574,25 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
7436 shost->transportt = mpt2sas_transport_template; 7574 shost->transportt = mpt2sas_transport_template;
7437 shost->unique_id = ioc->id; 7575 shost->unique_id = ioc->id;
7438 7576
7577 if (max_sectors != 0xFFFF) {
7578 if (max_sectors < 64) {
7579 shost->max_sectors = 64;
7580 printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
7581 "for max_sectors, range is 64 to 8192. Assigning "
7582 "value of 64.\n", ioc->name, max_sectors);
7583 } else if (max_sectors > 8192) {
7584 shost->max_sectors = 8192;
7585 printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
7586 "for max_sectors, range is 64 to 8192. Assigning "
7587 "default value of 8192.\n", ioc->name,
7588 max_sectors);
7589 } else {
7590 shost->max_sectors = max_sectors & 0xFFFE;
7591 printk(MPT2SAS_INFO_FMT "The max_sectors value is "
7592 "set to %d\n", ioc->name, shost->max_sectors);
7593 }
7594 }
7595
7439 if ((scsi_add_host(shost, &pdev->dev))) { 7596 if ((scsi_add_host(shost, &pdev->dev))) {
7440 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 7597 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
7441 ioc->name, __FILE__, __LINE__, __func__); 7598 ioc->name, __FILE__, __LINE__, __func__);
@@ -7505,7 +7662,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
7505{ 7662{
7506 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7663 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7507 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 7664 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
7508 u32 device_state; 7665 pci_power_t device_state;
7509 7666
7510 mpt2sas_base_stop_watchdog(ioc); 7667 mpt2sas_base_stop_watchdog(ioc);
7511 scsi_block_requests(shost); 7668 scsi_block_requests(shost);
@@ -7532,7 +7689,7 @@ _scsih_resume(struct pci_dev *pdev)
7532{ 7689{
7533 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7690 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7534 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 7691 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
7535 u32 device_state = pdev->current_state; 7692 pci_power_t device_state = pdev->current_state;
7536 int r; 7693 int r;
7537 7694
7538 printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, previous " 7695 printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, previous "
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index cb1cdecbe0f8..15c798026217 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -299,7 +299,6 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
299 void *data_out = NULL; 299 void *data_out = NULL;
300 dma_addr_t data_out_dma; 300 dma_addr_t data_out_dma;
301 u32 sz; 301 u32 sz;
302 u64 *sas_address_le;
303 u16 wait_state_count; 302 u16 wait_state_count;
304 303
305 if (ioc->shost_recovery || ioc->pci_error_recovery) { 304 if (ioc->shost_recovery || ioc->pci_error_recovery) {
@@ -372,8 +371,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
372 mpi_request->PhysicalPort = 0xFF; 371 mpi_request->PhysicalPort = 0xFF;
373 mpi_request->VF_ID = 0; /* TODO */ 372 mpi_request->VF_ID = 0; /* TODO */
374 mpi_request->VP_ID = 0; 373 mpi_request->VP_ID = 0;
375 sas_address_le = (u64 *)&mpi_request->SASAddress; 374 mpi_request->SASAddress = cpu_to_le64(sas_address);
376 *sas_address_le = cpu_to_le64(sas_address);
377 mpi_request->RequestDataLength = 375 mpi_request->RequestDataLength =
378 cpu_to_le16(sizeof(struct rep_manu_request)); 376 cpu_to_le16(sizeof(struct rep_manu_request));
379 psge = &mpi_request->SGL; 377 psge = &mpi_request->SGL;
@@ -1049,14 +1047,14 @@ struct phy_error_log_reply{
1049 u8 function; /* 0x11 */ 1047 u8 function; /* 0x11 */
1050 u8 function_result; 1048 u8 function_result;
1051 u8 response_length; 1049 u8 response_length;
1052 u16 expander_change_count; 1050 __be16 expander_change_count;
1053 u8 reserved_1[3]; 1051 u8 reserved_1[3];
1054 u8 phy_identifier; 1052 u8 phy_identifier;
1055 u8 reserved_2[2]; 1053 u8 reserved_2[2];
1056 u32 invalid_dword; 1054 __be32 invalid_dword;
1057 u32 running_disparity_error; 1055 __be32 running_disparity_error;
1058 u32 loss_of_dword_sync; 1056 __be32 loss_of_dword_sync;
1059 u32 phy_reset_problem; 1057 __be32 phy_reset_problem;
1060}; 1058};
1061 1059
1062/** 1060/**
@@ -1085,7 +1083,6 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
1085 void *data_out = NULL; 1083 void *data_out = NULL;
1086 dma_addr_t data_out_dma; 1084 dma_addr_t data_out_dma;
1087 u32 sz; 1085 u32 sz;
1088 u64 *sas_address_le;
1089 u16 wait_state_count; 1086 u16 wait_state_count;
1090 1087
1091 if (ioc->shost_recovery || ioc->pci_error_recovery) { 1088 if (ioc->shost_recovery || ioc->pci_error_recovery) {
@@ -1160,8 +1157,7 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
1160 mpi_request->PhysicalPort = 0xFF; 1157 mpi_request->PhysicalPort = 0xFF;
1161 mpi_request->VF_ID = 0; /* TODO */ 1158 mpi_request->VF_ID = 0; /* TODO */
1162 mpi_request->VP_ID = 0; 1159 mpi_request->VP_ID = 0;
1163 sas_address_le = (u64 *)&mpi_request->SASAddress; 1160 mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
1164 *sas_address_le = cpu_to_le64(phy->identify.sas_address);
1165 mpi_request->RequestDataLength = 1161 mpi_request->RequestDataLength =
1166 cpu_to_le16(sizeof(struct phy_error_log_request)); 1162 cpu_to_le16(sizeof(struct phy_error_log_request));
1167 psge = &mpi_request->SGL; 1163 psge = &mpi_request->SGL;
@@ -1406,7 +1402,6 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
1406 void *data_out = NULL; 1402 void *data_out = NULL;
1407 dma_addr_t data_out_dma; 1403 dma_addr_t data_out_dma;
1408 u32 sz; 1404 u32 sz;
1409 u64 *sas_address_le;
1410 u16 wait_state_count; 1405 u16 wait_state_count;
1411 1406
1412 if (ioc->shost_recovery) { 1407 if (ioc->shost_recovery) {
@@ -1486,8 +1481,7 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
1486 mpi_request->PhysicalPort = 0xFF; 1481 mpi_request->PhysicalPort = 0xFF;
1487 mpi_request->VF_ID = 0; /* TODO */ 1482 mpi_request->VF_ID = 0; /* TODO */
1488 mpi_request->VP_ID = 0; 1483 mpi_request->VP_ID = 0;
1489 sas_address_le = (u64 *)&mpi_request->SASAddress; 1484 mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
1490 *sas_address_le = cpu_to_le64(phy->identify.sas_address);
1491 mpi_request->RequestDataLength = 1485 mpi_request->RequestDataLength =
1492 cpu_to_le16(sizeof(struct phy_error_log_request)); 1486 cpu_to_le16(sizeof(struct phy_error_log_request));
1493 psge = &mpi_request->SGL; 1487 psge = &mpi_request->SGL;
@@ -1914,7 +1908,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1914 mpi_request->PhysicalPort = 0xFF; 1908 mpi_request->PhysicalPort = 0xFF;
1915 mpi_request->VF_ID = 0; /* TODO */ 1909 mpi_request->VF_ID = 0; /* TODO */
1916 mpi_request->VP_ID = 0; 1910 mpi_request->VP_ID = 0;
1917 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1911 mpi_request->SASAddress = (rphy) ?
1918 cpu_to_le64(rphy->identify.sas_address) : 1912 cpu_to_le64(rphy->identify.sas_address) :
1919 cpu_to_le64(ioc->sas_hba.sas_address); 1913 cpu_to_le64(ioc->sas_hba.sas_address);
1920 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); 1914 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 82e9e5c0476e..cf8dfab9489f 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -197,6 +197,7 @@ static struct {
197 {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 197 {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
198 {"IBM", "2105", NULL, BLIST_RETRY_HWERROR}, 198 {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
199 {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN}, 199 {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
200 {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
200 {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, 201 {"IOMEGA", "Io20S *F", NULL, BLIST_KEY},
201 {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, 202 {"INSITE", "Floptical F*8I", NULL, BLIST_KEY},
202 {"INSITE", "I325VM", NULL, BLIST_KEY}, 203 {"INSITE", "I325VM", NULL, BLIST_KEY},
@@ -243,6 +244,7 @@ static struct {
243 {"Tornado-", "F4", "*", BLIST_NOREPORTLUN}, 244 {"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
244 {"TOSHIBA", "CDROM", NULL, BLIST_ISROM}, 245 {"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
245 {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM}, 246 {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
247 {"Traxdata", "CDR4120", NULL, BLIST_NOLUN}, /* locks up */
246 {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, 248 {"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36},
247 {"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN}, 249 {"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN},
248 {"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN}, 250 {"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ec1803a48723..28d9c9d6b4b4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -213,6 +213,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
213 int ret = DRIVER_ERROR << 24; 213 int ret = DRIVER_ERROR << 24;
214 214
215 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); 215 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
216 if (!req)
217 return ret;
216 218
217 if (bufflen && blk_rq_map_kern(sdev->request_queue, req, 219 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
218 buffer, bufflen, __GFP_WAIT)) 220 buffer, bufflen, __GFP_WAIT))
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index eb7a3e85304f..eba183c428cf 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -160,6 +160,10 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
160 return NULL; 160 return NULL;
161} 161}
162 162
163/* For device slot and array device slot elements, byte 3 bit 6
164 * is "fault sensed" while byte 3 bit 5 is "fault reqstd". As this
165 * code stands these bits are shifted 4 positions right so in
166 * sysfs they will appear as bits 2 and 1 respectively. Strange. */
163static void ses_get_fault(struct enclosure_device *edev, 167static void ses_get_fault(struct enclosure_device *edev,
164 struct enclosure_component *ecomp) 168 struct enclosure_component *ecomp)
165{ 169{
@@ -181,7 +185,7 @@ static int ses_set_fault(struct enclosure_device *edev,
181 /* zero is disabled */ 185 /* zero is disabled */
182 break; 186 break;
183 case ENCLOSURE_SETTING_ENABLED: 187 case ENCLOSURE_SETTING_ENABLED:
184 desc[2] = 0x02; 188 desc[3] = 0x20;
185 break; 189 break;
186 default: 190 default:
187 /* SES doesn't do the SGPIO blink settings */ 191 /* SES doesn't do the SGPIO blink settings */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 4778e2707168..5fc97d2ba2fd 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -221,14 +221,33 @@ static unsigned int sr_check_events(struct cdrom_device_info *cdi,
221 return 0; 221 return 0;
222 222
223 events = sr_get_events(cd->device); 223 events = sr_get_events(cd->device);
224 cd->get_event_changed |= events & DISK_EVENT_MEDIA_CHANGE;
225
226 /*
227 * If earlier GET_EVENT_STATUS_NOTIFICATION and TUR did not agree
228 * for several times in a row. We rely on TUR only for this likely
229 * broken device, to prevent generating incorrect media changed
230 * events for every open().
231 */
232 if (cd->ignore_get_event) {
233 events &= ~DISK_EVENT_MEDIA_CHANGE;
234 goto do_tur;
235 }
236
224 /* 237 /*
225 * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE 238 * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE
226 * is being cleared. Note that there are devices which hang 239 * is being cleared. Note that there are devices which hang
227 * if asked to execute TUR repeatedly. 240 * if asked to execute TUR repeatedly.
228 */ 241 */
229 if (!(clearing & DISK_EVENT_MEDIA_CHANGE)) 242 if (cd->device->changed) {
230 goto skip_tur; 243 events |= DISK_EVENT_MEDIA_CHANGE;
244 cd->device->changed = 0;
245 cd->tur_changed = true;
246 }
231 247
248 if (!(clearing & DISK_EVENT_MEDIA_CHANGE))
249 return events;
250do_tur:
232 /* let's see whether the media is there with TUR */ 251 /* let's see whether the media is there with TUR */
233 last_present = cd->media_present; 252 last_present = cd->media_present;
234 ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); 253 ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
@@ -242,12 +261,31 @@ static unsigned int sr_check_events(struct cdrom_device_info *cdi,
242 (scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a); 261 (scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a);
243 262
244 if (last_present != cd->media_present) 263 if (last_present != cd->media_present)
245 events |= DISK_EVENT_MEDIA_CHANGE; 264 cd->device->changed = 1;
246skip_tur: 265
247 if (cd->device->changed) { 266 if (cd->device->changed) {
248 events |= DISK_EVENT_MEDIA_CHANGE; 267 events |= DISK_EVENT_MEDIA_CHANGE;
249 cd->device->changed = 0; 268 cd->device->changed = 0;
269 cd->tur_changed = true;
270 }
271
272 if (cd->ignore_get_event)
273 return events;
274
275 /* check whether GET_EVENT is reporting spurious MEDIA_CHANGE */
276 if (!cd->tur_changed) {
277 if (cd->get_event_changed) {
278 if (cd->tur_mismatch++ > 8) {
279 sdev_printk(KERN_WARNING, cd->device,
280 "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n");
281 cd->ignore_get_event = true;
282 }
283 } else {
284 cd->tur_mismatch = 0;
285 }
250 } 286 }
287 cd->tur_changed = false;
288 cd->get_event_changed = false;
251 289
252 return events; 290 return events;
253} 291}
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index e036f1dc83c8..37c8f6b17510 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -41,6 +41,13 @@ typedef struct scsi_cd {
41 unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ 41 unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
42 unsigned readcd_cdda:1; /* reading audio data using READ_CD */ 42 unsigned readcd_cdda:1; /* reading audio data using READ_CD */
43 unsigned media_present:1; /* media is present */ 43 unsigned media_present:1; /* media is present */
44
45 /* GET_EVENT spurious event handling, blk layer guarantees exclusion */
46 int tur_mismatch; /* nr of get_event TUR mismatches */
47 bool tur_changed:1; /* changed according to TUR */
48 bool get_event_changed:1; /* changed according to GET_EVENT */
49 bool ignore_get_event:1; /* GET_EVENT is unreliable, use TUR */
50
44 struct cdrom_device_info cdi; 51 struct cdrom_device_info cdi;
45 /* We hold gendisk and scsi_device references on probe and use 52 /* We hold gendisk and scsi_device references on probe and use
46 * the refs on this kref to decide when to release them */ 53 * the refs on this kref to decide when to release them */
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 07eaef1c722b..7e12a2e4e0a3 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -49,13 +49,6 @@
49 * inside the execution of NCR5380_intr(), leading to recursive 49 * inside the execution of NCR5380_intr(), leading to recursive
50 * calls. 50 * calls.
51 * 51 *
52 * - I've added a function merge_contiguous_buffers() that tries to
53 * merge scatter-gather buffers that are located at contiguous
54 * physical addresses and can be processed with the same DMA setup.
55 * Since most scatter-gather operations work on a page (4K) of
56 * 4 buffers (1K), in more than 90% of all cases three interrupts and
57 * DMA setup actions are saved.
58 *
59 * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA 52 * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA
60 * and USLEEP, because these were messing up readability and will never be 53 * and USLEEP, because these were messing up readability and will never be
61 * needed for Atari SCSI. 54 * needed for Atari SCSI.
@@ -266,8 +259,9 @@ static struct scsi_host_template *the_template = NULL;
266 (struct NCR5380_hostdata *)(in)->hostdata 259 (struct NCR5380_hostdata *)(in)->hostdata
267#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) 260#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)
268 261
269#define NEXT(cmd) (*(struct scsi_cmnd **)&((cmd)->host_scribble)) 262#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble)
270#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble)) 263#define SET_NEXT(cmd, next) ((cmd)->host_scribble = (void *)(next))
264#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble))
271 265
272#define HOSTNO instance->host_no 266#define HOSTNO instance->host_no
273#define H_NO(cmd) (cmd)->device->host->host_no 267#define H_NO(cmd) (cmd)->device->host->host_no
@@ -459,47 +453,6 @@ static void free_all_tags( void )
459 453
460 454
461/* 455/*
462 * Function: void merge_contiguous_buffers(struct scsi_cmnd *cmd)
463 *
464 * Purpose: Try to merge several scatter-gather requests into one DMA
465 * transfer. This is possible if the scatter buffers lie on
466 * physical contiguous addresses.
467 *
468 * Parameters: struct scsi_cmnd *cmd
469 * The command to work on. The first scatter buffer's data are
470 * assumed to be already transferred into ptr/this_residual.
471 */
472
473static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
474{
475 unsigned long endaddr;
476#if (NDEBUG & NDEBUG_MERGING)
477 unsigned long oldlen = cmd->SCp.this_residual;
478 int cnt = 1;
479#endif
480
481 for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
482 cmd->SCp.buffers_residual &&
483 virt_to_phys(SGADDR(&(cmd->SCp.buffer[1]))) == endaddr; ) {
484
485 MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
486 SGADDR(&(cmd->SCp.buffer[1])), endaddr);
487#if (NDEBUG & NDEBUG_MERGING)
488 ++cnt;
489#endif
490 ++cmd->SCp.buffer;
491 --cmd->SCp.buffers_residual;
492 cmd->SCp.this_residual += cmd->SCp.buffer->length;
493 endaddr += cmd->SCp.buffer->length;
494 }
495#if (NDEBUG & NDEBUG_MERGING)
496 if (oldlen != cmd->SCp.this_residual)
497 MER_PRINTK("merged %d buffers from %p, new length %08x\n",
498 cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
499#endif
500}
501
502/*
503 * Function : void initialize_SCp(struct scsi_cmnd *cmd) 456 * Function : void initialize_SCp(struct scsi_cmnd *cmd)
504 * 457 *
505 * Purpose : initialize the saved data pointers for cmd to point to the 458 * Purpose : initialize the saved data pointers for cmd to point to the
@@ -520,11 +473,6 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
520 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; 473 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
521 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); 474 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
522 cmd->SCp.this_residual = cmd->SCp.buffer->length; 475 cmd->SCp.this_residual = cmd->SCp.buffer->length;
523
524 /* ++roman: Try to merge some scatter-buffers if they are at
525 * contiguous physical addresses.
526 */
527// merge_contiguous_buffers( cmd );
528 } else { 476 } else {
529 cmd->SCp.buffer = NULL; 477 cmd->SCp.buffer = NULL;
530 cmd->SCp.buffers_residual = 0; 478 cmd->SCp.buffers_residual = 0;
@@ -841,7 +789,7 @@ static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer,
841 * 789 *
842 */ 790 */
843 791
844static int NCR5380_init (struct Scsi_Host *instance, int flags) 792static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
845{ 793{
846 int i; 794 int i;
847 SETUP_HOSTDATA(instance); 795 SETUP_HOSTDATA(instance);
@@ -889,6 +837,11 @@ static int NCR5380_init (struct Scsi_Host *instance, int flags)
889 return 0; 837 return 0;
890} 838}
891 839
840static void NCR5380_exit(struct Scsi_Host *instance)
841{
842 /* Empty, as we didn't schedule any delayed work */
843}
844
892/* 845/*
893 * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd, 846 * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd,
894 * void (*done)(struct scsi_cmnd *)) 847 * void (*done)(struct scsi_cmnd *))
@@ -962,7 +915,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
962 * in a queue 915 * in a queue
963 */ 916 */
964 917
965 NEXT(cmd) = NULL; 918 SET_NEXT(cmd, NULL);
966 cmd->scsi_done = done; 919 cmd->scsi_done = done;
967 920
968 cmd->result = 0; 921 cmd->result = 0;
@@ -990,14 +943,14 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
990 */ 943 */
991 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { 944 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
992 LIST(cmd, hostdata->issue_queue); 945 LIST(cmd, hostdata->issue_queue);
993 NEXT(cmd) = hostdata->issue_queue; 946 SET_NEXT(cmd, hostdata->issue_queue);
994 hostdata->issue_queue = cmd; 947 hostdata->issue_queue = cmd;
995 } else { 948 } else {
996 for (tmp = (struct scsi_cmnd *)hostdata->issue_queue; 949 for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
997 NEXT(tmp); tmp = NEXT(tmp)) 950 NEXT(tmp); tmp = NEXT(tmp))
998 ; 951 ;
999 LIST(cmd, tmp); 952 LIST(cmd, tmp);
1000 NEXT(tmp) = cmd; 953 SET_NEXT(tmp, cmd);
1001 } 954 }
1002 955
1003 local_irq_restore(flags); 956 local_irq_restore(flags);
@@ -1105,12 +1058,12 @@ static void NCR5380_main (struct work_struct *bl)
1105 local_irq_disable(); 1058 local_irq_disable();
1106 if (prev) { 1059 if (prev) {
1107 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); 1060 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
1108 NEXT(prev) = NEXT(tmp); 1061 SET_NEXT(prev, NEXT(tmp));
1109 } else { 1062 } else {
1110 REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp)); 1063 REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
1111 hostdata->issue_queue = NEXT(tmp); 1064 hostdata->issue_queue = NEXT(tmp);
1112 } 1065 }
1113 NEXT(tmp) = NULL; 1066 SET_NEXT(tmp, NULL);
1114 1067
1115 /* reenable interrupts after finding one */ 1068 /* reenable interrupts after finding one */
1116 local_irq_restore(flags); 1069 local_irq_restore(flags);
@@ -1144,7 +1097,7 @@ static void NCR5380_main (struct work_struct *bl)
1144 } else { 1097 } else {
1145 local_irq_disable(); 1098 local_irq_disable();
1146 LIST(tmp, hostdata->issue_queue); 1099 LIST(tmp, hostdata->issue_queue);
1147 NEXT(tmp) = hostdata->issue_queue; 1100 SET_NEXT(tmp, hostdata->issue_queue);
1148 hostdata->issue_queue = tmp; 1101 hostdata->issue_queue = tmp;
1149#ifdef SUPPORT_TAGS 1102#ifdef SUPPORT_TAGS
1150 cmd_free_tag( tmp ); 1103 cmd_free_tag( tmp );
@@ -1439,7 +1392,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
1439 local_irq_restore(flags); 1392 local_irq_restore(flags);
1440 1393
1441 /* Wait for arbitration logic to complete */ 1394 /* Wait for arbitration logic to complete */
1442#if NCR_TIMEOUT 1395#ifdef NCR_TIMEOUT
1443 { 1396 {
1444 unsigned long timeout = jiffies + 2*NCR_TIMEOUT; 1397 unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
1445 1398
@@ -2070,11 +2023,6 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
2070 --cmd->SCp.buffers_residual; 2023 --cmd->SCp.buffers_residual;
2071 cmd->SCp.this_residual = cmd->SCp.buffer->length; 2024 cmd->SCp.this_residual = cmd->SCp.buffer->length;
2072 cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); 2025 cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
2073
2074 /* ++roman: Try to merge some scatter-buffers if
2075 * they are at contiguous physical addresses.
2076 */
2077// merge_contiguous_buffers( cmd );
2078 INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", 2026 INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
2079 HOSTNO, cmd->SCp.this_residual, 2027 HOSTNO, cmd->SCp.this_residual,
2080 cmd->SCp.buffers_residual); 2028 cmd->SCp.buffers_residual);
@@ -2274,7 +2222,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
2274 2222
2275 local_irq_save(flags); 2223 local_irq_save(flags);
2276 LIST(cmd,hostdata->issue_queue); 2224 LIST(cmd,hostdata->issue_queue);
2277 NEXT(cmd) = hostdata->issue_queue; 2225 SET_NEXT(cmd, hostdata->issue_queue);
2278 hostdata->issue_queue = (struct scsi_cmnd *) cmd; 2226 hostdata->issue_queue = (struct scsi_cmnd *) cmd;
2279 local_irq_restore(flags); 2227 local_irq_restore(flags);
2280 QU_PRINTK("scsi%d: REQUEST SENSE added to head of " 2228 QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
@@ -2330,7 +2278,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
2330 local_irq_save(flags); 2278 local_irq_save(flags);
2331 cmd->device->disconnect = 1; 2279 cmd->device->disconnect = 1;
2332 LIST(cmd,hostdata->disconnected_queue); 2280 LIST(cmd,hostdata->disconnected_queue);
2333 NEXT(cmd) = hostdata->disconnected_queue; 2281 SET_NEXT(cmd, hostdata->disconnected_queue);
2334 hostdata->connected = NULL; 2282 hostdata->connected = NULL;
2335 hostdata->disconnected_queue = cmd; 2283 hostdata->disconnected_queue = cmd;
2336 local_irq_restore(flags); 2284 local_irq_restore(flags);
@@ -2589,12 +2537,12 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
2589 ) { 2537 ) {
2590 if (prev) { 2538 if (prev) {
2591 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); 2539 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
2592 NEXT(prev) = NEXT(tmp); 2540 SET_NEXT(prev, NEXT(tmp));
2593 } else { 2541 } else {
2594 REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); 2542 REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
2595 hostdata->disconnected_queue = NEXT(tmp); 2543 hostdata->disconnected_queue = NEXT(tmp);
2596 } 2544 }
2597 NEXT(tmp) = NULL; 2545 SET_NEXT(tmp, NULL);
2598 break; 2546 break;
2599 } 2547 }
2600 } 2548 }
@@ -2762,7 +2710,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
2762 if (cmd == tmp) { 2710 if (cmd == tmp) {
2763 REMOVE(5, *prev, tmp, NEXT(tmp)); 2711 REMOVE(5, *prev, tmp, NEXT(tmp));
2764 (*prev) = NEXT(tmp); 2712 (*prev) = NEXT(tmp);
2765 NEXT(tmp) = NULL; 2713 SET_NEXT(tmp, NULL);
2766 tmp->result = DID_ABORT << 16; 2714 tmp->result = DID_ABORT << 16;
2767 local_irq_restore(flags); 2715 local_irq_restore(flags);
2768 ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", 2716 ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
@@ -2835,7 +2783,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
2835 if (cmd == tmp) { 2783 if (cmd == tmp) {
2836 REMOVE(5, *prev, tmp, NEXT(tmp)); 2784 REMOVE(5, *prev, tmp, NEXT(tmp));
2837 *prev = NEXT(tmp); 2785 *prev = NEXT(tmp);
2838 NEXT(tmp) = NULL; 2786 SET_NEXT(tmp, NULL);
2839 tmp->result = DID_ABORT << 16; 2787 tmp->result = DID_ABORT << 16;
2840 /* We must unlock the tag/LUN immediately here, since the 2788 /* We must unlock the tag/LUN immediately here, since the
2841 * target goes to BUS FREE and doesn't send us another 2789 * target goes to BUS FREE and doesn't send us another
@@ -2943,7 +2891,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
2943 2891
2944 for (i = 0; (cmd = disconnected_queue); ++i) { 2892 for (i = 0; (cmd = disconnected_queue); ++i) {
2945 disconnected_queue = NEXT(cmd); 2893 disconnected_queue = NEXT(cmd);
2946 NEXT(cmd) = NULL; 2894 SET_NEXT(cmd, NULL);
2947 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); 2895 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2948 cmd->scsi_done( cmd ); 2896 cmd->scsi_done( cmd );
2949 } 2897 }
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 613f5880d135..baf7328de956 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -70,6 +70,12 @@
70#include <asm/idprom.h> 70#include <asm/idprom.h>
71#include <asm/machines.h> 71#include <asm/machines.h>
72 72
73#define NDEBUG 0
74
75#define NDEBUG_ABORT 0x00100000
76#define NDEBUG_TAGS 0x00200000
77#define NDEBUG_MERGING 0x00400000
78
73/* dma on! */ 79/* dma on! */
74#define REAL_DMA 80#define REAL_DMA
75 81
@@ -86,8 +92,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
86/*#define RESET_BOOT */ 92/*#define RESET_BOOT */
87#define DRIVER_SETUP 93#define DRIVER_SETUP
88 94
89#define NDEBUG 0
90
91/* 95/*
92 * BUG can be used to trigger a strange code-size related hang on 2.1 kernels 96 * BUG can be used to trigger a strange code-size related hang on 2.1 kernels
93 */ 97 */
@@ -195,7 +199,7 @@ static struct Scsi_Host *default_instance;
195 * 199 *
196 */ 200 */
197 201
198int sun3scsi_detect(struct scsi_host_template * tpnt) 202int __init sun3scsi_detect(struct scsi_host_template * tpnt)
199{ 203{
200 unsigned long ioaddr; 204 unsigned long ioaddr;
201 static int called = 0; 205 static int called = 0;
@@ -314,6 +318,7 @@ int sun3scsi_release (struct Scsi_Host *shpnt)
314 318
315 iounmap((void *)sun3_scsi_regp); 319 iounmap((void *)sun3_scsi_regp);
316 320
321 NCR5380_exit(shpnt);
317 return 0; 322 return 0;
318} 323}
319 324
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c
index 7c526b8e30ac..fbba78e5722e 100644
--- a/drivers/scsi/sun3_scsi_vme.c
+++ b/drivers/scsi/sun3_scsi_vme.c
@@ -39,6 +39,12 @@
39/* dma on! */ 39/* dma on! */
40#define REAL_DMA 40#define REAL_DMA
41 41
42#define NDEBUG 0
43
44#define NDEBUG_ABORT 0x00100000
45#define NDEBUG_TAGS 0x00200000
46#define NDEBUG_MERGING 0x00400000
47
42#include "scsi.h" 48#include "scsi.h"
43#include "initio.h" 49#include "initio.h"
44#include <scsi/scsi_host.h> 50#include <scsi/scsi_host.h>
@@ -50,8 +56,6 @@ extern int sun3_map_test(unsigned long, char *);
50/*#define RESET_BOOT */ 56/*#define RESET_BOOT */
51#define DRIVER_SETUP 57#define DRIVER_SETUP
52 58
53#define NDEBUG 0
54
55/* 59/*
56 * BUG can be used to trigger a strange code-size related hang on 2.1 kernels 60 * BUG can be used to trigger a strange code-size related hang on 2.1 kernels
57 */ 61 */
@@ -137,7 +141,7 @@ static struct Scsi_Host *default_instance;
137 * 141 *
138 */ 142 */
139 143
140static int sun3scsi_detect(struct scsi_host_template * tpnt) 144static int __init sun3scsi_detect(struct scsi_host_template * tpnt)
141{ 145{
142 unsigned long ioaddr, irq = 0; 146 unsigned long ioaddr, irq = 0;
143 static int called = 0; 147 static int called = 0;
@@ -283,6 +287,7 @@ int sun3scsi_release (struct Scsi_Host *shpnt)
283 287
284 iounmap((void *)sun3_scsi_regp); 288 iounmap((void *)sun3_scsi_regp);
285 289
290 NCR5380_exit(shpnt);
286 return 0; 291 return 0;
287} 292}
288 293
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index a9e9a31da11d..a6bfb6deba94 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -264,8 +264,9 @@ int ft_write_pending(struct se_cmd *se_cmd)
264 cmd->sg_cnt = 264 cmd->sg_cnt =
265 se_cmd->t_tasks_sg_chained_no; 265 se_cmd->t_tasks_sg_chained_no;
266 } 266 }
267 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, 267 if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
268 cmd->sg, cmd->sg_cnt)) 268 cmd->sg,
269 cmd->sg_cnt))
269 cmd->was_ddp_setup = 1; 270 cmd->was_ddp_setup = 1;
270 } 271 }
271 } 272 }
@@ -371,12 +372,23 @@ static void ft_send_resp_status(struct fc_lport *lport,
371 372
372/* 373/*
373 * Send error or task management response. 374 * Send error or task management response.
374 * Always frees the cmd and associated state.
375 */ 375 */
376static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code) 376static void ft_send_resp_code(struct ft_cmd *cmd,
377 enum fcp_resp_rsp_codes code)
377{ 378{
378 ft_send_resp_status(cmd->sess->tport->lport, 379 ft_send_resp_status(cmd->sess->tport->lport,
379 cmd->req_frame, SAM_STAT_GOOD, code); 380 cmd->req_frame, SAM_STAT_GOOD, code);
381}
382
383
384/*
385 * Send error or task management response.
386 * Always frees the cmd and associated state.
387 */
388static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
389 enum fcp_resp_rsp_codes code)
390{
391 ft_send_resp_code(cmd, code);
380 ft_free_cmd(cmd); 392 ft_free_cmd(cmd);
381} 393}
382 394
@@ -414,7 +426,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
414 * tm_flags set is invalid. 426 * tm_flags set is invalid.
415 */ 427 */
416 pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); 428 pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
417 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); 429 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
418 return; 430 return;
419 } 431 }
420 432
@@ -422,7 +434,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
422 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); 434 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
423 if (!tmr) { 435 if (!tmr) {
424 pr_debug("alloc failed\n"); 436 pr_debug("alloc failed\n");
425 ft_send_resp_code(cmd, FCP_TMF_FAILED); 437 ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
426 return; 438 return;
427 } 439 }
428 cmd->se_cmd.se_tmr_req = tmr; 440 cmd->se_cmd.se_tmr_req = tmr;
@@ -661,7 +673,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)
661 return; 673 return;
662 674
663err: 675err:
664 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); 676 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
665} 677}
666 678
667/* 679/*
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index f1e6c184f14f..f0a2f8b0aa13 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -92,6 +92,13 @@ struct iscsi_boot_kobj {
92 * properties. 92 * properties.
93 */ 93 */
94 mode_t (*is_visible) (void *data, int type); 94 mode_t (*is_visible) (void *data, int type);
95
96 /*
97 * Driver specific release function.
98 *
99 * The function should free the data passed in.
100 */
101 void (*release) (void *data);
95}; 102};
96 103
97struct iscsi_boot_kset { 104struct iscsi_boot_kset {
@@ -103,18 +110,21 @@ struct iscsi_boot_kobj *
103iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, 110iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
104 void *data, 111 void *data,
105 ssize_t (*show) (void *data, int type, char *buf), 112 ssize_t (*show) (void *data, int type, char *buf),
106 mode_t (*is_visible) (void *data, int type)); 113 mode_t (*is_visible) (void *data, int type),
114 void (*release) (void *data));
107 115
108struct iscsi_boot_kobj * 116struct iscsi_boot_kobj *
109iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, 117iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
110 void *data, 118 void *data,
111 ssize_t (*show) (void *data, int type, char *buf), 119 ssize_t (*show) (void *data, int type, char *buf),
112 mode_t (*is_visible) (void *data, int type)); 120 mode_t (*is_visible) (void *data, int type),
121 void (*release) (void *data));
113struct iscsi_boot_kobj * 122struct iscsi_boot_kobj *
114iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, 123iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
115 void *data, 124 void *data,
116 ssize_t (*show) (void *data, int type, char *buf), 125 ssize_t (*show) (void *data, int type, char *buf),
117 mode_t (*is_visible) (void *data, int type)); 126 mode_t (*is_visible) (void *data, int type),
127 void (*release) (void *data));
118 128
119struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); 129struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name);
120struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); 130struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno);
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index dd0a52cea95a..ea68b3c56dbf 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -60,7 +60,7 @@ struct iscsi_hdr {
60 uint8_t rsvd2[2]; 60 uint8_t rsvd2[2];
61 uint8_t hlength; /* AHSs total length */ 61 uint8_t hlength; /* AHSs total length */
62 uint8_t dlength[3]; /* Data length */ 62 uint8_t dlength[3]; /* Data length */
63 uint8_t lun[8]; 63 struct scsi_lun lun;
64 itt_t itt; /* Initiator Task Tag, opaque for target */ 64 itt_t itt; /* Initiator Task Tag, opaque for target */
65 __be32 ttt; /* Target Task Tag */ 65 __be32 ttt; /* Target Task Tag */
66 __be32 statsn; 66 __be32 statsn;
@@ -122,7 +122,7 @@ struct iscsi_cmd {
122 __be16 rsvd2; 122 __be16 rsvd2;
123 uint8_t hlength; 123 uint8_t hlength;
124 uint8_t dlength[3]; 124 uint8_t dlength[3];
125 uint8_t lun[8]; 125 struct scsi_lun lun;
126 itt_t itt; /* Initiator Task Tag */ 126 itt_t itt; /* Initiator Task Tag */
127 __be32 data_length; 127 __be32 data_length;
128 __be32 cmdsn; 128 __be32 cmdsn;
@@ -198,7 +198,7 @@ struct iscsi_async {
198 uint8_t rsvd2[2]; 198 uint8_t rsvd2[2];
199 uint8_t rsvd3; 199 uint8_t rsvd3;
200 uint8_t dlength[3]; 200 uint8_t dlength[3];
201 uint8_t lun[8]; 201 struct scsi_lun lun;
202 uint8_t rsvd4[8]; 202 uint8_t rsvd4[8];
203 __be32 statsn; 203 __be32 statsn;
204 __be32 exp_cmdsn; 204 __be32 exp_cmdsn;
@@ -226,7 +226,7 @@ struct iscsi_nopout {
226 __be16 rsvd2; 226 __be16 rsvd2;
227 uint8_t rsvd3; 227 uint8_t rsvd3;
228 uint8_t dlength[3]; 228 uint8_t dlength[3];
229 uint8_t lun[8]; 229 struct scsi_lun lun;
230 itt_t itt; /* Initiator Task Tag */ 230 itt_t itt; /* Initiator Task Tag */
231 __be32 ttt; /* Target Transfer Tag */ 231 __be32 ttt; /* Target Transfer Tag */
232 __be32 cmdsn; 232 __be32 cmdsn;
@@ -241,7 +241,7 @@ struct iscsi_nopin {
241 __be16 rsvd2; 241 __be16 rsvd2;
242 uint8_t rsvd3; 242 uint8_t rsvd3;
243 uint8_t dlength[3]; 243 uint8_t dlength[3];
244 uint8_t lun[8]; 244 struct scsi_lun lun;
245 itt_t itt; /* Initiator Task Tag */ 245 itt_t itt; /* Initiator Task Tag */
246 __be32 ttt; /* Target Transfer Tag */ 246 __be32 ttt; /* Target Transfer Tag */
247 __be32 statsn; 247 __be32 statsn;
@@ -257,7 +257,7 @@ struct iscsi_tm {
257 uint8_t rsvd1[2]; 257 uint8_t rsvd1[2];
258 uint8_t hlength; 258 uint8_t hlength;
259 uint8_t dlength[3]; 259 uint8_t dlength[3];
260 uint8_t lun[8]; 260 struct scsi_lun lun;
261 itt_t itt; /* Initiator Task Tag */ 261 itt_t itt; /* Initiator Task Tag */
262 itt_t rtt; /* Reference Task Tag */ 262 itt_t rtt; /* Reference Task Tag */
263 __be32 cmdsn; 263 __be32 cmdsn;
@@ -315,7 +315,7 @@ struct iscsi_r2t_rsp {
315 uint8_t rsvd2[2]; 315 uint8_t rsvd2[2];
316 uint8_t hlength; 316 uint8_t hlength;
317 uint8_t dlength[3]; 317 uint8_t dlength[3];
318 uint8_t lun[8]; 318 struct scsi_lun lun;
319 itt_t itt; /* Initiator Task Tag */ 319 itt_t itt; /* Initiator Task Tag */
320 __be32 ttt; /* Target Transfer Tag */ 320 __be32 ttt; /* Target Transfer Tag */
321 __be32 statsn; 321 __be32 statsn;
@@ -333,7 +333,7 @@ struct iscsi_data {
333 uint8_t rsvd2[2]; 333 uint8_t rsvd2[2];
334 uint8_t rsvd3; 334 uint8_t rsvd3;
335 uint8_t dlength[3]; 335 uint8_t dlength[3];
336 uint8_t lun[8]; 336 struct scsi_lun lun;
337 itt_t itt; 337 itt_t itt;
338 __be32 ttt; 338 __be32 ttt;
339 __be32 rsvd4; 339 __be32 rsvd4;
@@ -353,7 +353,7 @@ struct iscsi_data_rsp {
353 uint8_t cmd_status; 353 uint8_t cmd_status;
354 uint8_t hlength; 354 uint8_t hlength;
355 uint8_t dlength[3]; 355 uint8_t dlength[3];
356 uint8_t lun[8]; 356 struct scsi_lun lun;
357 itt_t itt; 357 itt_t itt;
358 __be32 ttt; 358 __be32 ttt;
359 __be32 statsn; 359 __be32 statsn;
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index a3cbda4ddb5c..7d96829b0c00 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -511,6 +511,14 @@ struct libfc_function_template {
511 */ 511 */
512 int (*ddp_done)(struct fc_lport *, u16); 512 int (*ddp_done)(struct fc_lport *, u16);
513 /* 513 /*
514 * Sets up the DDP context for a given exchange id on the given
515 * scatterlist if LLD supports DDP for FCoE target.
516 *
517 * STATUS: OPTIONAL
518 */
519 int (*ddp_target)(struct fc_lport *, u16, struct scatterlist *,
520 unsigned int);
521 /*
514 * Allow LLD to fill its own Link Error Status Block 522 * Allow LLD to fill its own Link Error Status Block
515 * 523 *
516 * STATUS: OPTIONAL 524 * STATUS: OPTIONAL
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 0f4367751b71..cedcff371c88 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -115,7 +115,7 @@ struct iscsi_task {
115 /* copied values in case we need to send tmfs */ 115 /* copied values in case we need to send tmfs */
116 itt_t hdr_itt; 116 itt_t hdr_itt;
117 __be32 cmdsn; 117 __be32 cmdsn;
118 uint8_t lun[8]; 118 struct scsi_lun lun;
119 119
120 int itt; /* this ITT */ 120 int itt; /* this ITT */
121 121