Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h           |  119
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      |  256
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h      |   63
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c        |   19
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c   |   24
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h      |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       |  302
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   | 1388
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h        |  141
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h       | 2141
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      | 5626
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h    |   54
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c      |  677
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c       |  206
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c |   51
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      | 1329
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h      |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       | 6811
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h       |   30
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h      |  467
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h   |    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c     |   64
22 files changed, 17888 insertions(+), 1885 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111ba..1877d9811831 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -23,6 +23,13 @@
 
 struct lpfc_sli2_slim;
 
+#define LPFC_PCI_DEV_LP		0x1
+#define LPFC_PCI_DEV_OC		0x2
+
+#define LPFC_SLI_REV2		2
+#define LPFC_SLI_REV3		3
+#define LPFC_SLI_REV4		4
+
 #define LPFC_MAX_TARGET		4096	/* max number of targets supported */
 #define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
 					   requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
 };
 
 struct hbq_dmabuf {
+	struct lpfc_dmabuf hbuf;
 	struct lpfc_dmabuf dbuf;
 	uint32_t size;
 	uint32_t tag;
+	struct lpfc_rcqe rcqe;
 };
 
 /* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
 	} rev;
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
-		uint32_t rsvd2  :24;  /* Reserved */
+		uint32_t rsvd3  :19;  /* Reserved */
+		uint32_t cdss   : 1;  /* Configure Data Security SLI */
+		uint32_t rsvd2  : 3;  /* Reserved */
+		uint32_t cbg    : 1;  /* Configure BlockGuard */
 		uint32_t cmv    : 1;  /* Configure Max VPIs */
 		uint32_t ccrp   : 1;  /* Config Command Ring Polling */
 		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
 		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
 		uint32_t ccrp   : 1;  /* Config Command Ring Polling */
 		uint32_t cmv    : 1;  /* Configure Max VPIs */
-		uint32_t rsvd2  :24;  /* Reserved */
+		uint32_t cbg    : 1;  /* Configure BlockGuard */
+		uint32_t rsvd2  : 3;  /* Reserved */
+		uint32_t cdss   : 1;  /* Configure Data Security SLI */
+		uint32_t rsvd3  :19;  /* Reserved */
 #endif
 	} sli3Feat;
 } lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
 };
 
 struct lpfc_vport {
-	struct list_head listentry;
 	struct lpfc_hba *phba;
+	struct list_head listentry;
 	uint8_t port_type;
 #define LPFC_PHYSICAL_PORT	1
 #define LPFC_NPIV_PORT		2
@@ -273,6 +288,9 @@ struct lpfc_vport {
 	enum discovery_state port_state;
 
 	uint16_t vpi;
+	uint16_t vfi;
+	uint8_t vfi_state;
+#define LPFC_VFI_REGISTERED	0x1
 
 	uint32_t fc_flag;	/* FC flags */
 /* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
 #endif
 	uint8_t stat_data_enabled;
 	uint8_t stat_data_blocked;
+	struct list_head rcv_buffer_list;
+	uint32_t vport_flag;
+#define STATIC_VPORT	1
 };
 
 struct hbq_s {
@@ -420,8 +441,62 @@ enum intr_type_t {
 };
 
 struct lpfc_hba {
+	/* SCSI interface function jump table entries */
+	int (*lpfc_new_scsi_buf)
+		(struct lpfc_vport *, int);
+	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+		(struct lpfc_hba *);
+	int (*lpfc_scsi_prep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_scsi_unprep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_release_scsi_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_rampdown_queue_depth)
+		(struct lpfc_hba *);
+	void (*lpfc_scsi_prep_cmnd)
+		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		 struct lpfc_nodelist *);
+	/* IOCB interface function jump table entries */
+	int (*__lpfc_sli_issue_iocb)
+		(struct lpfc_hba *, uint32_t,
+		 struct lpfc_iocbq *, uint32_t);
+	void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+			 struct lpfc_iocbq *);
+	int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+
+
+	IOCB_t * (*lpfc_get_iocb_from_iocbq)
+		(struct lpfc_iocbq *);
+	void (*lpfc_scsi_cmd_iocb_cmpl)
+		(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+	/* MBOX interface function jump table entries */
+	int (*lpfc_sli_issue_mbox)
+		(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+	/* Slow-path IOCB process function jump table entries */
+	void (*lpfc_sli_handle_slow_ring_event)
+		(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		 uint32_t mask);
+	/* INIT device interface function jump table entries */
+	int (*lpfc_sli_hbq_to_firmware)
+		(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+	int (*lpfc_sli_brdrestart)
+		(struct lpfc_hba *);
+	int (*lpfc_sli_brdready)
+		(struct lpfc_hba *, uint32_t);
+	void (*lpfc_handle_eratt)
+		(struct lpfc_hba *);
+	void (*lpfc_stop_port)
+		(struct lpfc_hba *);
+
+
+	/* SLI4 specific HBA data structure */
+	struct lpfc_sli4_hba sli4_hba;
+
 	struct lpfc_sli sli;
-	uint32_t sli_rev;		/* SLI2 or SLI3 */
+	uint8_t pci_dev_grp;	/* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+	uint32_t sli_rev;		/* SLI2, SLI3, or SLI4 */
 	uint32_t sli3_options;		/* Mask of enabled SLI3 options */
 #define LPFC_SLI3_HBQ_ENABLED		0x01
 #define LPFC_SLI3_NPIV_ENABLED		0x02
@@ -429,6 +504,7 @@ struct lpfc_hba {
 #define LPFC_SLI3_CRP_ENABLED		0x08
 #define LPFC_SLI3_INB_ENABLED		0x10
 #define LPFC_SLI3_BG_ENABLED		0x20
+#define LPFC_SLI3_DSS_ENABLED		0x40
 	uint32_t iocb_cmd_size;
 	uint32_t iocb_rsp_size;
 
@@ -442,8 +518,13 @@ struct lpfc_hba {
 
 	uint32_t hba_flag;	/* hba generic flags */
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
-
-#define DEFER_ERATT		0x4 /* Deferred error attention in progress */
+#define DEFER_ERATT		0x2 /* Deferred error attention in progress */
+#define HBA_FCOE_SUPPORT	0x4 /* HBA function supports FCOE */
+#define HBA_RECEIVE_BUFFER	0x8 /* Rcv buffer posted to worker thread */
+#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define FCP_XRI_ABORT_EVENT	0x20
+#define ELS_XRI_ABORT_EVENT	0x40
+#define ASYNC_EVENT		0x80
 	struct lpfc_dmabuf slim2p;
 
 	MAILBOX_t *mbox;
@@ -502,6 +583,9 @@ struct lpfc_hba {
 	uint32_t cfg_poll;
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_use_msi;
+	uint32_t cfg_fcp_imax;
+	uint32_t cfg_fcp_wq_count;
+	uint32_t cfg_fcp_eq_count;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_prot_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +595,8 @@ struct lpfc_hba {
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
 	uint32_t cfg_enable_bg;
+	uint32_t cfg_enable_fip;
+	uint32_t cfg_log_verbose;
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -526,11 +612,12 @@ struct lpfc_hba {
 	unsigned long data_flags;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
-	struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
+	struct list_head rb_pend_list;  /* Received buffers to be processed */
 	uint32_t hbq_count;	        /* Count of configured HBQs */
 	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
 
 	unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
+	unsigned long pci_bar1_map;     /* Physical address for PCI BAR1 */
 	unsigned long pci_bar2_map;     /* Physical address for PCI BAR2 */
 	void __iomem *slim_memmap_p;	/* Kernel memory mapped address for
 					   PCI BAR0 */
@@ -593,7 +680,8 @@ struct lpfc_hba {
 	/* pci_mem_pools */
 	struct pci_pool *lpfc_scsi_dma_buf_pool;
 	struct pci_pool *lpfc_mbuf_pool;
-	struct pci_pool *lpfc_hbq_pool;
+	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
+	struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
@@ -609,6 +697,14 @@ struct lpfc_hba {
 	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
 	uint16_t max_vpi;		/* Maximum virtual nports */
 #define LPFC_MAX_VPI 0xFFFF		/* Max number of VPI supported */
+	uint16_t max_vports;            /*
+					 * For IOV HBAs max_vpi can change
+					 * after a reset. max_vports is max
+					 * number of vports present. This can
+					 * be greater than max_vpi.
+					 */
+	uint16_t vpi_base;
+	uint16_t vfi_base;
 	unsigned long *vpi_bmask;	/* vpi allocation table */
 
 	/* Data structure used by fabric iocb scheduler */
@@ -667,6 +763,11 @@ struct lpfc_hba {
 /* Maximum number of events that can be outstanding at any time*/
 #define LPFC_MAX_EVT_COUNT 512
 	atomic_t fast_event_count;
+	struct lpfc_fcf fcf;
+	uint8_t fc_map[3];
+	uint8_t valid_vlan;
+	uint16_t vlan_id;
+	struct list_head fcf_conn_rec_list;
 };
 
 static inline struct Scsi_Host *
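The jump-table members added to struct lpfc_hba above are the core of the SLI-4 rework: instead of sprinkling sli_rev tests through the I/O paths, the driver binds SCSI, IOCB, mailbox, and init entry points to revision-specific routines once at probe time (the lpfc_*_api_table_setup() prototypes appear in the lpfc_crtn.h hunks below). A minimal stand-alone sketch of that dispatch pattern follows; all demo_* names are invented for illustration and are not taken from the driver.

/* Sketch only.  Build: cc -Wall demo_jump_table.c -o demo_jump_table */
#include <stdio.h>

struct demo_hba {
	int sli_rev;	/* 2, 3, or 4, mirroring LPFC_SLI_REV* */
	/* entry point chosen once at init, like lpfc_hba_down_post */
	int (*hba_down_post)(struct demo_hba *hba);
};

static int demo_down_post_s3(struct demo_hba *hba)
{
	(void)hba;
	printf("SLI-3 style post-reset cleanup\n");
	return 0;
}

static int demo_down_post_s4(struct demo_hba *hba)
{
	(void)hba;
	printf("SLI-4 style post-reset cleanup\n");
	return 0;
}

/* Rough analogue of an api_table_setup routine: resolve the table once. */
static int demo_api_table_setup(struct demo_hba *hba)
{
	switch (hba->sli_rev) {
	case 2:
	case 3:
		hba->hba_down_post = demo_down_post_s3;
		return 0;
	case 4:
		hba->hba_down_post = demo_down_post_s4;
		return 0;
	default:
		return -1;	/* unsupported interface revision */
	}
}

int main(void)
{
	struct demo_hba hba = { .sli_rev = 4 };

	if (demo_api_table_setup(&hba) == 0)
		hba.hba_down_post(&hba);	/* no per-call revision tests */
	return 0;
}

The fast path then calls through the pointer unconditionally, which is how one driver binary can serve both PCI device groups (LPFC_PCI_DEV_LP and LPFC_PCI_DEV_OC) introduced at the top of the file.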
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c14f0cbdb125..fc07be5fbce9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -30,8 +30,10 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
 		return -ENOMEM;
 
 	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
-	pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
-	pmboxq->mb.mbxOwner = OWN_HOST;
+	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->u.mb.mbxOwner = OWN_HOST;
 
 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
 
-	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
+	if ((mbxstatus == MBX_SUCCESS) &&
+	    (pmboxq->u.mb.mbxStatus == 0 ||
+	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
 		memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
 			       phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		  uint32_t *mrpi, uint32_t *arpi,
 		  uint32_t *mvpi, uint32_t *avpi)
 {
 	struct lpfc_sli   *psli = &phba->sli;
+	struct lpfc_mbx_read_config *rd_config;
 	LPFC_MBOXQ_t *pmboxq;
 	MAILBOX_t *pmb;
 	int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 	 */
 	if (phba->link_state < LPFC_LINK_DOWN ||
 	    !phba->mbox_mem_pool ||
-	    (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
 		return 0;
 
 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		return 0;
 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_CONFIG;
 	pmb->mbxOwner = OWN_HOST;
 	pmboxq->context1 = NULL;
 
 	if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
-		(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = MBX_NOT_FINISHED;
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		return 0;
 	}
 
-	if (mrpi)
-		*mrpi = pmb->un.varRdConfig.max_rpi;
-	if (arpi)
-		*arpi = pmb->un.varRdConfig.avail_rpi;
-	if (mxri)
-		*mxri = pmb->un.varRdConfig.max_xri;
-	if (axri)
-		*axri = pmb->un.varRdConfig.avail_xri;
-	if (mvpi)
-		*mvpi = pmb->un.varRdConfig.max_vpi;
-	if (avpi)
-		*avpi = pmb->un.varRdConfig.avail_vpi;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rd_config = &pmboxq->u.mqe.un.rd_config;
+		if (mrpi)
+			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+		if (arpi)
+			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.rpi_used;
+		if (mxri)
+			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		if (axri)
+			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.xri_used;
+		if (mvpi)
+			*mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+		if (avpi)
+			*avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.vpi_used;
+	} else {
+		if (mrpi)
+			*mrpi = pmb->un.varRdConfig.max_rpi;
+		if (arpi)
+			*arpi = pmb->un.varRdConfig.avail_rpi;
+		if (mxri)
+			*mxri = pmb->un.varRdConfig.max_xri;
+		if (axri)
+			*axri = pmb->un.varRdConfig.avail_xri;
+		if (mvpi)
+			*mvpi = pmb->un.varRdConfig.max_vpi;
+		if (avpi)
+			*avpi = pmb->un.varRdConfig.avail_vpi;
+	}
 
 	mempool_free(pmboxq, phba->mbox_mem_pool);
 	return 1;
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
 # You can set a bit mask to record specific types of verbose messages:
-#
-# LOG_ELS                       0x1        ELS events
-# LOG_DISCOVERY                 0x2        Link discovery events
-# LOG_MBOX                      0x4        Mailbox events
-# LOG_INIT                      0x8        Initialization events
-# LOG_LINK_EVENT                0x10       Link events
-# LOG_FCP                       0x40       FCP traffic history
-# LOG_NODE                      0x80       Node table events
-# LOG_BG                        0x200      BlockGuard events
-# LOG_MISC                      0x400      Miscellaneous events
-# LOG_SLI                       0x800      SLI events
-# LOG_FCP_ERROR                 0x1000     Only log FCP errors
-# LOG_LIBDFC                    0x2000     LIBDFC events
-# LOG_ALL_MSG                   0xffff     LOG all messages
+# See lpfc_logmsg.h for definitions.
 */
-LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
 		       "Verbose logging bit-mask");
 
 /*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
 static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
 		lpfc_topology_show, lpfc_topology_store);
 
+/**
+ * lpfc_static_vport_show: Read callback function for
+ *   lpfc_static_vport sysfs file.
+ * @dev: Pointer to class device object.
+ * @attr: device attribute structure.
+ * @buf: Data buffer.
+ *
+ * This function is the read call back function for
+ * lpfc_static_vport sysfs file. The lpfc_static_vport
+ * sysfs file reports the manageability of the vport.
+ **/
+static ssize_t
+lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (vport->vport_flag & STATIC_VPORT)
+		sprintf(buf, "1\n");
+	else
+		sprintf(buf, "0\n");
+
+	return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to control the statistical data collection.
+ */
+static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
+		   lpfc_static_vport_show, NULL);
 
 /**
  * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 	if (vports == NULL)
 		return -ENOMEM;
 
-	for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		v_shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(v_shost->host_lock);
 		/* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 	phba->bucket_base = base;
 	phba->bucket_step = step;
 
-	for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		v_shost = lpfc_shost_from_vport(vports[i]);
 
 		/* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 	if (vports == NULL)
 		return -ENOMEM;
 
-	for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		v_shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(shost->host_lock);
 		vports[i]->stat_data_blocked = 1;
@@ -2844,15 +2885,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 /*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #		support this feature
-#       0  = MSI disabled
+#       0  = MSI disabled (default)
 #       1  = MSI enabled
-#       2  = MSI-X enabled (default)
-# Value range is [0,2]. Default value is 2.
+#       2  = MSI-X enabled
+# Value range is [0,2]. Default value is 0.
 */
-LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
 	    "MSI-X (2), if possible");
 
 /*
+# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
+#
+# Value range is [636,651042]. Default value is 10000.
+*/
+LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
+	    "Set the maximum number of fast-path FCP interrupts per second");
+
+/*
+# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+#
+# Value range is [1,31]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+	    "Set the number of fast-path FCP work queues, if possible");
+
+/*
+# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+#
+# Value range is [1,7]. Default value is 1.
+*/
+LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+	    "Set the number of fast-path FCP event queues, if possible");
+
+/*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 #       0  = HBA resets disabled
 #       1  = HBA resets enabled (default)
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 */
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
+/*
+# lpfc_enable_fip: When set, FIP is required to start discovery. If not
+# set, the driver will add an FCF record manually if the port has no
+# FCF records available and start discovery.
+# Value range is [0,1]. Default value is 1 (enabled)
+*/
+LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
+
 
 /*
 # lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
 	&dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_poll,
 	&dev_attr_lpfc_poll_tmo,
 	&dev_attr_lpfc_use_msi,
+	&dev_attr_lpfc_fcp_imax,
+	&dev_attr_lpfc_fcp_wq_count,
+	&dev_attr_lpfc_fcp_eq_count,
 	&dev_attr_lpfc_enable_bg,
 	&dev_attr_lpfc_soft_wwnn,
 	&dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_lun_queue_depth,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_hba_queue_depth,
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_enable_da_id,
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
+	&dev_attr_lpfc_static_vport,
 	NULL,
 };
 
@@ -3034,6 +3113,9 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
+	if (phba->sli_rev >= LPFC_SLI_REV4)
+		return -EPERM;
+
 	if ((off + count) > FF_REG_AREA_SIZE)
 		return -ERANGE;
 
@@ -3084,6 +3166,9 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
+	if (phba->sli_rev >= LPFC_SLI_REV4)
+		return -EPERM;
+
 	if (off > FF_REG_AREA_SIZE)
 		return -ERANGE;
 
@@ -3199,7 +3284,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
 		}
 	}
 
-	memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
+	memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
 	       buf, count);
 
 	phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3326,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	int rc;
+	MAILBOX_t *pmb;
 
 	if (off > MAILBOX_CMD_SIZE)
 		return -ERANGE;
@@ -3265,8 +3351,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	if (off == 0 &&
 	    phba->sysfs_mbox.state == SMBOX_WRITING &&
 	    phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-
-		switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
+		pmb = &phba->sysfs_mbox.mbox->u.mb;
+		switch (pmb->mbxCommand) {
 			/* Offline only */
 		case MBX_INIT_LINK:
 		case MBX_DOWN_LINK:
@@ -3283,7 +3369,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 			if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
 				printk(KERN_WARNING "mbox_read:Command 0x%x "
 				       "is illegal in on-line state\n",
-				       phba->sysfs_mbox.mbox->mb.mbxCommand);
+				       pmb->mbxCommand);
 				sysfs_mbox_idle(phba);
 				spin_unlock_irq(&phba->hbalock);
 				return -EPERM;
@@ -3319,13 +3405,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		case MBX_CONFIG_PORT:
 		case MBX_RUN_BIU_DIAG:
 			printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
-			       phba->sysfs_mbox.mbox->mb.mbxCommand);
+			       pmb->mbxCommand);
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
 		default:
 			printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
-			       phba->sysfs_mbox.mbox->mb.mbxCommand);
+			       pmb->mbxCommand);
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
@@ -3335,14 +3421,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		 * or RESTART mailbox commands until the HBA is restarted.
 		 */
 		if (phba->pport->stopped &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
+		    pmb->mbxCommand != MBX_DUMP_MEMORY &&
+		    pmb->mbxCommand != MBX_RESTART &&
+		    pmb->mbxCommand != MBX_WRITE_VPARMS &&
+		    pmb->mbxCommand != MBX_WRITE_WWN)
 			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 					"1259 mbox: Issued mailbox cmd "
 					"0x%x while in stopped state.\n",
-					phba->sysfs_mbox.mbox->mb.mbxCommand);
+					pmb->mbxCommand);
 
 		phba->sysfs_mbox.mbox->vport = vport;
 
@@ -3356,7 +3442,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		}
 
 		if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		    (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
+		    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
 
 			spin_unlock_irq(&phba->hbalock);
 			rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3454,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 			spin_unlock_irq(&phba->hbalock);
 			rc = lpfc_sli_issue_mbox_wait (phba,
 						       phba->sysfs_mbox.mbox,
-				lpfc_mbox_tmo_val(phba,
-				    phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
+				lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
 			spin_lock_irq(&phba->hbalock);
 		}
 
@@ -3391,7 +3476,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		return -EAGAIN;
 	}
 
-	memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
+	memcpy(buf, (uint8_t *) &pmb + off, count);
 
 	phba->sysfs_mbox.offset = off + count;
 
@@ -3585,6 +3670,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
 		case LA_8GHZ_LINK:
 			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
 		break;
+		case LA_10GHZ_LINK:
+			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+		break;
 		default:
 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 		break;
@@ -3652,7 +3740,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	 */
 	if (phba->link_state < LPFC_LINK_DOWN ||
 	    !phba->mbox_mem_pool ||
-	    (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
 		return NULL;
 
 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3751,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
 		return NULL;
 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
 	pmboxq->context1 = NULL;
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3783,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3857,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 		return;
 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
 	pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3865,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3883,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4050,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
 }
 
+/**
+ * lpfc_hba_log_verbose_init - Set hba's log verbose level
+ * @phba: Pointer to lpfc_hba struct.
+ *
+ * This function is called by the lpfc_get_cfgparam() routine to set the
+ * module lpfc_log_verbose into the @phba cfg_log_verbose for use with
+ * log message according to the module's lpfc_log_verbose parameter setting
+ * before hba port or vport created.
+ **/
+static void
+lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
+{
+	phba->cfg_log_verbose = verbose;
+}
+
 struct fc_function_template lpfc_transport_functions = {
 	/* fixed attributes the driver supports */
 	.show_host_node_name = 1,
@@ -4105,6 +4208,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
+	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
+	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4219,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
-	/*
-	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
-	 * used to create the sg_dma_buf_pool must be dynamically calculated.
-	 * 2 segments are added since the IOCB needs a command and response bde.
-	 */
-	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-			sizeof(struct fcp_rsp) +
-			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
-
-	if (phba->cfg_enable_bg) {
-		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-		phba->cfg_sg_dma_buf_size +=
-			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
-	}
-
-	/* Also reinitialize the host templates with new values. */
-	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+	lpfc_enable_fip_init(phba, lpfc_enable_fip);
+	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+
 	return;
 }
 
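The SLI-4 branch of lpfc_get_hba_info() above pulls READ_CONFIG fields out with bf_get(), the accessor style that the new lpfc_hw4.h (first file in the diffstat) builds around named shift/mask definitions. Below is a self-contained model of the idea; the DEMO_* field geometry is made up for illustration and is not the real lpfc_mbx_rd_conf_* layout.

/* Sketch only.  Build: cc -Wall demo_bf.c -o demo_bf */
#include <stdint.h>
#include <stdio.h>

/* One named field = shift + mask within a 32-bit mailbox word; here a
 * hypothetical 16-bit field at bits 16..31 of a single word. */
#define DEMO_RPI_COUNT_SHIFT	16
#define DEMO_RPI_COUNT_MASK	0x0000FFFF

static inline uint32_t demo_bf_get(uint32_t word)
{
	return (word >> DEMO_RPI_COUNT_SHIFT) & DEMO_RPI_COUNT_MASK;
}

static inline uint32_t demo_bf_set(uint32_t word, uint32_t val)
{
	word &= ~((uint32_t)DEMO_RPI_COUNT_MASK << DEMO_RPI_COUNT_SHIFT);
	return word | ((val & DEMO_RPI_COUNT_MASK) << DEMO_RPI_COUNT_SHIFT);
}

int main(void)
{
	uint32_t word = demo_bf_set(0, 4096);	/* e.g. an rpi_count value */

	/* Mirrors the *mrpi = bf_get(...) usage pattern in the hunk above. */
	printf("rpi count = %u\n", demo_bf_get(word));
	return 0;
}

Keeping the geometry behind named accessors is what lets the same lpfc_get_hba_info() body serve firmware-defined SLI-4 layouts without open-coded shifts at each call site.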
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f88ce3f26190..d2a922997c0f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
 struct fc_rport;
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
 	       LPFC_MBOXQ_t *, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
-void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
 void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rpis(struct lpfc_vport *, int);
 int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
 int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
 void lpfc_unblock_mgmt_io(struct lpfc_hba *);
 void lpfc_offline_prep(struct lpfc_hba *);
 void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);
 
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli_queue_setup(struct lpfc_hba *);
 
 void lpfc_handle_eratt(struct lpfc_hba *);
 void lpfc_handle_latt(struct lpfc_hba *);
-irqreturn_t lpfc_intr_handler(int, void *);
-irqreturn_t lpfc_sp_intr_handler(int, void *);
-irqreturn_t lpfc_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
 
 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
 int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
 
 void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
 	uint32_t , LPFC_MBOXQ_t *);
 struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
 void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+			uint16_t);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);
 
-int lpfc_mem_alloc(struct lpfc_hba *);
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
 void lpfc_stop_vport_timers(struct lpfc_vport *);
 
 void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
 void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
 			   uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
 void lpfc_reset_barrier(struct lpfc_hba * phba);
 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
 int lpfc_sli_hba_down(struct lpfc_hba *);
 int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 int lpfc_sli_handle_mb_event(struct lpfc_hba *);
-int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
 int lpfc_sli_check_eratt(struct lpfc_hba *);
-int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
 				    struct lpfc_sli_ring *, uint32_t);
+int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
 
 int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
-int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
 			     struct lpfc_iocbq *, struct lpfc_iocbq *,
 			     uint32_t);
 void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
 const char* lpfc_info(struct Scsi_Host *);
 int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
 
+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
 void lpfc_get_cfgparam(struct lpfc_hba *);
 void lpfc_get_vport_cfgparam(struct lpfc_vport *);
 int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
 			    struct lpfc_iocbq *);
 struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
 void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);
 
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
 #define HBA_EVENT_LINK_UP                2
 #define HBA_EVENT_LINK_DOWN              3
+
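Note how lpfc_sli_issue_iocb() and lpfc_sli_issue_iocb_wait() above now take a uint32_t ring number instead of a struct lpfc_sli_ring pointer; the lpfc_ct.c hunks below update the caller accordingly, passing LPFC_ELS_RING. Handing over an index rather than an SLI-3 ring object lets the issue routine resolve the backing structure for whichever interface revision is active. A toy sketch of that decoupling, again with invented demo_* names and an illustrative ring index:

/* Sketch only.  Build: cc -Wall demo_ring.c -o demo_ring */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ELS_RING 2		/* illustrative index, not the driver's */

struct demo_ring { const char *name; };

struct demo_hba {
	int sli_rev;
	struct demo_ring rings[4];	/* SLI-3 style ring objects */
};

/* Callers name the ring; the issue routine resolves what backs it. */
static int demo_issue_iocb(struct demo_hba *hba, uint32_t ring_no)
{
	if (hba->sli_rev == 4)
		printf("ring %u -> SLI-4 work queue\n", ring_no);
	else
		printf("ring %u -> SLI-3 %s ring\n", ring_no,
		       hba->rings[ring_no].name);
	return 0;
}

int main(void)
{
	struct demo_hba hba = {
		.sli_rev = 3,
		.rings = { {"FCP"}, {"EXTRA"}, {"ELS"}, {"CT"} },
	};

	demo_issue_iocb(&hba, DEMO_ELS_RING);
	return 0;
}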
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e5..0e532f072eb3 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -32,8 +32,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	     uint32_t tmo, uint8_t retry)
 {
 	struct lpfc_hba  *phba = vport->phba;
-	struct lpfc_sli  *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	IOCB_t *icmd;
 	struct lpfc_iocbq *geniocb;
 	int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
 	geniocb->vport = vport;
 	geniocb->retry = retry;
-	rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
 
 	if (rc == IOCB_ERROR) {
 		lpfc_sli_release_iocbq(phba, geniocb);
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
 			case LA_8GHZ_LINK:
 				ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
 			break;
+			case LA_10GHZ_LINK:
+				ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+			break;
 			default:
 				ae->un.PortSpeed =
 					HBA_PORTSPEED_UNKNOWN;
@@ -1729,8 +1732,10 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 	uint32_t *ptr, str[4];
 	uint8_t *fwname;
 
-	if (vp->rev.rBit) {
-		if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		sprintf(fwrevision, "%s", vp->rev.opFwName);
+	else if (vp->rev.rBit) {
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
 			rev = vp->rev.sli2FwRev;
 		else
 			rev = vp->rev.sli1FwRev;
@@ -1756,7 +1761,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 	}
 	b4 = (rev & 0x0000000f);
 
-	if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+	if (psli->sli_flag & LPFC_SLI_ACTIVE)
 		fwname = vp->rev.sli2FwName;
 	else
 		fwname = vp->rev.sli1FwName;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07a..8d0f0de76b63 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -33,8 +33,10 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35 35
36#include "lpfc_hw4.h"
36#include "lpfc_hw.h" 37#include "lpfc_hw.h"
37#include "lpfc_sli.h" 38#include "lpfc_sli.h"
39#include "lpfc_sli4.h"
38#include "lpfc_nl.h" 40#include "lpfc_nl.h"
39#include "lpfc_disc.h" 41#include "lpfc_disc.h"
40#include "lpfc_scsi.h" 42#include "lpfc_scsi.h"
@@ -51,8 +53,7 @@
51 * debugfs interface 53 * debugfs interface
52 * 54 *
53 * To access this interface the user should: 55 * To access this interface the user should:
54 * # mkdir /debug 56 * # mount -t debugfs none /sys/kernel/debug
55 * # mount -t debugfs none /debug
56 * 57 *
57 * The lpfc debugfs directory hierarchy is: 58 * The lpfc debugfs directory hierarchy is:
58 * lpfc/lpfcX/vportY 59 * lpfc/lpfcX/vportY
@@ -280,6 +281,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
280 struct lpfc_dmabuf *d_buf; 281 struct lpfc_dmabuf *d_buf;
281 struct hbq_dmabuf *hbq_buf; 282 struct hbq_dmabuf *hbq_buf;
282 283
284 if (phba->sli_rev != 3)
285 return 0;
283 cnt = LPFC_HBQINFO_SIZE; 286 cnt = LPFC_HBQINFO_SIZE;
284 spin_lock_irq(&phba->hbalock); 287 spin_lock_irq(&phba->hbalock);
285 288
@@ -489,12 +492,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
489 pring->next_cmdidx, pring->local_getidx, 492 pring->next_cmdidx, pring->local_getidx,
490 pring->flag, pgpp->rspPutInx, pring->numRiocb); 493 pring->flag, pgpp->rspPutInx, pring->numRiocb);
491 } 494 }
492 word0 = readl(phba->HAregaddr); 495
493 word1 = readl(phba->CAregaddr); 496 if (phba->sli_rev <= LPFC_SLI_REV3) {
494 word2 = readl(phba->HSregaddr); 497 word0 = readl(phba->HAregaddr);
495 word3 = readl(phba->HCregaddr); 498 word1 = readl(phba->CAregaddr);
496 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n", 499 word2 = readl(phba->HSregaddr);
497 word0, word1, word2, word3); 500 word3 = readl(phba->HCregaddr);
501 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
502 "HC:%08x\n", word0, word1, word2, word3);
503 }
498 spin_unlock_irq(&phba->hbalock); 504 spin_unlock_irq(&phba->hbalock);
499 return len; 505 return len;
500} 506}
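
The sli_rev test added above reflects that the HA/CA/HS/HC attention registers are an SLI-3 construct with no SLI-4 equivalent, so the debugfs dump must not touch them on newer ports. The same guard, factored into a helper with a hypothetical name (sketch only, not part of the driver):

	/* Hypothetical helper: reading the SLI-3 host-attention window is
	 * only valid when the port runs SLI-3 or earlier; SLI-4 ports do
	 * not map these registers.
	 */
	static inline uint32_t lpfc_read_host_attn(struct lpfc_hba *phba)
	{
		if (phba->sli_rev > LPFC_SLI_REV3)
			return 0;
		return readl(phba->HAregaddr);
	}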
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ffd108972072..1142070e9484 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ 135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ 137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
138#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */
138 139
139/* ndlp usage management macros */ 140/* ndlp usage management macros */
140#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 141#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d2..f72fdf23bf1b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
84 uint32_t ha_copy; 86 uint32_t ha_copy;
85 87
86 if (vport->port_state >= LPFC_VPORT_READY || 88 if (vport->port_state >= LPFC_VPORT_READY ||
87 phba->link_state == LPFC_LINK_DOWN) 89 phba->link_state == LPFC_LINK_DOWN ||
90 phba->sli_rev > LPFC_SLI_REV3)
88 return 0; 91 return 0;
89 92
90 /* Read the HBA Host Attention Register */ 93 /* Read the HBA Host Attention Register */
@@ -165,6 +168,19 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
165 if (elsiocb == NULL) 168 if (elsiocb == NULL)
166 return NULL; 169 return NULL;
167 170
171 /*
 172 * If this command is for the fabric controller and the HBA is
 173 * running in FIP mode, send FLOGI, FDISC and LOGO as FIP frames.
174 */
175 if ((did == Fabric_DID) &&
176 bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) &&
177 ((elscmd == ELS_CMD_FLOGI) ||
178 (elscmd == ELS_CMD_FDISC) ||
179 (elscmd == ELS_CMD_LOGO)))
180 elsiocb->iocb_flag |= LPFC_FIP_ELS;
181 else
182 elsiocb->iocb_flag &= ~LPFC_FIP_ELS;
183
168 icmd = &elsiocb->iocb; 184 icmd = &elsiocb->iocb;
169 185
170 /* fill in BDEs for command */ 186 /* fill in BDEs for command */
@@ -219,7 +235,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
219 icmd->un.elsreq64.myID = vport->fc_myDID; 235 icmd->un.elsreq64.myID = vport->fc_myDID;
220 236
221 /* For ELS_REQUEST64_CR, use the VPI by default */ 237 /* For ELS_REQUEST64_CR, use the VPI by default */
222 icmd->ulpContext = vport->vpi; 238 icmd->ulpContext = vport->vpi + phba->vpi_base;
223 icmd->ulpCt_h = 0; 239 icmd->ulpCt_h = 0;
224 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 240 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
225 if (elscmd == ELS_CMD_ECHO) 241 if (elscmd == ELS_CMD_ECHO)
@@ -305,7 +321,7 @@ els_iocb_free_pcmb_exit:
305 * 0 - successfully issued fabric registration login for @vport 321 * 0 - successfully issued fabric registration login for @vport
306 * -ENXIO -- failed to issue fabric registration login for @vport 322 * -ENXIO -- failed to issue fabric registration login for @vport
307 **/ 323 **/
308static int 324int
309lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 325lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
310{ 326{
311 struct lpfc_hba *phba = vport->phba; 327 struct lpfc_hba *phba = vport->phba;
@@ -345,8 +361,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
345 err = 4; 361 err = 4;
346 goto fail; 362 goto fail;
347 } 363 }
348 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 364 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
349 0);
350 if (rc) { 365 if (rc) {
351 err = 5; 366 err = 5;
352 goto fail_free_mbox; 367 goto fail_free_mbox;
@@ -386,6 +401,75 @@ fail:
386} 401}
387 402
388/** 403/**
404 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
405 * @vport: pointer to a host virtual N_Port data structure.
406 *
 407 * This routine issues a REG_VFI mailbox command with the vfi, vpi, fcfi
 408 * triplet for the @vport. This mailbox command is needed only for FCoE.
409 *
410 * Return code
411 * 0 - successfully issued REG_VFI for @vport
412 * A failure code otherwise.
413 **/
414static int
415lpfc_issue_reg_vfi(struct lpfc_vport *vport)
416{
417 struct lpfc_hba *phba = vport->phba;
418 LPFC_MBOXQ_t *mboxq;
419 struct lpfc_nodelist *ndlp;
420 struct serv_parm *sp;
421 struct lpfc_dmabuf *dmabuf;
422 int rc = 0;
423
424 sp = &phba->fc_fabparam;
425 ndlp = lpfc_findnode_did(vport, Fabric_DID);
426 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
427 rc = -ENODEV;
428 goto fail;
429 }
430
431 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
432 if (!dmabuf) {
433 rc = -ENOMEM;
434 goto fail;
435 }
436 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
437 if (!dmabuf->virt) {
438 rc = -ENOMEM;
439 goto fail_free_dmabuf;
440 }
441 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
442 if (!mboxq) {
443 rc = -ENOMEM;
444 goto fail_free_coherent;
445 }
446 vport->port_state = LPFC_FABRIC_CFG_LINK;
447 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
448 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
449 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
450 mboxq->vport = vport;
451 mboxq->context1 = dmabuf;
452 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
453 if (rc == MBX_NOT_FINISHED) {
454 rc = -ENXIO;
455 goto fail_free_mbox;
456 }
457 return 0;
458
459fail_free_mbox:
460 mempool_free(mboxq, phba->mbox_mem_pool);
461fail_free_coherent:
462 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
463fail_free_dmabuf:
464 kfree(dmabuf);
465fail:
466 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
467 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
468 "0289 Issue Register VFI failed: Err %d\n", rc);
469 return rc;
470}
471
472/**
389 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 473 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
390 * @vport: pointer to a host virtual N_Port data structure. 474 * @vport: pointer to a host virtual N_Port data structure.
391 * @ndlp: pointer to a node-list data structure. 475 * @ndlp: pointer to a node-list data structure.
@@ -497,17 +581,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
497 } 581 }
498 } 582 }
499 583
500 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 584 if (phba->sli_rev < LPFC_SLI_REV4) {
501 585 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
502 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 586 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
503 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { 587 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
504 lpfc_register_new_vport(phba, vport, ndlp); 588 lpfc_register_new_vport(phba, vport, ndlp);
505 return 0; 589 else
590 lpfc_issue_fabric_reglogin(vport);
591 } else {
592 ndlp->nlp_type |= NLP_FABRIC;
593 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
594 if (vport->vfi_state & LPFC_VFI_REGISTERED) {
595 lpfc_start_fdiscs(phba);
596 lpfc_do_scr_ns_plogi(phba, vport);
597 } else
598 lpfc_issue_reg_vfi(vport);
506 } 599 }
507 lpfc_issue_fabric_reglogin(vport);
508 return 0; 600 return 0;
509} 601}
510
511/** 602/**
512 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 603 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
513 * @vport: pointer to a host virtual N_Port data structure. 604 * @vport: pointer to a host virtual N_Port data structure.
@@ -815,9 +906,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
815 if (sp->cmn.fcphHigh < FC_PH3) 906 if (sp->cmn.fcphHigh < FC_PH3)
816 sp->cmn.fcphHigh = FC_PH3; 907 sp->cmn.fcphHigh = FC_PH3;
817 908
818 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 909 if (phba->sli_rev == LPFC_SLI_REV4) {
910 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
911 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
912 /* FLOGI needs to be 3 for WQE FCFI */
913 /* Set the fcfi to the fcfi we registered with */
914 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
915 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
819 sp->cmn.request_multiple_Nport = 1; 916 sp->cmn.request_multiple_Nport = 1;
820
821 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 917 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
822 icmd->ulpCt_h = 1; 918 icmd->ulpCt_h = 1;
823 icmd->ulpCt_l = 0; 919 icmd->ulpCt_l = 0;
@@ -930,6 +1026,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
930 if (!ndlp) 1026 if (!ndlp)
931 return 0; 1027 return 0;
932 lpfc_nlp_init(vport, ndlp, Fabric_DID); 1028 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1029 /* Set the node type */
1030 ndlp->nlp_type |= NLP_FABRIC;
933 /* Put ndlp onto node list */ 1031 /* Put ndlp onto node list */
934 lpfc_enqueue_node(vport, ndlp); 1032 lpfc_enqueue_node(vport, ndlp);
935 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1033 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1448,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1350 IOCB_t *icmd; 1448 IOCB_t *icmd;
1351 struct lpfc_nodelist *ndlp; 1449 struct lpfc_nodelist *ndlp;
1352 struct lpfc_iocbq *elsiocb; 1450 struct lpfc_iocbq *elsiocb;
1353 struct lpfc_sli_ring *pring;
1354 struct lpfc_sli *psli; 1451 struct lpfc_sli *psli;
1355 uint8_t *pcmd; 1452 uint8_t *pcmd;
1356 uint16_t cmdsize; 1453 uint16_t cmdsize;
1357 int ret; 1454 int ret;
1358 1455
1359 psli = &phba->sli; 1456 psli = &phba->sli;
1360 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1361 1457
1362 ndlp = lpfc_findnode_did(vport, did); 1458 ndlp = lpfc_findnode_did(vport, did);
1363 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1459 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1487,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1391 1487
1392 phba->fc_stat.elsXmitPLOGI++; 1488 phba->fc_stat.elsXmitPLOGI++;
1393 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1489 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1394 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 1490 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1395 1491
1396 if (ret == IOCB_ERROR) { 1492 if (ret == IOCB_ERROR) {
1397 lpfc_els_free_iocb(phba, elsiocb); 1493 lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1597,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1501 PRLI *npr; 1597 PRLI *npr;
1502 IOCB_t *icmd; 1598 IOCB_t *icmd;
1503 struct lpfc_iocbq *elsiocb; 1599 struct lpfc_iocbq *elsiocb;
1504 struct lpfc_sli_ring *pring;
1505 struct lpfc_sli *psli;
1506 uint8_t *pcmd; 1600 uint8_t *pcmd;
1507 uint16_t cmdsize; 1601 uint16_t cmdsize;
1508 1602
1509 psli = &phba->sli;
1510 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1511
1512 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 1603 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1513 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1604 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1514 ndlp->nlp_DID, ELS_CMD_PRLI); 1605 ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1641,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1550 spin_lock_irq(shost->host_lock); 1641 spin_lock_irq(shost->host_lock);
1551 ndlp->nlp_flag |= NLP_PRLI_SND; 1642 ndlp->nlp_flag |= NLP_PRLI_SND;
1552 spin_unlock_irq(shost->host_lock); 1643 spin_unlock_irq(shost->host_lock);
1553 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1644 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1645 IOCB_ERROR) {
1554 spin_lock_irq(shost->host_lock); 1646 spin_lock_irq(shost->host_lock);
1555 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1647 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1556 spin_unlock_irq(shost->host_lock); 1648 spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1700,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
1608 * and continue discovery. 1700 * and continue discovery.
1609 */ 1701 */
1610 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1702 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1611 !(vport->fc_flag & FC_RSCN_MODE)) { 1703 !(vport->fc_flag & FC_RSCN_MODE) &&
1704 (phba->sli_rev < LPFC_SLI_REV4)) {
1612 lpfc_issue_reg_vpi(phba, vport); 1705 lpfc_issue_reg_vpi(phba, vport);
1613 return; 1706 return;
1614 } 1707 }
@@ -1788,8 +1881,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1788 ADISC *ap; 1881 ADISC *ap;
1789 IOCB_t *icmd; 1882 IOCB_t *icmd;
1790 struct lpfc_iocbq *elsiocb; 1883 struct lpfc_iocbq *elsiocb;
1791 struct lpfc_sli *psli = &phba->sli;
1792 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1793 uint8_t *pcmd; 1884 uint8_t *pcmd;
1794 uint16_t cmdsize; 1885 uint16_t cmdsize;
1795 1886
@@ -1822,7 +1913,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1822 spin_lock_irq(shost->host_lock); 1913 spin_lock_irq(shost->host_lock);
1823 ndlp->nlp_flag |= NLP_ADISC_SND; 1914 ndlp->nlp_flag |= NLP_ADISC_SND;
1824 spin_unlock_irq(shost->host_lock); 1915 spin_unlock_irq(shost->host_lock);
1825 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1916 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1917 IOCB_ERROR) {
1826 spin_lock_irq(shost->host_lock); 1918 spin_lock_irq(shost->host_lock);
1827 ndlp->nlp_flag &= ~NLP_ADISC_SND; 1919 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1828 spin_unlock_irq(shost->host_lock); 1920 spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2029,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1937 struct lpfc_hba *phba = vport->phba; 2029 struct lpfc_hba *phba = vport->phba;
1938 IOCB_t *icmd; 2030 IOCB_t *icmd;
1939 struct lpfc_iocbq *elsiocb; 2031 struct lpfc_iocbq *elsiocb;
1940 struct lpfc_sli_ring *pring;
1941 struct lpfc_sli *psli;
1942 uint8_t *pcmd; 2032 uint8_t *pcmd;
1943 uint16_t cmdsize; 2033 uint16_t cmdsize;
1944 int rc; 2034 int rc;
1945 2035
1946 psli = &phba->sli;
1947 pring = &psli->ring[LPFC_ELS_RING];
1948
1949 spin_lock_irq(shost->host_lock); 2036 spin_lock_irq(shost->host_lock);
1950 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2037 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1951 spin_unlock_irq(shost->host_lock); 2038 spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2065,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1978 spin_lock_irq(shost->host_lock); 2065 spin_lock_irq(shost->host_lock);
1979 ndlp->nlp_flag |= NLP_LOGO_SND; 2066 ndlp->nlp_flag |= NLP_LOGO_SND;
1980 spin_unlock_irq(shost->host_lock); 2067 spin_unlock_irq(shost->host_lock);
1981 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2068 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1982 2069
1983 if (rc == IOCB_ERROR) { 2070 if (rc == IOCB_ERROR) {
1984 spin_lock_irq(shost->host_lock); 2071 spin_lock_irq(shost->host_lock);
@@ -2058,14 +2145,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2058 struct lpfc_hba *phba = vport->phba; 2145 struct lpfc_hba *phba = vport->phba;
2059 IOCB_t *icmd; 2146 IOCB_t *icmd;
2060 struct lpfc_iocbq *elsiocb; 2147 struct lpfc_iocbq *elsiocb;
2061 struct lpfc_sli_ring *pring;
2062 struct lpfc_sli *psli; 2148 struct lpfc_sli *psli;
2063 uint8_t *pcmd; 2149 uint8_t *pcmd;
2064 uint16_t cmdsize; 2150 uint16_t cmdsize;
2065 struct lpfc_nodelist *ndlp; 2151 struct lpfc_nodelist *ndlp;
2066 2152
2067 psli = &phba->sli; 2153 psli = &phba->sli;
2068 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2069 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2154 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2070 2155
2071 ndlp = lpfc_findnode_did(vport, nportid); 2156 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2193,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2108 2193
2109 phba->fc_stat.elsXmitSCR++; 2194 phba->fc_stat.elsXmitSCR++;
2110 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2195 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2111 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2196 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2197 IOCB_ERROR) {
2112 /* The additional lpfc_nlp_put will cause the following 2198 /* The additional lpfc_nlp_put will cause the following
 2113 * lpfc_els_free_iocb routine to trigger the release of 2199 * lpfc_els_free_iocb routine to trigger the release of
2114 * the node. 2200 * the node.
@@ -2152,7 +2238,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2152 struct lpfc_hba *phba = vport->phba; 2238 struct lpfc_hba *phba = vport->phba;
2153 IOCB_t *icmd; 2239 IOCB_t *icmd;
2154 struct lpfc_iocbq *elsiocb; 2240 struct lpfc_iocbq *elsiocb;
2155 struct lpfc_sli_ring *pring;
2156 struct lpfc_sli *psli; 2241 struct lpfc_sli *psli;
2157 FARP *fp; 2242 FARP *fp;
2158 uint8_t *pcmd; 2243 uint8_t *pcmd;
@@ -2162,7 +2247,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2162 struct lpfc_nodelist *ndlp; 2247 struct lpfc_nodelist *ndlp;
2163 2248
2164 psli = &phba->sli; 2249 psli = &phba->sli;
2165 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2166 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2250 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2167 2251
2168 ndlp = lpfc_findnode_did(vport, nportid); 2252 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2303,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2219 2303
2220 phba->fc_stat.elsXmitFARPR++; 2304 phba->fc_stat.elsXmitFARPR++;
2221 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2305 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2222 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2306 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2307 IOCB_ERROR) {
2223 /* The additional lpfc_nlp_put will cause the following 2308 /* The additional lpfc_nlp_put will cause the following
2224 * lpfc_els_free_iocb routine to trigger the release of 2309 * lpfc_els_free_iocb routine to trigger the release of
2225 * the node. 2310 * the node.
@@ -2949,6 +3034,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2949 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3034 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2950 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3035 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2951 3036
3037 /*
3038 * This routine is used to register and unregister in previous SLI
3039 * modes.
3040 */
3041 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3042 (phba->sli_rev == LPFC_SLI_REV4))
3043 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3044
2952 pmb->context1 = NULL; 3045 pmb->context1 = NULL;
2953 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3046 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2954 kfree(mp); 3047 kfree(mp);
@@ -2961,6 +3054,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2961 */ 3054 */
2962 lpfc_nlp_not_used(ndlp); 3055 lpfc_nlp_not_used(ndlp);
2963 } 3056 }
3057
2964 return; 3058 return;
2965} 3059}
2966 3060
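
The UNREG_LOGIN check added above exists because on SLI-4 the driver, not the firmware, owns the RPI namespace. A sketch of the assumed lifecycle; lpfc_sli4_alloc_rpi as the allocating counterpart is an assumption here, only lpfc_sli4_free_rpi appears in this diff:

	uint16_t rpi;

	/* at registration: draw an RPI from the driver-managed pool
	 * (assumed counterpart of the free below)
	 */
	rpi = lpfc_sli4_alloc_rpi(phba);

	/* ... RPI used for REG_LOGIN and node I/O ... */

	/* once the UNREG_LOGIN mailbox completes, return it to the pool */
	lpfc_sli4_free_rpi(phba, rpi);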
@@ -3170,7 +3264,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3170 IOCB_t *icmd; 3264 IOCB_t *icmd;
3171 IOCB_t *oldcmd; 3265 IOCB_t *oldcmd;
3172 struct lpfc_iocbq *elsiocb; 3266 struct lpfc_iocbq *elsiocb;
3173 struct lpfc_sli_ring *pring;
3174 struct lpfc_sli *psli; 3267 struct lpfc_sli *psli;
3175 uint8_t *pcmd; 3268 uint8_t *pcmd;
3176 uint16_t cmdsize; 3269 uint16_t cmdsize;
@@ -3178,7 +3271,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3178 ELS_PKT *els_pkt_ptr; 3271 ELS_PKT *els_pkt_ptr;
3179 3272
3180 psli = &phba->sli; 3273 psli = &phba->sli;
3181 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3182 oldcmd = &oldiocb->iocb; 3274 oldcmd = &oldiocb->iocb;
3183 3275
3184 switch (flag) { 3276 switch (flag) {
@@ -3266,7 +3358,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3266 } 3358 }
3267 3359
3268 phba->fc_stat.elsXmitACC++; 3360 phba->fc_stat.elsXmitACC++;
3269 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3361 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3270 if (rc == IOCB_ERROR) { 3362 if (rc == IOCB_ERROR) {
3271 lpfc_els_free_iocb(phba, elsiocb); 3363 lpfc_els_free_iocb(phba, elsiocb);
3272 return 1; 3364 return 1;
@@ -3305,15 +3397,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3305 IOCB_t *icmd; 3397 IOCB_t *icmd;
3306 IOCB_t *oldcmd; 3398 IOCB_t *oldcmd;
3307 struct lpfc_iocbq *elsiocb; 3399 struct lpfc_iocbq *elsiocb;
3308 struct lpfc_sli_ring *pring;
3309 struct lpfc_sli *psli; 3400 struct lpfc_sli *psli;
3310 uint8_t *pcmd; 3401 uint8_t *pcmd;
3311 uint16_t cmdsize; 3402 uint16_t cmdsize;
3312 int rc; 3403 int rc;
3313 3404
3314 psli = &phba->sli; 3405 psli = &phba->sli;
3315 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3316
3317 cmdsize = 2 * sizeof(uint32_t); 3406 cmdsize = 2 * sizeof(uint32_t);
3318 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3407 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3319 ndlp->nlp_DID, ELS_CMD_LS_RJT); 3408 ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3435,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3346 3435
3347 phba->fc_stat.elsXmitLSRJT++; 3436 phba->fc_stat.elsXmitLSRJT++;
3348 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3437 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3349 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3438 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3350 3439
3351 if (rc == IOCB_ERROR) { 3440 if (rc == IOCB_ERROR) {
3352 lpfc_els_free_iocb(phba, elsiocb); 3441 lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3468,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3379 struct lpfc_nodelist *ndlp) 3468 struct lpfc_nodelist *ndlp)
3380{ 3469{
3381 struct lpfc_hba *phba = vport->phba; 3470 struct lpfc_hba *phba = vport->phba;
3382 struct lpfc_sli *psli = &phba->sli;
3383 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3384 ADISC *ap; 3471 ADISC *ap;
3385 IOCB_t *icmd, *oldcmd; 3472 IOCB_t *icmd, *oldcmd;
3386 struct lpfc_iocbq *elsiocb; 3473 struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3509,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3422 3509
3423 phba->fc_stat.elsXmitACC++; 3510 phba->fc_stat.elsXmitACC++;
3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3511 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3425 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3512 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3426 if (rc == IOCB_ERROR) { 3513 if (rc == IOCB_ERROR) {
3427 lpfc_els_free_iocb(phba, elsiocb); 3514 lpfc_els_free_iocb(phba, elsiocb);
3428 return 1; 3515 return 1;
@@ -3459,14 +3546,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3459 IOCB_t *icmd; 3546 IOCB_t *icmd;
3460 IOCB_t *oldcmd; 3547 IOCB_t *oldcmd;
3461 struct lpfc_iocbq *elsiocb; 3548 struct lpfc_iocbq *elsiocb;
3462 struct lpfc_sli_ring *pring;
3463 struct lpfc_sli *psli; 3549 struct lpfc_sli *psli;
3464 uint8_t *pcmd; 3550 uint8_t *pcmd;
3465 uint16_t cmdsize; 3551 uint16_t cmdsize;
3466 int rc; 3552 int rc;
3467 3553
3468 psli = &phba->sli; 3554 psli = &phba->sli;
3469 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3470 3555
3471 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 3556 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3472 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3557 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3605,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3520 phba->fc_stat.elsXmitACC++; 3605 phba->fc_stat.elsXmitACC++;
3521 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3606 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3522 3607
3523 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3608 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3524 if (rc == IOCB_ERROR) { 3609 if (rc == IOCB_ERROR) {
3525 lpfc_els_free_iocb(phba, elsiocb); 3610 lpfc_els_free_iocb(phba, elsiocb);
3526 return 1; 3611 return 1;
@@ -3562,15 +3647,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3562 RNID *rn; 3647 RNID *rn;
3563 IOCB_t *icmd, *oldcmd; 3648 IOCB_t *icmd, *oldcmd;
3564 struct lpfc_iocbq *elsiocb; 3649 struct lpfc_iocbq *elsiocb;
3565 struct lpfc_sli_ring *pring;
3566 struct lpfc_sli *psli; 3650 struct lpfc_sli *psli;
3567 uint8_t *pcmd; 3651 uint8_t *pcmd;
3568 uint16_t cmdsize; 3652 uint16_t cmdsize;
3569 int rc; 3653 int rc;
3570 3654
3571 psli = &phba->sli; 3655 psli = &phba->sli;
3572 pring = &psli->ring[LPFC_ELS_RING];
3573
3574 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 3656 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3575 + (2 * sizeof(struct lpfc_name)); 3657 + (2 * sizeof(struct lpfc_name));
3576 if (format) 3658 if (format)
@@ -3626,7 +3708,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3626 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 3708 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3627 * it could be freed */ 3709 * it could be freed */
3628 3710
3629 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3711 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3630 if (rc == IOCB_ERROR) { 3712 if (rc == IOCB_ERROR) {
3631 lpfc_els_free_iocb(phba, elsiocb); 3713 lpfc_els_free_iocb(phba, elsiocb);
3632 return 1; 3714 return 1;
@@ -3839,7 +3921,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3839 payload_len -= sizeof(uint32_t); 3921 payload_len -= sizeof(uint32_t);
3840 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 3922 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
3841 case RSCN_ADDRESS_FORMAT_PORT: 3923 case RSCN_ADDRESS_FORMAT_PORT:
3842 if (ns_did.un.word == rscn_did.un.word) 3924 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3925 && (ns_did.un.b.area == rscn_did.un.b.area)
3926 && (ns_did.un.b.id == rscn_did.un.b.id))
3843 goto return_did_out; 3927 goto return_did_out;
3844 break; 3928 break;
3845 case RSCN_ADDRESS_FORMAT_AREA: 3929 case RSCN_ADDRESS_FORMAT_AREA:
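
The PORT-format case above switches from comparing whole DID words to comparing the domain, area and id bytes individually, because the remaining byte of the word carries the RSCN address-format and reserved bits, which must not take part in the match. An equivalent masked form as a sketch; the 0x00ffffff mask is an assumption about how the un.word overlay packs the 24-bit D_ID:

	/* compare only the 24-bit D_ID portion; exclude the byte holding
	 * the address-format/reserved bits (mask value assumed)
	 */
	if ((ns_did.un.word & 0x00ffffff) ==
	    (rscn_did.un.word & 0x00ffffff))
		goto return_did_out;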
@@ -4300,7 +4384,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4300 lpfc_init_link(phba, mbox, 4384 lpfc_init_link(phba, mbox,
4301 phba->cfg_topology, 4385 phba->cfg_topology,
4302 phba->cfg_link_speed); 4386 phba->cfg_link_speed);
4303 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 4387 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4304 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4388 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4305 mbox->vport = vport; 4389 mbox->vport = vport;
4306 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4390 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4524,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4440static void 4524static void
4441lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4525lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4442{ 4526{
4443 struct lpfc_sli *psli = &phba->sli;
4444 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4445 MAILBOX_t *mb; 4527 MAILBOX_t *mb;
4446 IOCB_t *icmd; 4528 IOCB_t *icmd;
4447 RPS_RSP *rps_rsp; 4529 RPS_RSP *rps_rsp;
@@ -4451,7 +4533,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4451 uint16_t xri, status; 4533 uint16_t xri, status;
4452 uint32_t cmdsize; 4534 uint32_t cmdsize;
4453 4535
4454 mb = &pmb->mb; 4536 mb = &pmb->u.mb;
4455 4537
4456 ndlp = (struct lpfc_nodelist *) pmb->context2; 4538 ndlp = (struct lpfc_nodelist *) pmb->context2;
4457 xri = (uint16_t) ((unsigned long)(pmb->context1)); 4539 xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4589,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4507 ndlp->nlp_rpi); 4589 ndlp->nlp_rpi);
4508 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4590 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4509 phba->fc_stat.elsXmitACC++; 4591 phba->fc_stat.elsXmitACC++;
4510 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) 4592 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4511 lpfc_els_free_iocb(phba, elsiocb); 4593 lpfc_els_free_iocb(phba, elsiocb);
4512 return; 4594 return;
4513} 4595}
@@ -4616,8 +4698,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4616 IOCB_t *icmd, *oldcmd; 4698 IOCB_t *icmd, *oldcmd;
4617 RPL_RSP rpl_rsp; 4699 RPL_RSP rpl_rsp;
4618 struct lpfc_iocbq *elsiocb; 4700 struct lpfc_iocbq *elsiocb;
4619 struct lpfc_sli *psli = &phba->sli;
4620 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4621 uint8_t *pcmd; 4701 uint8_t *pcmd;
4622 4702
4623 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4703 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4734,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4654 ndlp->nlp_rpi); 4734 ndlp->nlp_rpi);
4655 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4735 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4656 phba->fc_stat.elsXmitACC++; 4736 phba->fc_stat.elsXmitACC++;
4657 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 4737 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4738 IOCB_ERROR) {
4658 lpfc_els_free_iocb(phba, elsiocb); 4739 lpfc_els_free_iocb(phba, elsiocb);
4659 return 1; 4740 return 1;
4660 } 4741 }
@@ -4883,7 +4964,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4883 } else { 4964 } else {
4884 /* FAN verified - skip FLOGI */ 4965 /* FAN verified - skip FLOGI */
4885 vport->fc_myDID = vport->fc_prevDID; 4966 vport->fc_myDID = vport->fc_prevDID;
4886 lpfc_issue_fabric_reglogin(vport); 4967 if (phba->sli_rev < LPFC_SLI_REV4)
4968 lpfc_issue_fabric_reglogin(vport);
4969 else
4970 lpfc_issue_reg_vfi(vport);
4887 } 4971 }
4888 } 4972 }
4889 return 0; 4973 return 0;
@@ -5566,11 +5650,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5566 5650
5567dropit: 5651dropit:
5568 if (vport && !(vport->load_flag & FC_UNLOADING)) 5652 if (vport && !(vport->load_flag & FC_UNLOADING))
5569 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 5653 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5570 "(%d):0111 Dropping received ELS cmd " 5654 "0111 Dropping received ELS cmd "
5571 "Data: x%x x%x x%x\n", 5655 "Data: x%x x%x x%x\n",
5572 vport->vpi, icmd->ulpStatus, 5656 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
5573 icmd->un.ulpWord[4], icmd->ulpTimeout);
5574 phba->fc_stat.elsRcvDrop++; 5657 phba->fc_stat.elsRcvDrop++;
5575} 5658}
5576 5659
@@ -5646,10 +5729,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5646 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 5729 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5647 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 5730 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5648 vport = phba->pport; 5731 vport = phba->pport;
5649 else { 5732 else
5650 uint16_t vpi = icmd->unsli3.rcvsli3.vpi; 5733 vport = lpfc_find_vport_by_vpid(phba,
5651 vport = lpfc_find_vport_by_vpid(phba, vpi); 5734 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
5652 }
5653 } 5735 }
5654 /* If there are no BDEs associated 5736 /* If there are no BDEs associated
5655 * with this IOCB, there is nothing to do. 5737 * with this IOCB, there is nothing to do.
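
The vpi in an unsolicited iocb is the firmware-relative value, so the lookup above subtracts phba->vpi_base; the transmit side of this same patch (see the ulpContext assignments earlier in this file) adds it back. Both directions of the mapping as a sketch, assuming vpi_base is the offset of the host's vpi 0 within the firmware's vpi space:

	/* host -> firmware: the value written into ulpContext on transmit */
	uint16_t hw_vpi = vport->vpi + phba->vpi_base;

	/* firmware -> host: the value an unsolicited iocb's vpi maps back to */
	uint16_t host_vpi = icmd->unsli3.rcvsli3.vpi - phba->vpi_base;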
@@ -5781,7 +5863,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5781 struct lpfc_vport *vport = pmb->vport; 5863 struct lpfc_vport *vport = pmb->vport;
5782 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5864 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5783 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 5865 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5784 MAILBOX_t *mb = &pmb->mb; 5866 MAILBOX_t *mb = &pmb->u.mb;
5785 5867
5786 spin_lock_irq(shost->host_lock); 5868 spin_lock_irq(shost->host_lock);
5787 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 5869 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5900,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5818 5900
5819 } else { 5901 } else {
5820 if (vport == phba->pport) 5902 if (vport == phba->pport)
5821 lpfc_issue_fabric_reglogin(vport); 5903 if (phba->sli_rev < LPFC_SLI_REV4)
5904 lpfc_issue_fabric_reglogin(vport);
5905 else
5906 lpfc_issue_reg_vfi(vport);
5822 else 5907 else
5823 lpfc_do_scr_ns_plogi(phba, vport); 5908 lpfc_do_scr_ns_plogi(phba, vport);
5824 } 5909 }
@@ -5850,7 +5935,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
5850 5935
5851 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5936 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5852 if (mbox) { 5937 if (mbox) {
5853 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); 5938 lpfc_reg_vpi(vport, mbox);
5854 mbox->vport = vport; 5939 mbox->vport = vport;
5855 mbox->context2 = lpfc_nlp_get(ndlp); 5940 mbox->context2 = lpfc_nlp_get(ndlp);
5856 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 5941 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6036,9 +6121,17 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6036 icmd->un.elsreq64.myID = 0; 6121 icmd->un.elsreq64.myID = 0;
6037 icmd->un.elsreq64.fl = 1; 6122 icmd->un.elsreq64.fl = 1;
6038 6123
6039 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ 6124 if (phba->sli_rev == LPFC_SLI_REV4) {
6040 icmd->ulpCt_h = 1; 6125 /* FDISC needs to be 1 for WQE VPI */
6041 icmd->ulpCt_l = 0; 6126 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
6127 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
6128 /* Set the ulpContext to the vpi */
6129 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
6130 } else {
6131 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
6132 icmd->ulpCt_h = 1;
6133 icmd->ulpCt_l = 0;
6134 }
6042 6135
6043 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6136 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6044 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 6137 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
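
Both the FLOGI hunk earlier and the FDISC hunk above split a two-bit SLI-4 context type (SLI4_CT_FCFI or SLI4_CT_VPI) across the two one-bit ulpCt_h/ulpCt_l fields of the IOCB. The encoding in isolation, as a sketch:

	uint8_t ct = SLI4_CT_VPI;	/* two-bit context type value */

	elsiocb->iocb.ulpCt_h = (ct >> 1) & 1;	/* bit 1 of the type */
	elsiocb->iocb.ulpCt_l = ct & 1;		/* bit 0 of the type */
	/* for reference: ct == (ulpCt_h << 1) | ulpCt_l */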
@@ -6139,7 +6232,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6139{ 6232{
6140 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6233 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6141 struct lpfc_hba *phba = vport->phba; 6234 struct lpfc_hba *phba = vport->phba;
6142 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6143 IOCB_t *icmd; 6235 IOCB_t *icmd;
6144 struct lpfc_iocbq *elsiocb; 6236 struct lpfc_iocbq *elsiocb;
6145 uint8_t *pcmd; 6237 uint8_t *pcmd;
@@ -6169,7 +6261,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6169 spin_lock_irq(shost->host_lock); 6261 spin_lock_irq(shost->host_lock);
6170 ndlp->nlp_flag |= NLP_LOGO_SND; 6262 ndlp->nlp_flag |= NLP_LOGO_SND;
6171 spin_unlock_irq(shost->host_lock); 6263 spin_unlock_irq(shost->host_lock);
6172 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 6264 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6265 IOCB_ERROR) {
6173 spin_lock_irq(shost->host_lock); 6266 spin_lock_irq(shost->host_lock);
6174 ndlp->nlp_flag &= ~NLP_LOGO_SND; 6267 ndlp->nlp_flag &= ~NLP_LOGO_SND;
6175 spin_unlock_irq(shost->host_lock); 6268 spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6317,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6224 struct lpfc_iocbq *iocb; 6317 struct lpfc_iocbq *iocb;
6225 unsigned long iflags; 6318 unsigned long iflags;
6226 int ret; 6319 int ret;
6227 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6228 IOCB_t *cmd; 6320 IOCB_t *cmd;
6229 6321
6230repeat: 6322repeat:
@@ -6248,7 +6340,7 @@ repeat:
6248 "Fabric sched1: ste:x%x", 6340 "Fabric sched1: ste:x%x",
6249 iocb->vport->port_state, 0, 0); 6341 iocb->vport->port_state, 0, 0);
6250 6342
6251 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6343 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6252 6344
6253 if (ret == IOCB_ERROR) { 6345 if (ret == IOCB_ERROR) {
6254 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6346 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6486,6 @@ static int
6394lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 6486lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6395{ 6487{
6396 unsigned long iflags; 6488 unsigned long iflags;
6397 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6398 int ready; 6489 int ready;
6399 int ret; 6490 int ret;
6400 6491
@@ -6418,7 +6509,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6418 "Fabric sched2: ste:x%x", 6509 "Fabric sched2: ste:x%x",
6419 iocb->vport->port_state, 0, 0); 6510 iocb->vport->port_state, 0, 0);
6420 6511
6421 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6512 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6422 6513
6423 if (ret == IOCB_ERROR) { 6514 if (ret == IOCB_ERROR) {
6424 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6515 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6615,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6615 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6525 IOERR_SLI_ABORTED); 6616 IOERR_SLI_ABORTED);
6526} 6617}
6618
6619/**
6620 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6621 * @phba: pointer to lpfc hba data structure.
6622 * @axri: pointer to the els xri abort wcqe structure.
6623 *
6624 * This routine is invoked by the worker thread to process a SLI4 slow-path
6625 * ELS aborted xri.
6626 **/
6627void
6628lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6629 struct sli4_wcqe_xri_aborted *axri)
6630{
6631 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6632 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6633 unsigned long iflag = 0;
6634
6635 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6636 list_for_each_entry_safe(sglq_entry, sglq_next,
6637 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6638 if (sglq_entry->sli4_xritag == xri) {
6639 list_del(&sglq_entry->list);
6640 spin_unlock_irqrestore(
6641 &phba->sli4_hba.abts_sgl_list_lock,
6642 iflag);
6643 spin_lock_irqsave(&phba->hbalock, iflag);
6644
6645 list_add_tail(&sglq_entry->list,
6646 &phba->sli4_hba.lpfc_sgl_list);
6647 spin_unlock_irqrestore(&phba->hbalock, iflag);
6648 return;
6649 }
6650 }
6651 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6652}
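
Note the lock handling in lpfc_sli4_els_xri_aborted() above: the sglq cannot be moved with a single list_move_tail() because the source and destination lists are guarded by different locks, and the routine never holds both at once. The pattern in isolation, with generic names rather than driver fields (sketch):

	/* two-lock list move: delete under the source lock, drop it, then
	 * add under the destination lock; never hold both locks together
	 */
	spin_lock_irqsave(&src_lock, flags);
	list_del(&entry->list);
	spin_unlock_irqrestore(&src_lock, flags);

	spin_lock_irqsave(&dst_lock, flags);
	list_add_tail(&entry->list, &dst_head);
	spin_unlock_irqrestore(&dst_lock, flags);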
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf704..ed46b24a3380 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,10 +29,12 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
31 31
32#include "lpfc_hw4.h"
32#include "lpfc_hw.h" 33#include "lpfc_hw.h"
33#include "lpfc_nl.h" 34#include "lpfc_nl.h"
34#include "lpfc_disc.h" 35#include "lpfc_disc.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_scsi.h" 38#include "lpfc_scsi.h"
37#include "lpfc.h" 39#include "lpfc.h"
38#include "lpfc_logmsg.h" 40#include "lpfc_logmsg.h"
@@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
273 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 275 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
274 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 276 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
275 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 277 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
278
279 lpfc_unregister_unused_fcf(phba);
276} 280}
277 281
278/** 282/**
@@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
295 299
296 ret = kzalloc(sizeof(struct lpfc_fast_path_event), 300 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
297 GFP_ATOMIC); 301 GFP_ATOMIC);
298 if (ret) 302 if (ret) {
299 atomic_inc(&phba->fast_event_count); 303 atomic_inc(&phba->fast_event_count);
300 INIT_LIST_HEAD(&ret->work_evt.evt_listp); 304 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
301 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 305 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
306 }
302 return ret; 307 return ret;
303} 308}
304 309
@@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba)
491 phba->work_ha = 0; 496 phba->work_ha = 0;
492 spin_unlock_irq(&phba->hbalock); 497 spin_unlock_irq(&phba->hbalock);
493 498
499 /* First, try to post the next mailbox command to SLI4 device */
500 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
501 lpfc_sli4_post_async_mbox(phba);
502
494 if (ha_copy & HA_ERATT) 503 if (ha_copy & HA_ERATT)
495 /* Handle the error attention event */ 504 /* Handle the error attention event */
496 lpfc_handle_eratt(phba); 505 lpfc_handle_eratt(phba);
@@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba)
501 if (ha_copy & HA_LATT) 510 if (ha_copy & HA_LATT)
502 lpfc_handle_latt(phba); 511 lpfc_handle_latt(phba);
503 512
513 /* Process SLI4 events */
514 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
515 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
516 lpfc_sli4_fcp_xri_abort_event_proc(phba);
517 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
518 lpfc_sli4_els_xri_abort_event_proc(phba);
519 if (phba->hba_flag & ASYNC_EVENT)
520 lpfc_sli4_async_event_proc(phba);
521 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
522 spin_lock_irq(&phba->hbalock);
523 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
524 spin_unlock_irq(&phba->hbalock);
525 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
526 }
527 if (phba->hba_flag & HBA_RECEIVE_BUFFER)
528 lpfc_sli4_handle_received_buffer(phba);
529 }
530
504 vports = lpfc_create_vport_work_array(phba); 531 vports = lpfc_create_vport_work_array(phba);
505 if (vports != NULL) 532 if (vports != NULL)
506 for(i = 0; i <= phba->max_vpi; i++) { 533 for (i = 0; i <= phba->max_vports; i++) {
507 /* 534 /*
508 * We could have no vports in array if unloading, so if 535 * We could have no vports in array if unloading, so if
509 * this happens then just use the pport 536 * this happens then just use the pport
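
The SLI-4 additions to lpfc_work_done() above share one pattern: an event bit set from interrupt context is consumed in the worker thread, and a bit that gates a re-arm action (HBA_POST_RECEIVE_BUFFER here) is cleared under hbalock before the handler runs, so a new event can be posted while the handler executes. The pattern in isolation (sketch):

	uint32_t pending;

	spin_lock_irq(&phba->hbalock);
	pending = phba->hba_flag & HBA_POST_RECEIVE_BUFFER;
	phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
	spin_unlock_irq(&phba->hbalock);
	if (pending)
		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);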
@@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba)
555 /* 582 /*
556 * Turn on Ring interrupts 583 * Turn on Ring interrupts
557 */ 584 */
558 spin_lock_irq(&phba->hbalock); 585 if (phba->sli_rev <= LPFC_SLI_REV3) {
559 control = readl(phba->HCregaddr); 586 spin_lock_irq(&phba->hbalock);
560 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 587 control = readl(phba->HCregaddr);
561 lpfc_debugfs_slow_ring_trc(phba, 588 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
562 "WRK Enable ring: cntl:x%x hacopy:x%x", 589 lpfc_debugfs_slow_ring_trc(phba,
563 control, ha_copy, 0); 590 "WRK Enable ring: cntl:x%x hacopy:x%x",
564 591 control, ha_copy, 0);
565 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 592
566 writel(control, phba->HCregaddr); 593 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
567 readl(phba->HCregaddr); /* flush */ 594 writel(control, phba->HCregaddr);
568 } 595 readl(phba->HCregaddr); /* flush */
569 else { 596 } else {
570 lpfc_debugfs_slow_ring_trc(phba, 597 lpfc_debugfs_slow_ring_trc(phba,
571 "WRK Ring ok: cntl:x%x hacopy:x%x", 598 "WRK Ring ok: cntl:x%x hacopy:x%x",
572 control, ha_copy, 0); 599 control, ha_copy, 0);
600 }
601 spin_unlock_irq(&phba->hbalock);
573 } 602 }
574 spin_unlock_irq(&phba->hbalock);
575 } 603 }
576 lpfc_work_list_done(phba); 604 lpfc_work_list_done(phba);
577} 605}
@@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
689 lpfc_can_disctmo(vport); 717 lpfc_can_disctmo(vport);
690} 718}
691 719
692static void 720void
693lpfc_linkdown_port(struct lpfc_vport *vport) 721lpfc_linkdown_port(struct lpfc_vport *vport)
694{ 722{
695 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
716 if (phba->link_state == LPFC_LINK_DOWN) 744 if (phba->link_state == LPFC_LINK_DOWN)
717 return 0; 745 return 0;
718 spin_lock_irq(&phba->hbalock); 746 spin_lock_irq(&phba->hbalock);
747 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
719 if (phba->link_state > LPFC_LINK_DOWN) { 748 if (phba->link_state > LPFC_LINK_DOWN) {
720 phba->link_state = LPFC_LINK_DOWN; 749 phba->link_state = LPFC_LINK_DOWN;
721 phba->pport->fc_flag &= ~FC_LBIT; 750 phba->pport->fc_flag &= ~FC_LBIT;
@@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
723 spin_unlock_irq(&phba->hbalock); 752 spin_unlock_irq(&phba->hbalock);
724 vports = lpfc_create_vport_work_array(phba); 753 vports = lpfc_create_vport_work_array(phba);
725 if (vports != NULL) 754 if (vports != NULL)
726 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 755 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
727 /* Issue a LINK DOWN event to all nodes */ 756 /* Issue a LINK DOWN event to all nodes */
728 lpfc_linkdown_port(vports[i]); 757 lpfc_linkdown_port(vports[i]);
729 } 758 }
@@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
833 862
834 vports = lpfc_create_vport_work_array(phba); 863 vports = lpfc_create_vport_work_array(phba);
835 if (vports != NULL) 864 if (vports != NULL)
836 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
837 lpfc_linkup_port(vports[i]); 866 lpfc_linkup_port(vports[i]);
838 lpfc_destroy_vport_work_array(phba, vports); 867 lpfc_destroy_vport_work_array(phba, vports);
839 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 868 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
869 (phba->sli_rev < LPFC_SLI_REV4))
840 lpfc_issue_clear_la(phba, phba->pport); 870 lpfc_issue_clear_la(phba, phba->pport);
841 871
842 return 0; 872 return 0;
@@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
854 struct lpfc_vport *vport = pmb->vport; 884 struct lpfc_vport *vport = pmb->vport;
855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 885 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
856 struct lpfc_sli *psli = &phba->sli; 886 struct lpfc_sli *psli = &phba->sli;
857 MAILBOX_t *mb = &pmb->mb; 887 MAILBOX_t *mb = &pmb->u.mb;
858 uint32_t control; 888 uint32_t control;
859 889
860 /* Since we don't do discovery right now, turn these off here */ 890 /* Since we don't do discovery right now, turn these off here */
@@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
917{ 947{
918 struct lpfc_vport *vport = pmb->vport; 948 struct lpfc_vport *vport = pmb->vport;
919 949
920 if (pmb->mb.mbxStatus) 950 if (pmb->u.mb.mbxStatus)
921 goto out; 951 goto out;
922 952
923 mempool_free(pmb, phba->mbox_mem_pool); 953 mempool_free(pmb, phba->mbox_mem_pool);
@@ -945,7 +975,7 @@ out:
945 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 975 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
946 "0306 CONFIG_LINK mbxStatus error x%x " 976 "0306 CONFIG_LINK mbxStatus error x%x "
947 "HBA state x%x\n", 977 "HBA state x%x\n",
948 pmb->mb.mbxStatus, vport->port_state); 978 pmb->u.mb.mbxStatus, vport->port_state);
949 mempool_free(pmb, phba->mbox_mem_pool); 979 mempool_free(pmb, phba->mbox_mem_pool);
950 980
951 lpfc_linkdown(phba); 981 lpfc_linkdown(phba);
@@ -959,9 +989,612 @@ out:
959} 989}
960 990
961static void 991static void
992lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
993{
994 struct lpfc_vport *vport = mboxq->vport;
995 unsigned long flags;
996
997 if (mboxq->u.mb.mbxStatus) {
998 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
999 "2017 REG_FCFI mbxStatus error x%x "
1000 "HBA state x%x\n",
1001 mboxq->u.mb.mbxStatus, vport->port_state);
1002 mempool_free(mboxq, phba->mbox_mem_pool);
1003 return;
1004 }
1005
1006 /* Start FCoE discovery by sending a FLOGI. */
1007 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1008 /* Set the FCFI registered flag */
1009 spin_lock_irqsave(&phba->hbalock, flags);
1010 phba->fcf.fcf_flag |= FCF_REGISTERED;
1011 spin_unlock_irqrestore(&phba->hbalock, flags);
1012 if (vport->port_state != LPFC_FLOGI) {
1013 spin_lock_irqsave(&phba->hbalock, flags);
1014 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1015 spin_unlock_irqrestore(&phba->hbalock, flags);
1016 lpfc_initial_flogi(vport);
1017 }
1018
1019 mempool_free(mboxq, phba->mbox_mem_pool);
1020 return;
1021}
1022
1023/**
 1024 * lpfc_fab_name_match - Check if the fcf fabric names match.
1025 * @fab_name: pointer to fabric name.
1026 * @new_fcf_record: pointer to fcf record.
1027 *
 1028 * This routine compares the fcf record's fabric name with the provided
 1029 * fabric name. If the fabric names are identical this function
 1030 * returns 1, else it returns 0.
1031 **/
1032static uint32_t
1033lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1034{
1035 if ((fab_name[0] ==
1036 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
1037 (fab_name[1] ==
1038 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
1039 (fab_name[2] ==
1040 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
1041 (fab_name[3] ==
1042 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
1043 (fab_name[4] ==
1044 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
1045 (fab_name[5] ==
1046 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
1047 (fab_name[6] ==
1048 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
1049 (fab_name[7] ==
1050 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1051 return 1;
1052 else
1053 return 0;
1054}
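
The bf_get() comparisons above rely on the SHIFT/MASK/WORD accessor convention used throughout the SLI4 headers (the fc_vft_header definition later in this patch shows the same triplets). A minimal standalone sketch of that pattern, with purely illustrative names:

#include <stdint.h>
#include <stdio.h>

struct demo_rec {
	uint32_t word0;
};
#define demo_rec_ver_SHIFT	22
#define demo_rec_ver_MASK	0x3
#define demo_rec_ver_WORD	word0

/* Extract a field: shift the containing word right, then mask. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

int main(void)
{
	struct demo_rec r = { .word0 = 2u << 22 };

	printf("ver = %u\n", bf_get(demo_rec_ver, &r));	/* prints 2 */
	return 0;
}
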
1055
1056/**
 1057 * lpfc_mac_addr_match - Check if the fcf mac address matches.
1058 * @phba: pointer to lpfc hba data structure.
1059 * @new_fcf_record: pointer to fcf record.
1060 *
 1061 * This routine compares the fcf record's mac address with the HBA's
 1062 * FCF mac address. If the mac addresses are identical this function
 1063 * returns 1, else it returns 0.
1064 **/
1065static uint32_t
1066lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1067{
1068 if ((phba->fcf.mac_addr[0] ==
1069 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
1070 (phba->fcf.mac_addr[1] ==
1071 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
1072 (phba->fcf.mac_addr[2] ==
1073 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1074 (phba->fcf.mac_addr[3] ==
1075 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1076 (phba->fcf.mac_addr[4] ==
1077 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1078 (phba->fcf.mac_addr[5] ==
1079 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1080 return 1;
1081 else
1082 return 0;
1083}
1084
1085/**
1086 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1087 * @phba: pointer to lpfc hba data structure.
1088 * @new_fcf_record: pointer to fcf record.
1089 *
1090 * This routine copies the FCF information from the FCF
1091 * record to lpfc_hba data structure.
1092 **/
1093static void
1094lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1095{
1096 phba->fcf.fabric_name[0] =
1097 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1098 phba->fcf.fabric_name[1] =
1099 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1100 phba->fcf.fabric_name[2] =
1101 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1102 phba->fcf.fabric_name[3] =
1103 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1104 phba->fcf.fabric_name[4] =
1105 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1106 phba->fcf.fabric_name[5] =
1107 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1108 phba->fcf.fabric_name[6] =
1109 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1110 phba->fcf.fabric_name[7] =
1111 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1112 phba->fcf.mac_addr[0] =
1113 bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1114 phba->fcf.mac_addr[1] =
1115 bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1116 phba->fcf.mac_addr[2] =
1117 bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1118 phba->fcf.mac_addr[3] =
1119 bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1120 phba->fcf.mac_addr[4] =
1121 bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1122 phba->fcf.mac_addr[5] =
1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1125 phba->fcf.priority = new_fcf_record->fip_priority;
1126}
1127
1128/**
 1129 * lpfc_register_fcf - Register the FCF with the HBA.
1130 * @phba: pointer to lpfc hba data structure.
1131 *
1132 * This routine issues a register fcfi mailbox command to register
 1133 * the fcf with the HBA.
1134 **/
1135static void
1136lpfc_register_fcf(struct lpfc_hba *phba)
1137{
1138 LPFC_MBOXQ_t *fcf_mbxq;
1139 int rc;
1140 unsigned long flags;
1141
1142 spin_lock_irqsave(&phba->hbalock, flags);
1143
 1144	/* If the FCF is not available, do nothing. */
1145 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1146 spin_unlock_irqrestore(&phba->hbalock, flags);
1147 return;
1148 }
1149
1150 /* The FCF is already registered, start discovery */
1151 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1152 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1153 spin_unlock_irqrestore(&phba->hbalock, flags);
1154 if (phba->pport->port_state != LPFC_FLOGI)
1155 lpfc_initial_flogi(phba->pport);
1156 return;
1157 }
1158 spin_unlock_irqrestore(&phba->hbalock, flags);
1159
1160 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1161 GFP_KERNEL);
1162 if (!fcf_mbxq)
1163 return;
1164
1165 lpfc_reg_fcfi(phba, fcf_mbxq);
1166 fcf_mbxq->vport = phba->pport;
1167 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1168 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1169 if (rc == MBX_NOT_FINISHED)
1170 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1171
1172 return;
1173}
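
One convention worth noting here (and repeated in the unregister paths near the end of this file): after lpfc_sli_issue_mbox(..., MBX_NOWAIT), the completion handler owns and frees the mailbox unless the call returns MBX_NOT_FINISHED, in which case the caller must free it. A reduced sketch of that ownership rule, with stand-in types rather than the driver's:

#include <stdlib.h>

enum { MBX_SUCCESS, MBX_BUSY, MBX_NOT_FINISHED };

struct mbox_demo { int cmd; };

static int issue_nowait(struct mbox_demo *m) { (void)m; return MBX_SUCCESS; }

static void submit(struct mbox_demo *m)
{
	if (issue_nowait(m) == MBX_NOT_FINISHED)
		free(m);	/* never queued: the caller still owns it */
	/* otherwise the completion handler frees m when it runs */
}
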
1174
1175/**
1176 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1177 * @phba: pointer to lpfc hba data structure.
1178 * @new_fcf_record: pointer to fcf record.
 1179 * @boot_flag: Indicates if this record is used by the boot bios.
1180 * @addr_mode: The address mode to be used by this FCF
1181 *
 1182 * This routine compares the fcf record with the connect list obtained from
 1183 * the config region to decide if this FCF can be used for SAN discovery. It
 1184 * returns 1 if this record can be used for SAN discovery, else it returns
 1185 * zero. If this FCF record can be used for SAN discovery, boot_flag will
 1186 * indicate if this FCF is used by the boot bios and addr_mode will indicate
 1187 * the addressing mode to be used for this FCF when the function returns.
 1188 * If the FCF record needs to be used with a particular vlan id, the vlan is
 1189 * set in vlan_id on return from the function. If no VLAN tagging needs to
 1190 * be used with the FCF, vlan_id will be set to 0xFFFF.
1191 **/
1192static int
1193lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1194 struct fcf_record *new_fcf_record,
1195 uint32_t *boot_flag, uint32_t *addr_mode,
1196 uint16_t *vlan_id)
1197{
1198 struct lpfc_fcf_conn_entry *conn_entry;
1199
1200 /* If FCF not available return 0 */
1201 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1202 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
1203 return 0;
1204
1205 if (!phba->cfg_enable_fip) {
1206 *boot_flag = 0;
1207 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1208 new_fcf_record);
1209 if (phba->valid_vlan)
1210 *vlan_id = phba->vlan_id;
1211 else
1212 *vlan_id = 0xFFFF;
1213 return 1;
1214 }
1215
1216 /*
 1217	 * If there are no FCF connection table entries, the driver connects
 1218	 * to all FCFs.
1219 */
1220 if (list_empty(&phba->fcf_conn_rec_list)) {
1221 *boot_flag = 0;
1222 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1223 new_fcf_record);
1224
1225 /*
 1226	 * When there are no FCF connect entries, use the driver's default
1227 * addressing mode - FPMA.
1228 */
1229 if (*addr_mode & LPFC_FCF_FPMA)
1230 *addr_mode = LPFC_FCF_FPMA;
1231
1232 *vlan_id = 0xFFFF;
1233 return 1;
1234 }
1235
1236 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
1237 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1238 continue;
1239
1240 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1241 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1242 new_fcf_record))
1243 continue;
1244
1245 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1246 /*
1247 * If the vlan bit map does not have the bit set for the
1248 * vlan id to be used, then it is not a match.
1249 */
1250 if (!(new_fcf_record->vlan_bitmap
1251 [conn_entry->conn_rec.vlan_tag / 8] &
1252 (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1253 continue;
1254 }
1255
1256 /*
1257 * If connection record does not support any addressing mode,
1258 * skip the FCF record.
1259 */
1260 if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1261 & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1262 continue;
1263
1264 /*
1265 * Check if the connection record specifies a required
1266 * addressing mode.
1267 */
1268 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1269 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1270
1271 /*
 1272	 * If SPMA is required but the FCF does not support it, continue.
1273 */
1274 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1275 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1276 new_fcf_record) & LPFC_FCF_SPMA))
1277 continue;
1278
1279 /*
 1280	 * If FPMA is required but the FCF does not support it, continue.
1281 */
1282 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1283 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1284 new_fcf_record) & LPFC_FCF_FPMA))
1285 continue;
1286 }
1287
1288 /*
1289 * This fcf record matches filtering criteria.
1290 */
1291 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1292 *boot_flag = 1;
1293 else
1294 *boot_flag = 0;
1295
1296 /*
 1297	 * If the user did not specify any addressing mode, or if the
 1298	 * preferred addressing mode specified by the user is not supported
 1299	 * by the FCF, allow the fabric to pick the addressing mode.
1300 */
1301 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1302 new_fcf_record);
1303 /*
1304 * If the user specified a required address mode, assign that
1305 * address mode
1306 */
1307 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1308 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1309 *addr_mode = (conn_entry->conn_rec.flags &
1310 FCFCNCT_AM_SPMA) ?
1311 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1312 /*
 1313	 * If the user specified a preferred address mode, use the
 1314	 * addr mode only if the FCF supports it.
1315 */
1316 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1317 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1318 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1319 (*addr_mode & LPFC_FCF_SPMA))
1320 *addr_mode = LPFC_FCF_SPMA;
1321 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1322 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1323 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1324 (*addr_mode & LPFC_FCF_FPMA))
1325 *addr_mode = LPFC_FCF_FPMA;
1326
1327 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1328 *vlan_id = conn_entry->conn_rec.vlan_tag;
1329 else
1330 *vlan_id = 0xFFFF;
1331
1332 return 1;
1333 }
1334
1335 return 0;
1336}
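
The FCFCNCT_VLAN_VALID test above indexes a bitmap packed eight VLAN IDs per byte. A self-contained sketch of the same indexing; the 512-byte size assumes the usual 4096-entry 802.1Q VLAN space, which is an assumption of this example, not something stated in the patch:

#include <stdint.h>
#include <stdio.h>

static int vlan_id_set(const uint8_t *bitmap, uint16_t vlan_tag)
{
	/* VLAN ID n lives at bit (n % 8) of byte (n / 8). */
	return (bitmap[vlan_tag / 8] & (1 << (vlan_tag % 8))) != 0;
}

int main(void)
{
	uint8_t bitmap[512] = { 0 };		/* 4096 IDs / 8 per byte */

	bitmap[100 / 8] |= 1 << (100 % 8);	/* mark VLAN 100 */
	printf("vlan 100: %d, vlan 101: %d\n",
	       vlan_id_set(bitmap, 100), vlan_id_set(bitmap, 101));
	return 0;
}
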
1337
1338/**
1339 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1340 * @phba: pointer to lpfc hba data structure.
1341 * @mboxq: pointer to mailbox object.
1342 *
 1343 * This function iterates through all the fcf records available in the
 1344 * HBA and chooses the optimal FCF record for discovery. After finding
 1345 * the FCF for discovery it registers the FCF record and kicks off
 1346 * discovery.
 1347 * If the FCF_IN_USE flag is set in the currently used FCF, the routine
 1348 * tries to use an FCF record which matches the fabric name and mac
 1349 * address of the currently used FCF record.
 1350 * If the driver supports only one FCF, it will try to use the FCF
 1351 * record used by the boot BIOS.
1352 */
1353void
1354lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1355{
1356 void *virt_addr;
1357 dma_addr_t phys_addr;
1358 uint8_t *bytep;
1359 struct lpfc_mbx_sge sge;
1360 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1361 uint32_t shdr_status, shdr_add_status;
1362 union lpfc_sli4_cfg_shdr *shdr;
1363 struct fcf_record *new_fcf_record;
1364 int rc;
1365 uint32_t boot_flag, addr_mode;
1366 uint32_t next_fcf_index;
1367 unsigned long flags;
1368 uint16_t vlan_id;
1369
1370 /* Get the first SGE entry from the non-embedded DMA memory. This
1371 * routine only uses a single SGE.
1372 */
1373 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1374 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1375 if (unlikely(!mboxq->sge_array)) {
1376 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1377 "2524 Failed to get the non-embedded SGE "
1378 "virtual address\n");
1379 goto out;
1380 }
1381 virt_addr = mboxq->sge_array->addr[0];
1382
1383 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1384 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1385 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1386 &shdr->response);
1387 /*
1388 * The FCF Record was read and there is no reason for the driver
 1389	 * to maintain the FCF record data or memory. Instead, it just
 1390	 * needs to keep track of which FCFIs can be used.
1391 */
1392 if (shdr_status || shdr_add_status) {
1393 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1394 "2521 READ_FCF_RECORD mailbox failed "
1395 "with status x%x add_status x%x, mbx\n",
1396 shdr_status, shdr_add_status);
1397 goto out;
1398 }
1399 /* Interpreting the returned information of FCF records */
1400 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1401 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1402 sizeof(struct lpfc_mbx_read_fcf_tbl));
1403 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1404
1405 new_fcf_record = (struct fcf_record *)(virt_addr +
1406 sizeof(struct lpfc_mbx_read_fcf_tbl));
1407 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1408 sizeof(struct fcf_record));
1409 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1410
1411 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
1412 &boot_flag, &addr_mode,
1413 &vlan_id);
1414 /*
 1415	 * If the fcf record does not match the connect list entries,
 1416	 * read the next entry.
1417 */
1418 if (!rc)
1419 goto read_next_fcf;
1420 /*
 1421	 * If this is not the first FCF discovery of the HBA, use the
 1422	 * last FCF record for the discovery.
1423 */
1424 spin_lock_irqsave(&phba->hbalock, flags);
1425 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1426 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1427 new_fcf_record) &&
1428 lpfc_mac_addr_match(phba, new_fcf_record)) {
1429 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1430 spin_unlock_irqrestore(&phba->hbalock, flags);
1431 goto out;
1432 }
1433 spin_unlock_irqrestore(&phba->hbalock, flags);
1434 goto read_next_fcf;
1435 }
1436 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1437 /*
 1438	 * If the current FCF record does not have the boot flag
 1439	 * set and the new fcf record has it set, use the
1440 * new fcf record.
1441 */
1442 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1443 /* Use this FCF record */
1444 lpfc_copy_fcf_record(phba, new_fcf_record);
1445 phba->fcf.addr_mode = addr_mode;
1446 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1447 if (vlan_id != 0xFFFF) {
1448 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1449 phba->fcf.vlan_id = vlan_id;
1450 }
1451 spin_unlock_irqrestore(&phba->hbalock, flags);
1452 goto read_next_fcf;
1453 }
1454 /*
 1455	 * If the current FCF record has the boot flag set and the
 1456	 * new FCF record does not, read the next
1457 * FCF record.
1458 */
1459 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1460 spin_unlock_irqrestore(&phba->hbalock, flags);
1461 goto read_next_fcf;
1462 }
1463 /*
1464 * If there is a record with lower priority value for
1465 * the current FCF, use that record.
1466 */
1467 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
1468 && (new_fcf_record->fip_priority <
1469 phba->fcf.priority)) {
1470 /* Use this FCF record */
1471 lpfc_copy_fcf_record(phba, new_fcf_record);
1472 phba->fcf.addr_mode = addr_mode;
1473 if (vlan_id != 0xFFFF) {
1474 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1475 phba->fcf.vlan_id = vlan_id;
1476 }
1477 spin_unlock_irqrestore(&phba->hbalock, flags);
1478 goto read_next_fcf;
1479 }
1480 spin_unlock_irqrestore(&phba->hbalock, flags);
1481 goto read_next_fcf;
1482 }
1483 /*
1484 * This is the first available FCF record, use this
1485 * record.
1486 */
1487 lpfc_copy_fcf_record(phba, new_fcf_record);
1488 phba->fcf.addr_mode = addr_mode;
1489 if (boot_flag)
1490 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1491 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1492 if (vlan_id != 0xFFFF) {
1493 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1494 phba->fcf.vlan_id = vlan_id;
1495 }
1496 spin_unlock_irqrestore(&phba->hbalock, flags);
1497 goto read_next_fcf;
1498
1499read_next_fcf:
1500 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1501 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
1502 lpfc_register_fcf(phba);
1503 else
1504 lpfc_sli4_read_fcf_record(phba, next_fcf_index);
1505 return;
1506
1507out:
1508 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1509 lpfc_register_fcf(phba);
1510
1511 return;
1512}
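
Stripped of locking and the FCF_IN_USE fast path, the selection loop above amounts to: a candidate displaces the current record if it gains the boot flag, or, with equal boot status, if it is on the same fabric and carries a numerically lower fip_priority. A condensed sketch of that ordering; the types are illustrative, not the driver's:

#include <stdint.h>

struct fcf_choice {
	int	boot;		/* record is used by the boot BIOS */
	int	same_fabric;	/* candidate matches current fabric name */
	uint8_t	priority;	/* fip_priority: lower value wins */
};

/* Return nonzero if the candidate should replace the current choice. */
static int prefer_new(const struct fcf_choice *cur,
		      const struct fcf_choice *cand)
{
	if (cand->boot != cur->boot)
		return cand->boot;	/* boot records always win */
	return cand->same_fabric && cand->priority < cur->priority;
}
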
1513
1514/**
 1515 * lpfc_start_fdiscs - send an FDISC for each vport on this port.
1516 * @phba: pointer to lpfc hba data structure.
1517 *
1518 * This function loops through the list of vports on the @phba and issues an
1519 * FDISC if possible.
1520 */
1521void
1522lpfc_start_fdiscs(struct lpfc_hba *phba)
1523{
1524 struct lpfc_vport **vports;
1525 int i;
1526
1527 vports = lpfc_create_vport_work_array(phba);
1528 if (vports != NULL) {
1529 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1530 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1531 continue;
 1532	/* There is no vpi for this vport */
1533 if (vports[i]->vpi > phba->max_vpi) {
1534 lpfc_vport_set_state(vports[i],
1535 FC_VPORT_FAILED);
1536 continue;
1537 }
1538 if (phba->fc_topology == TOPOLOGY_LOOP) {
1539 lpfc_vport_set_state(vports[i],
1540 FC_VPORT_LINKDOWN);
1541 continue;
1542 }
1543 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1544 lpfc_initial_fdisc(vports[i]);
1545 else {
1546 lpfc_vport_set_state(vports[i],
1547 FC_VPORT_NO_FABRIC_SUPP);
1548 lpfc_printf_vlog(vports[i], KERN_ERR,
1549 LOG_ELS,
1550 "0259 No NPIV "
1551 "Fabric support\n");
1552 }
1553 }
1554 }
1555 lpfc_destroy_vport_work_array(phba, vports);
1556}
1557
1558void
1559lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1560{
1561 struct lpfc_dmabuf *dmabuf = mboxq->context1;
1562 struct lpfc_vport *vport = mboxq->vport;
1563
1564 if (mboxq->u.mb.mbxStatus) {
1565 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1566 "2018 REG_VFI mbxStatus error x%x "
1567 "HBA state x%x\n",
1568 mboxq->u.mb.mbxStatus, vport->port_state);
1569 if (phba->fc_topology == TOPOLOGY_LOOP) {
1570 /* FLOGI failed, use loop map to make discovery list */
1571 lpfc_disc_list_loopmap(vport);
1572 /* Start discovery */
1573 lpfc_disc_start(vport);
1574 goto fail_free_mem;
1575 }
1576 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1577 goto fail_free_mem;
1578 }
 1579	/* Mark the vport as registered with its VFI */
1580 vport->vfi_state |= LPFC_VFI_REGISTERED;
1581
1582 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1583 lpfc_start_fdiscs(phba);
1584 lpfc_do_scr_ns_plogi(phba, vport);
1585 }
1586
1587fail_free_mem:
1588 mempool_free(mboxq, phba->mbox_mem_pool);
1589 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1590 kfree(dmabuf);
1591 return;
1592}
1593
1594static void
962lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1595lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
963{ 1596{
964 MAILBOX_t *mb = &pmb->mb; 1597 MAILBOX_t *mb = &pmb->u.mb;
965 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1598 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
966 struct lpfc_vport *vport = pmb->vport; 1599 struct lpfc_vport *vport = pmb->vport;
967 1600
@@ -1012,13 +1645,13 @@ static void
1012lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 1645lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1013{ 1646{
1014 struct lpfc_vport *vport = phba->pport; 1647 struct lpfc_vport *vport = phba->pport;
1015 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 1648 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
1016 int i; 1649 int i;
1017 struct lpfc_dmabuf *mp; 1650 struct lpfc_dmabuf *mp;
1018 int rc; 1651 int rc;
1652 struct fcf_record *fcf_record;
1019 1653
1020 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1654 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1021 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1022 1655
1023 spin_lock_irq(&phba->hbalock); 1656 spin_lock_irq(&phba->hbalock);
1024 switch (la->UlnkSpeed) { 1657 switch (la->UlnkSpeed) {
@@ -1034,6 +1667,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1034 case LA_8GHZ_LINK: 1667 case LA_8GHZ_LINK:
1035 phba->fc_linkspeed = LA_8GHZ_LINK; 1668 phba->fc_linkspeed = LA_8GHZ_LINK;
1036 break; 1669 break;
1670 case LA_10GHZ_LINK:
1671 phba->fc_linkspeed = LA_10GHZ_LINK;
1672 break;
1037 default: 1673 default:
1038 phba->fc_linkspeed = LA_UNKNW_LINK; 1674 phba->fc_linkspeed = LA_UNKNW_LINK;
1039 break; 1675 break;
@@ -1115,22 +1751,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1115 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1751 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1116 kfree(mp); 1752 kfree(mp);
1117 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1753 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1118 if (cfglink_mbox)
1119 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1120 goto out; 1754 goto out;
1121 } 1755 }
1122 } 1756 }
1123 1757
1124 if (cfglink_mbox) { 1758 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
1759 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1760 if (!cfglink_mbox)
1761 goto out;
1125 vport->port_state = LPFC_LOCAL_CFG_LINK; 1762 vport->port_state = LPFC_LOCAL_CFG_LINK;
1126 lpfc_config_link(phba, cfglink_mbox); 1763 lpfc_config_link(phba, cfglink_mbox);
1127 cfglink_mbox->vport = vport; 1764 cfglink_mbox->vport = vport;
1128 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1765 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1129 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 1766 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1130 if (rc != MBX_NOT_FINISHED) 1767 if (rc == MBX_NOT_FINISHED) {
1131 return; 1768 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1132 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1769 goto out;
1770 }
1771 } else {
1772 /*
1773 * Add the driver's default FCF record at FCF index 0 now. This
1774 * is phase 1 implementation that support FCF index 0 and driver
1775 * defaults.
1776 */
1777 if (phba->cfg_enable_fip == 0) {
1778 fcf_record = kzalloc(sizeof(struct fcf_record),
1779 GFP_KERNEL);
1780 if (unlikely(!fcf_record)) {
1781 lpfc_printf_log(phba, KERN_ERR,
1782 LOG_MBOX | LOG_SLI,
1783 "2554 Could not allocate memmory for "
1784 "fcf record\n");
1785 rc = -ENODEV;
1786 goto out;
1787 }
1788
1789 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
1790 LPFC_FCOE_FCF_DEF_INDEX);
1791 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
1792 if (unlikely(rc)) {
1793 lpfc_printf_log(phba, KERN_ERR,
1794 LOG_MBOX | LOG_SLI,
1795 "2013 Could not manually add FCF "
1796 "record 0, status %d\n", rc);
1797 rc = -ENODEV;
1798 kfree(fcf_record);
1799 goto out;
1800 }
1801 kfree(fcf_record);
1802 }
1803 /*
1804 * The driver is expected to do FIP/FCF. Call the port
1805 * and get the FCF Table.
1806 */
1807 rc = lpfc_sli4_read_fcf_record(phba,
1808 LPFC_FCOE_FCF_GET_FIRST);
1809 if (rc)
1810 goto out;
1133 } 1811 }
1812
1813 return;
1134out: 1814out:
1135 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1815 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1136 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1816 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1147,10 +1827,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
1147 struct lpfc_sli *psli = &phba->sli; 1827 struct lpfc_sli *psli = &phba->sli;
1148 spin_lock_irq(&phba->hbalock); 1828 spin_lock_irq(&phba->hbalock);
1149 psli->sli_flag |= LPFC_PROCESS_LA; 1829 psli->sli_flag |= LPFC_PROCESS_LA;
1150 control = readl(phba->HCregaddr); 1830 if (phba->sli_rev <= LPFC_SLI_REV3) {
1151 control |= HC_LAINT_ENA; 1831 control = readl(phba->HCregaddr);
1152 writel(control, phba->HCregaddr); 1832 control |= HC_LAINT_ENA;
1153 readl(phba->HCregaddr); /* flush */ 1833 writel(control, phba->HCregaddr);
1834 readl(phba->HCregaddr); /* flush */
1835 }
1154 spin_unlock_irq(&phba->hbalock); 1836 spin_unlock_irq(&phba->hbalock);
1155} 1837}
1156 1838
@@ -1159,6 +1841,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1159{ 1841{
1160 lpfc_linkdown(phba); 1842 lpfc_linkdown(phba);
1161 lpfc_enable_la(phba); 1843 lpfc_enable_la(phba);
1844 lpfc_unregister_unused_fcf(phba);
1162 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1845 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1163} 1846}
1164 1847
@@ -1175,7 +1858,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1175 struct lpfc_vport *vport = pmb->vport; 1858 struct lpfc_vport *vport = pmb->vport;
1176 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1859 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1177 READ_LA_VAR *la; 1860 READ_LA_VAR *la;
1178 MAILBOX_t *mb = &pmb->mb; 1861 MAILBOX_t *mb = &pmb->u.mb;
1179 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1862 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1180 1863
1181 /* Unblock ELS traffic */ 1864 /* Unblock ELS traffic */
@@ -1190,7 +1873,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1190 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1873 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1191 } 1874 }
1192 1875
1193 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; 1876 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
1194 1877
1195 memcpy(&phba->alpa_map[0], mp->virt, 128); 1878 memcpy(&phba->alpa_map[0], mp->virt, 128);
1196 1879
@@ -1201,7 +1884,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1201 vport->fc_flag &= ~FC_BYPASSED_MODE; 1884 vport->fc_flag &= ~FC_BYPASSED_MODE;
1202 spin_unlock_irq(shost->host_lock); 1885 spin_unlock_irq(shost->host_lock);
1203 1886
1204 if (((phba->fc_eventTag + 1) < la->eventTag) || 1887 if ((phba->fc_eventTag < la->eventTag) ||
1205 (phba->fc_eventTag == la->eventTag)) { 1888 (phba->fc_eventTag == la->eventTag)) {
1206 phba->fc_stat.LinkMultiEvent++; 1889 phba->fc_stat.LinkMultiEvent++;
1207 if (la->attType == AT_LINK_UP) 1890 if (la->attType == AT_LINK_UP)
@@ -1328,7 +2011,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1328static void 2011static void
1329lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2012lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1330{ 2013{
1331 MAILBOX_t *mb = &pmb->mb; 2014 MAILBOX_t *mb = &pmb->u.mb;
1332 struct lpfc_vport *vport = pmb->vport; 2015 struct lpfc_vport *vport = pmb->vport;
1333 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2016 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1334 2017
@@ -1381,7 +2064,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1381{ 2064{
1382 struct lpfc_vport *vport = pmb->vport; 2065 struct lpfc_vport *vport = pmb->vport;
1383 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2066 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1384 MAILBOX_t *mb = &pmb->mb; 2067 MAILBOX_t *mb = &pmb->u.mb;
1385 2068
1386 switch (mb->mbxStatus) { 2069 switch (mb->mbxStatus) {
1387 case 0x0011: 2070 case 0x0011:
@@ -1416,6 +2099,128 @@ out:
1416 return; 2099 return;
1417} 2100}
1418 2101
2102/**
2103 * lpfc_create_static_vport - Read HBA config region to create static vports.
2104 * @phba: pointer to lpfc hba data structure.
2105 *
 2106 * This routine issues a DUMP mailbox command for config region 22 to get
 2107 * the list of static vports to be created. The function creates vports
 2108 * based on the information returned from the HBA.
2109 **/
2110void
2111lpfc_create_static_vport(struct lpfc_hba *phba)
2112{
2113 LPFC_MBOXQ_t *pmb = NULL;
2114 MAILBOX_t *mb;
2115 struct static_vport_info *vport_info;
2116 int rc, i;
2117 struct fc_vport_identifiers vport_id;
2118 struct fc_vport *new_fc_vport;
2119 struct Scsi_Host *shost;
2120 struct lpfc_vport *vport;
2121 uint16_t offset = 0;
2122 uint8_t *vport_buff;
2123
2124 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2125 if (!pmb) {
2126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2127 "0542 lpfc_create_static_vport failed to"
2128 " allocate mailbox memory\n");
2129 return;
2130 }
2131
2132 mb = &pmb->u.mb;
2133
2134 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
2135 if (!vport_info) {
2136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2137 "0543 lpfc_create_static_vport failed to"
2138 " allocate vport_info\n");
2139 mempool_free(pmb, phba->mbox_mem_pool);
2140 return;
2141 }
2142
2143 vport_buff = (uint8_t *) vport_info;
2144 do {
2145 lpfc_dump_static_vport(phba, pmb, offset);
2146 pmb->vport = phba->pport;
2147 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
2148
2149 if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
2150 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2151 "0544 lpfc_create_static_vport failed to"
2152 " issue dump mailbox command ret 0x%x "
2153 "status 0x%x\n",
2154 rc, mb->mbxStatus);
2155 goto out;
2156 }
2157
2158 if (mb->un.varDmp.word_cnt >
2159 sizeof(struct static_vport_info) - offset)
2160 mb->un.varDmp.word_cnt =
2161 sizeof(struct static_vport_info) - offset;
2162
2163 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
2164 vport_buff + offset,
2165 mb->un.varDmp.word_cnt);
2166 offset += mb->un.varDmp.word_cnt;
2167
2168 } while (mb->un.varDmp.word_cnt &&
2169 offset < sizeof(struct static_vport_info));
2170
2171
2172 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
2173 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
2174 != VPORT_INFO_REV)) {
2175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2176 "0545 lpfc_create_static_vport bad"
2177 " information header 0x%x 0x%x\n",
2178 le32_to_cpu(vport_info->signature),
2179 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
2180
2181 goto out;
2182 }
2183
2184 shost = lpfc_shost_from_vport(phba->pport);
2185
2186 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
2187 memset(&vport_id, 0, sizeof(vport_id));
2188 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
2189 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
2190 if (!vport_id.port_name || !vport_id.node_name)
2191 continue;
2192
2193 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
2194 vport_id.vport_type = FC_PORTTYPE_NPIV;
2195 vport_id.disable = false;
2196 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
2197
2198 if (!new_fc_vport) {
2199 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2200 "0546 lpfc_create_static_vport failed to"
2201 " create vport \n");
2202 continue;
2203 }
2204
2205 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
2206 vport->vport_flag |= STATIC_VPORT;
2207 }
2208
2209out:
2210 /*
 2211	 * If this is a timed-out command, setting context2 to NULL tells
 2212	 * the SLI layer not to use this buffer.
2213 */
2214 spin_lock_irq(&phba->hbalock);
2215 pmb->context2 = NULL;
2216 spin_unlock_irq(&phba->hbalock);
2217 kfree(vport_info);
2218 if (rc != MBX_TIMEOUT)
2219 mempool_free(pmb, phba->mbox_mem_pool);
2220
2221 return;
2222}
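
The DUMP loop above accumulates config region 22 in word-counted chunks, clamping each chunk to the space remaining and stopping on a zero-length chunk or a full buffer. A standalone sketch of that accumulation pattern, with read_chunk() standing in for the mailbox round trip:

#include <stddef.h>

typedef size_t (*read_chunk_fn)(void *dst, size_t off, size_t max);

static size_t read_all(read_chunk_fn read_chunk, void *buf, size_t size)
{
	size_t off = 0, n;

	do {
		n = read_chunk((char *)buf + off, off, size - off);
		if (n > size - off)	/* clamp, as the driver does */
			n = size - off;
		off += n;
	} while (n && off < size);

	return off;
}
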
2223
1419/* 2224/*
1420 * This routine handles processing a Fabric REG_LOGIN mailbox 2225 * This routine handles processing a Fabric REG_LOGIN mailbox
1421 * command upon completion. It is setup in the LPFC_MBOXQ 2226 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1426,16 +2231,17 @@ void
1426lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2231lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1427{ 2232{
1428 struct lpfc_vport *vport = pmb->vport; 2233 struct lpfc_vport *vport = pmb->vport;
1429 MAILBOX_t *mb = &pmb->mb; 2234 MAILBOX_t *mb = &pmb->u.mb;
1430 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2235 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1431 struct lpfc_nodelist *ndlp; 2236 struct lpfc_nodelist *ndlp;
1432 struct lpfc_vport **vports;
1433 int i;
1434 2237
1435 ndlp = (struct lpfc_nodelist *) pmb->context2; 2238 ndlp = (struct lpfc_nodelist *) pmb->context2;
1436 pmb->context1 = NULL; 2239 pmb->context1 = NULL;
1437 pmb->context2 = NULL; 2240 pmb->context2 = NULL;
1438 if (mb->mbxStatus) { 2241 if (mb->mbxStatus) {
2242 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2243 "0258 Register Fabric login error: 0x%x\n",
2244 mb->mbxStatus);
1439 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2245 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1440 kfree(mp); 2246 kfree(mp);
1441 mempool_free(pmb, phba->mbox_mem_pool); 2247 mempool_free(pmb, phba->mbox_mem_pool);
@@ -1454,9 +2260,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1454 } 2260 }
1455 2261
1456 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2262 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1457 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1458 "0258 Register Fabric login error: 0x%x\n",
1459 mb->mbxStatus);
1460 /* Decrement the reference count to ndlp after the reference 2263 /* Decrement the reference count to ndlp after the reference
1461 * to the ndlp are done. 2264 * to the ndlp are done.
1462 */ 2265 */
@@ -1465,34 +2268,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1465 } 2268 }
1466 2269
1467 ndlp->nlp_rpi = mb->un.varWords[0]; 2270 ndlp->nlp_rpi = mb->un.varWords[0];
2271 ndlp->nlp_flag |= NLP_RPI_VALID;
1468 ndlp->nlp_type |= NLP_FABRIC; 2272 ndlp->nlp_type |= NLP_FABRIC;
1469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2273 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1470 2274
1471 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2275 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1472 vports = lpfc_create_vport_work_array(phba); 2276 lpfc_start_fdiscs(phba);
1473 if (vports != NULL)
1474 for(i = 0;
1475 i <= phba->max_vpi && vports[i] != NULL;
1476 i++) {
1477 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1478 continue;
1479 if (phba->fc_topology == TOPOLOGY_LOOP) {
1480 lpfc_vport_set_state(vports[i],
1481 FC_VPORT_LINKDOWN);
1482 continue;
1483 }
1484 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1485 lpfc_initial_fdisc(vports[i]);
1486 else {
1487 lpfc_vport_set_state(vports[i],
1488 FC_VPORT_NO_FABRIC_SUPP);
1489 lpfc_printf_vlog(vport, KERN_ERR,
1490 LOG_ELS,
1491 "0259 No NPIV "
1492 "Fabric support\n");
1493 }
1494 }
1495 lpfc_destroy_vport_work_array(phba, vports);
1496 lpfc_do_scr_ns_plogi(phba, vport); 2277 lpfc_do_scr_ns_plogi(phba, vport);
1497 } 2278 }
1498 2279
@@ -1516,13 +2297,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1516void 2297void
1517lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2298lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1518{ 2299{
1519 MAILBOX_t *mb = &pmb->mb; 2300 MAILBOX_t *mb = &pmb->u.mb;
1520 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2301 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1521 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2302 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1522 struct lpfc_vport *vport = pmb->vport; 2303 struct lpfc_vport *vport = pmb->vport;
1523 2304
1524 if (mb->mbxStatus) { 2305 if (mb->mbxStatus) {
1525out: 2306out:
2307 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2308 "0260 Register NameServer error: 0x%x\n",
2309 mb->mbxStatus);
1526 /* decrement the node reference count held for this 2310 /* decrement the node reference count held for this
1527 * callback function. 2311 * callback function.
1528 */ 2312 */
@@ -1546,15 +2330,13 @@ out:
1546 return; 2330 return;
1547 } 2331 }
1548 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2332 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1549 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1550 "0260 Register NameServer error: 0x%x\n",
1551 mb->mbxStatus);
1552 return; 2333 return;
1553 } 2334 }
1554 2335
1555 pmb->context1 = NULL; 2336 pmb->context1 = NULL;
1556 2337
1557 ndlp->nlp_rpi = mb->un.varWords[0]; 2338 ndlp->nlp_rpi = mb->un.varWords[0];
2339 ndlp->nlp_flag |= NLP_RPI_VALID;
1558 ndlp->nlp_type |= NLP_FABRIC; 2340 ndlp->nlp_type |= NLP_FABRIC;
1559 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2341 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1560 2342
@@ -2055,7 +2837,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
2055 if (pring->ringno == LPFC_ELS_RING) { 2837 if (pring->ringno == LPFC_ELS_RING) {
2056 switch (icmd->ulpCommand) { 2838 switch (icmd->ulpCommand) {
2057 case CMD_GEN_REQUEST64_CR: 2839 case CMD_GEN_REQUEST64_CR:
2058 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) 2840 if (iocb->context_un.ndlp == ndlp)
2059 return 1; 2841 return 1;
2060 case CMD_ELS_REQUEST64_CR: 2842 case CMD_ELS_REQUEST64_CR:
2061 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) 2843 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2102,7 +2884,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2102 */ 2884 */
2103 psli = &phba->sli; 2885 psli = &phba->sli;
2104 rpi = ndlp->nlp_rpi; 2886 rpi = ndlp->nlp_rpi;
2105 if (rpi) { 2887 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2106 /* Now process each ring */ 2888 /* Now process each ring */
2107 for (i = 0; i < psli->num_rings; i++) { 2889 for (i = 0; i < psli->num_rings; i++) {
2108 pring = &psli->ring[i]; 2890 pring = &psli->ring[i];
@@ -2150,7 +2932,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2150 LPFC_MBOXQ_t *mbox; 2932 LPFC_MBOXQ_t *mbox;
2151 int rc; 2933 int rc;
2152 2934
2153 if (ndlp->nlp_rpi) { 2935 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2154 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2936 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2155 if (mbox) { 2937 if (mbox) {
2156 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 2938 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2162,6 +2944,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2162 } 2944 }
2163 lpfc_no_rpi(phba, ndlp); 2945 lpfc_no_rpi(phba, ndlp);
2164 ndlp->nlp_rpi = 0; 2946 ndlp->nlp_rpi = 0;
2947 ndlp->nlp_flag &= ~NLP_RPI_VALID;
2948 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2165 return 1; 2949 return 1;
2166 } 2950 }
2167 return 0; 2951 return 0;
@@ -2252,7 +3036,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2252 3036
2253 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 3037 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
2254 if ((mb = phba->sli.mbox_active)) { 3038 if ((mb = phba->sli.mbox_active)) {
2255 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3039 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2256 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3040 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2257 mb->context2 = NULL; 3041 mb->context2 = NULL;
2258 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3042 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2261,7 +3045,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2261 3045
2262 spin_lock_irq(&phba->hbalock); 3046 spin_lock_irq(&phba->hbalock);
2263 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 3047 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
2264 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3048 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2265 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3049 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2266 mp = (struct lpfc_dmabuf *) (mb->context1); 3050 mp = (struct lpfc_dmabuf *) (mb->context1);
2267 if (mp) { 3051 if (mp) {
@@ -2309,13 +3093,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2309 int rc; 3093 int rc;
2310 3094
2311 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3095 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2312 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { 3096 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
3097 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
2313 /* For this case we need to cleanup the default rpi 3098 /* For this case we need to cleanup the default rpi
2314 * allocated by the firmware. 3099 * allocated by the firmware.
2315 */ 3100 */
2316 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 3101 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2317 != NULL) { 3102 != NULL) {
2318 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, 3103 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
2319 (uint8_t *) &vport->fc_sparam, mbox, 0); 3104 (uint8_t *) &vport->fc_sparam, mbox, 0);
2320 if (rc) { 3105 if (rc) {
2321 mempool_free(mbox, phba->mbox_mem_pool); 3106 mempool_free(mbox, phba->mbox_mem_pool);
@@ -2553,7 +3338,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2553 * clear_la then don't send it. 3338 * clear_la then don't send it.
2554 */ 3339 */
2555 if ((phba->link_state >= LPFC_CLEAR_LA) || 3340 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2556 (vport->port_type != LPFC_PHYSICAL_PORT)) 3341 (vport->port_type != LPFC_PHYSICAL_PORT) ||
3342 (phba->sli_rev == LPFC_SLI_REV4))
2557 return; 3343 return;
2558 3344
2559 /* Link up discovery */ 3345 /* Link up discovery */
@@ -2582,7 +3368,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2582 3368
2583 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3369 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2584 if (regvpimbox) { 3370 if (regvpimbox) {
2585 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); 3371 lpfc_reg_vpi(vport, regvpimbox);
2586 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 3372 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2587 regvpimbox->vport = vport; 3373 regvpimbox->vport = vport;
2588 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 3374 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2642,7 +3428,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
2642 */ 3428 */
2643 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3429 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2644 !(vport->fc_flag & FC_PT2PT) && 3430 !(vport->fc_flag & FC_PT2PT) &&
2645 !(vport->fc_flag & FC_RSCN_MODE)) { 3431 !(vport->fc_flag & FC_RSCN_MODE) &&
3432 (phba->sli_rev < LPFC_SLI_REV4)) {
2646 lpfc_issue_reg_vpi(phba, vport); 3433 lpfc_issue_reg_vpi(phba, vport);
2647 return; 3434 return;
2648 } 3435 }
@@ -2919,11 +3706,13 @@ restart_disc:
2919 * set port_state to PORT_READY if SLI2. 3706 * set port_state to PORT_READY if SLI2.
2920 * cmpl_reg_vpi will set port_state to READY for SLI3. 3707 * cmpl_reg_vpi will set port_state to READY for SLI3.
2921 */ 3708 */
2922 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3709 if (phba->sli_rev < LPFC_SLI_REV4) {
2923 lpfc_issue_reg_vpi(phba, vport); 3710 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2924 else { /* NPIV Not enabled */ 3711 lpfc_issue_reg_vpi(phba, vport);
2925 lpfc_issue_clear_la(phba, vport); 3712 else { /* NPIV Not enabled */
2926 vport->port_state = LPFC_VPORT_READY; 3713 lpfc_issue_clear_la(phba, vport);
3714 vport->port_state = LPFC_VPORT_READY;
3715 }
2927 } 3716 }
2928 3717
2929 /* Setup and issue mailbox INITIALIZE LINK command */ 3718 /* Setup and issue mailbox INITIALIZE LINK command */
@@ -2939,7 +3728,7 @@ restart_disc:
2939 lpfc_linkdown(phba); 3728 lpfc_linkdown(phba);
2940 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 3729 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2941 phba->cfg_link_speed); 3730 phba->cfg_link_speed);
2942 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3731 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
2943 initlinkmbox->vport = vport; 3732 initlinkmbox->vport = vport;
2944 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3733 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2945 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 3734 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -2959,11 +3748,13 @@ restart_disc:
2959 * set port_state to PORT_READY if SLI2. 3748 * set port_state to PORT_READY if SLI2.
2960 * cmpl_reg_vpi will set port_state to READY for SLI3. 3749 * cmpl_reg_vpi will set port_state to READY for SLI3.
2961 */ 3750 */
2962 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3751 if (phba->sli_rev < LPFC_SLI_REV4) {
2963 lpfc_issue_reg_vpi(phba, vport); 3752 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2964 else { /* NPIV Not enabled */ 3753 lpfc_issue_reg_vpi(phba, vport);
2965 lpfc_issue_clear_la(phba, vport); 3754 else { /* NPIV Not enabled */
2966 vport->port_state = LPFC_VPORT_READY; 3755 lpfc_issue_clear_la(phba, vport);
3756 vport->port_state = LPFC_VPORT_READY;
3757 }
2967 } 3758 }
2968 break; 3759 break;
2969 3760
@@ -3036,7 +3827,7 @@ restart_disc:
3036void 3827void
3037lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3828lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3038{ 3829{
3039 MAILBOX_t *mb = &pmb->mb; 3830 MAILBOX_t *mb = &pmb->u.mb;
3040 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3831 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3041 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3832 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3042 struct lpfc_vport *vport = pmb->vport; 3833 struct lpfc_vport *vport = pmb->vport;
@@ -3044,6 +3835,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3044 pmb->context1 = NULL; 3835 pmb->context1 = NULL;
3045 3836
3046 ndlp->nlp_rpi = mb->un.varWords[0]; 3837 ndlp->nlp_rpi = mb->un.varWords[0];
3838 ndlp->nlp_flag |= NLP_RPI_VALID;
3047 ndlp->nlp_type |= NLP_FABRIC; 3839 ndlp->nlp_type |= NLP_FABRIC;
3048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3840 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3049 3841
@@ -3297,3 +4089,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
3297 return 1; 4089 return 1;
3298 return 0; 4090 return 0;
3299} 4091}
4092
4093/**
4094 * lpfc_fcf_inuse - Check if FCF can be unregistered.
4095 * @phba: Pointer to hba context object.
4096 *
 4097 * This function iterates through all FC nodes associated
 4098 * with all vports to check if there is any node with an
 4099 * fc_rport associated with it. If there is an fc_rport
 4100 * associated with a node, then the node is either in the
 4101 * discovered state or its devloss_timer is pending.
4102 */
4103static int
4104lpfc_fcf_inuse(struct lpfc_hba *phba)
4105{
4106 struct lpfc_vport **vports;
4107 int i, ret = 0;
4108 struct lpfc_nodelist *ndlp;
4109 struct Scsi_Host *shost;
4110
4111 vports = lpfc_create_vport_work_array(phba);
4112
4113 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4114 shost = lpfc_shost_from_vport(vports[i]);
4115 spin_lock_irq(shost->host_lock);
4116 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4117 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
4118 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
4119 ret = 1;
4120 spin_unlock_irq(shost->host_lock);
4121 goto out;
4122 }
4123 }
4124 spin_unlock_irq(shost->host_lock);
4125 }
4126out:
4127 lpfc_destroy_vport_work_array(phba, vports);
4128 return ret;
4129}
4130
4131/**
4132 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
4133 * @phba: Pointer to hba context object.
4134 * @mboxq: Pointer to mailbox object.
4135 *
4136 * This function frees memory associated with the mailbox command.
4137 */
4138static void
4139lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4140{
4141 struct lpfc_vport *vport = mboxq->vport;
4142
4143 if (mboxq->u.mb.mbxStatus) {
4144 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4145 "2555 UNREG_VFI mbxStatus error x%x "
4146 "HBA state x%x\n",
4147 mboxq->u.mb.mbxStatus, vport->port_state);
4148 }
4149 mempool_free(mboxq, phba->mbox_mem_pool);
4150 return;
4151}
4152
4153/**
4154 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
4155 * @phba: Pointer to hba context object.
4156 * @mboxq: Pointer to mailbox object.
4157 *
4158 * This function frees memory associated with the mailbox command.
4159 */
4160static void
4161lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4162{
4163 struct lpfc_vport *vport = mboxq->vport;
4164
4165 if (mboxq->u.mb.mbxStatus) {
4166 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4167 "2550 UNREG_FCFI mbxStatus error x%x "
4168 "HBA state x%x\n",
4169 mboxq->u.mb.mbxStatus, vport->port_state);
4170 }
4171 mempool_free(mboxq, phba->mbox_mem_pool);
4172 return;
4173}
4174
4175/**
4176 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
4177 * @phba: Pointer to hba context object.
4178 *
 4179 * This function checks if there are any connected remote ports for the FCF
 4180 * and, if all the devices are disconnected, unregisters the FCFI.
 4181 * It also tries to use another FCF for discovery.
4182 */
4183void
4184lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4185{
4186 LPFC_MBOXQ_t *mbox;
4187 int rc;
4188 struct lpfc_vport **vports;
4189 int i;
4190
4191 spin_lock_irq(&phba->hbalock);
4192 /*
 4193	 * If the HBA is not running in FIP mode, or
 4194	 * the HBA does not support FCoE, or
 4195	 * the FCF is not registered,
 4196	 * do nothing.
4197 */
4198 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4199 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4200 (phba->cfg_enable_fip == 0)) {
4201 spin_unlock_irq(&phba->hbalock);
4202 return;
4203 }
4204 spin_unlock_irq(&phba->hbalock);
4205
4206 if (lpfc_fcf_inuse(phba))
4207 return;
4208
4209
4210 /* Unregister VPIs */
4211 vports = lpfc_create_vport_work_array(phba);
4212 if (vports &&
4213 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4214 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4215 lpfc_mbx_unreg_vpi(vports[i]);
4216 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4217 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
4218 }
4219 lpfc_destroy_vport_work_array(phba, vports);
4220
4221 /* Unregister VFI */
4222 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4223 if (!mbox) {
4224 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4225 "2556 UNREG_VFI mbox allocation failed"
4226 "HBA state x%x\n",
4227 phba->pport->port_state);
4228 return;
4229 }
4230
4231 lpfc_unreg_vfi(mbox, phba->pport->vfi);
4232 mbox->vport = phba->pport;
4233 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4234
4235 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4236 if (rc == MBX_NOT_FINISHED) {
4237 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4238 "2557 UNREG_VFI issue mbox failed rc x%x "
4239 "HBA state x%x\n",
4240 rc, phba->pport->port_state);
4241 mempool_free(mbox, phba->mbox_mem_pool);
4242 return;
4243 }
4244
4245 /* Unregister FCF */
4246 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4247 if (!mbox) {
4248 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4249 "2551 UNREG_FCFI mbox allocation failed"
4250 "HBA state x%x\n",
4251 phba->pport->port_state);
4252 return;
4253 }
4254
4255 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4256 mbox->vport = phba->pport;
4257 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4258 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4259
4260 if (rc == MBX_NOT_FINISHED) {
4261 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4262 "2552 UNREG_FCFI issue mbox failed rc x%x "
4263 "HBA state x%x\n",
4264 rc, phba->pport->port_state);
4265 mempool_free(mbox, phba->mbox_mem_pool);
4266 return;
4267 }
4268
4269 spin_lock_irq(&phba->hbalock);
4270 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
4271 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
4272 FCF_VALID_VLAN);
4273 spin_unlock_irq(&phba->hbalock);
4274
4275 /*
4276 * If driver is not unloading, check if there is any other
4277 * FCF record that can be used for discovery.
4278 */
4279 if ((phba->pport->load_flag & FC_UNLOADING) ||
4280 (phba->link_state < LPFC_LINK_UP))
4281 return;
4282
4283 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
4284
4285 if (rc)
4286 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4287 "2553 lpfc_unregister_unused_fcf failed to read FCF"
4288 " record HBA state x%x\n",
4289 phba->pport->port_state);
4290}
4291
4292/**
4293 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
4294 * @phba: Pointer to hba context object.
4295 * @buff: Buffer containing the FCF connection table as in the config
4296 * region.
 4297 * This function creates the driver data structure for the FCF connection
 4298 * record table read from config region 23.
4299 */
4300static void
4301lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4302 uint8_t *buff)
4303{
4304 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4305 struct lpfc_fcf_conn_hdr *conn_hdr;
4306 struct lpfc_fcf_conn_rec *conn_rec;
4307 uint32_t record_count;
4308 int i;
4309
4310 /* Free the current connect table */
4311 list_for_each_entry_safe(conn_entry, next_conn_entry,
4312 &phba->fcf_conn_rec_list, list)
4313 kfree(conn_entry);
4314
4315 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4316 record_count = conn_hdr->length * sizeof(uint32_t)/
4317 sizeof(struct lpfc_fcf_conn_rec);
4318
4319 conn_rec = (struct lpfc_fcf_conn_rec *)
4320 (buff + sizeof(struct lpfc_fcf_conn_hdr));
4321
4322 for (i = 0; i < record_count; i++) {
4323 if (!(conn_rec[i].flags & FCFCNCT_VALID))
4324 continue;
4325 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
4326 GFP_KERNEL);
4327 if (!conn_entry) {
4328 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4329 "2566 Failed to allocate connection"
4330 " table entry\n");
4331 return;
4332 }
4333
4334 memcpy(&conn_entry->conn_rec, &conn_rec[i],
4335 sizeof(struct lpfc_fcf_conn_rec));
4336 conn_entry->conn_rec.vlan_tag =
4337 le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
4338 conn_entry->conn_rec.flags =
4339 le16_to_cpu(conn_entry->conn_rec.flags);
4340 list_add_tail(&conn_entry->list,
4341 &phba->fcf_conn_rec_list);
4342 }
4343}
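
The record_count computation above turns the header's length field, which counts 32-bit words, into a count of fixed-size records: bytes = length * 4, records = bytes / sizeof(record). A tiny worked example with an illustrative 12-byte record layout:

#include <stdint.h>
#include <stdio.h>

struct conn_rec_demo {
	uint16_t flags;
	uint16_t vlan_tag;
	uint8_t	 fabric_name[8];
};	/* 12 bytes */

int main(void)
{
	uint8_t hdr_length = 9;	/* 9 words = 36 bytes of records */
	uint32_t record_count =
		hdr_length * sizeof(uint32_t) / sizeof(struct conn_rec_demo);

	printf("%u records\n", record_count);	/* prints 3 */
	return 0;
}
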
4344
4345/**
 4346 * lpfc_read_fcoe_param - Read FCoE parameters from config region.
4347 * @phba: Pointer to hba context object.
4348 * @buff: Buffer containing the FCoE parameter data structure.
4349 *
 4350 * This function updates the driver data structure with config
 4351 * parameters read from config region 23.
4352 */
4353static void
4354lpfc_read_fcoe_param(struct lpfc_hba *phba,
4355 uint8_t *buff)
4356{
4357 struct lpfc_fip_param_hdr *fcoe_param_hdr;
4358 struct lpfc_fcoe_params *fcoe_param;
4359
4360 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4361 buff;
 4362	fcoe_param = (struct lpfc_fcoe_params *)
 4363		(buff + sizeof(struct lpfc_fip_param_hdr));
4364
4365 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4366 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4367 return;
4368
4369 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4370 FIPP_MODE_ON)
4371 phba->cfg_enable_fip = 1;
4372
4373 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4374 FIPP_MODE_OFF)
4375 phba->cfg_enable_fip = 0;
4376
4377 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4378 phba->valid_vlan = 1;
4379 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
4380 0xFFF;
4381 }
4382
4383 phba->fc_map[0] = fcoe_param->fc_map[0];
4384 phba->fc_map[1] = fcoe_param->fc_map[1];
4385 phba->fc_map[2] = fcoe_param->fc_map[2];
4386 return;
4387}
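
The parentheses in the fcoe_param assignment above matter: a cast binds tighter than +, so without them the addition would advance the pointer in sizeof(struct lpfc_fcoe_params) strides instead of bytes. A small demonstration with made-up sizes:

#include <stdio.h>
#include <stdint.h>

struct hdr  { uint32_t words[1]; };	/* 4-byte header, illustrative */
struct body { uint32_t words[8]; };	/* 32-byte payload, illustrative */

int main(void)
{
	uint8_t buff[192] = { 0 };

	struct body *right = (struct body *)(buff + sizeof(struct hdr));
	struct body *wrong = (struct body *)buff + sizeof(struct hdr);

	printf("right offset %td, wrong offset %td\n",
	       (uint8_t *)right - buff, (uint8_t *)wrong - buff);
	/* prints: right offset 4, wrong offset 128 */
	return 0;
}
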
4388
4389/**
4390 * lpfc_get_rec_conf23 - Get a record type in config region data.
4391 * @buff: Buffer containing config region 23 data.
4392 * @size: Size of the data buffer.
4393 * @rec_type: Record type to be searched.
4394 *
 4395 * This function searches config region data to find the beginning
 4396 * of the record specified by rec_type. If the record is found, this
 4397 * function returns a pointer to the record, else it returns NULL.
4398 */
4399static uint8_t *
4400lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
4401{
4402 uint32_t offset = 0, rec_length;
4403
4404 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
4405 (size < sizeof(uint32_t)))
4406 return NULL;
4407
4408 rec_length = buff[offset + 1];
4409
4410 /*
 4411	 * One TLV record has a one-word header plus the number of data
 4412	 * words specified in the rec_length field of the record header.
4413 */
4414 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
4415 <= size) {
4416 if (buff[offset] == rec_type)
4417 return &buff[offset];
4418
4419 if (buff[offset] == LPFC_REGION23_LAST_REC)
4420 return NULL;
4421
4422 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
4423 rec_length = buff[offset + 1];
4424 }
4425 return NULL;
4426}
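
A hand-built buffer makes the TLV layout that lpfc_get_rec_conf23() walks concrete: each record is a one-word header (type byte, length-in-words byte, two reserved bytes) followed by that many data words, with a terminator type ending the region. The record types and values below are invented for the example:

#include <stdint.h>
#include <stdio.h>

#define DEMO_LAST_REC	0xff	/* stands in for LPFC_REGION23_LAST_REC */

int main(void)
{
	uint8_t region[] = {
		0xA0, 1, 0, 0,		/* type 0xA0, 1 data word ... */
		0xde, 0xad, 0xbe, 0xef,	/* ... the data word */
		0xA1, 0, 0, 0,		/* type 0xA1, no data */
		DEMO_LAST_REC, 0, 0, 0,	/* terminator */
	};
	uint32_t off = 0;

	/* Walk header-to-header exactly as the driver does. */
	while (region[off] != DEMO_LAST_REC) {
		printf("record type 0x%02x at offset %u\n", region[off], off);
		off += region[off + 1] * 4 + 4;	/* data words + header */
	}
	return 0;
}
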
4427
4428/**
4429 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
4430 * @phba: Pointer to lpfc_hba data structure.
4431 * @buff: Buffer containing config region 23 data.
4432 * @size: Size of the data buffer.
4433 *
 4434 * This function parses the FCoE config parameters in config region 23 and
 4435 * populates the driver data structure with them.
4436 */
4437void
4438lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
4439 uint8_t *buff,
4440 uint32_t size)
4441{
4442 uint32_t offset = 0, rec_length;
4443 uint8_t *rec_ptr;
4444
4445 /*
 4446	 * If the data size is less than 2 words, the signature and version
 4447	 * cannot be verified.
4448 */
4449 if (size < 2*sizeof(uint32_t))
4450 return;
4451
4452 /* Check the region signature first */
4453 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
4454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4455 "2567 Config region 23 has bad signature\n");
4456 return;
4457 }
4458
4459 offset += 4;
4460
4461 /* Check the data structure version */
4462 if (buff[offset] != LPFC_REGION23_VERSION) {
4463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4464 "2568 Config region 23 has bad version\n");
4465 return;
4466 }
4467 offset += 4;
4468
4469 rec_length = buff[offset + 1];
4470
4471 /* Read FCoE param record */
4472 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4473 size - offset, FCOE_PARAM_TYPE);
4474 if (rec_ptr)
4475 lpfc_read_fcoe_param(phba, rec_ptr);
4476
4477 /* Read FCF connection table */
4478 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4479 size - offset, FCOE_CONN_TBL_TYPE);
4480 if (rec_ptr)
4481 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
4482
4483}
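A hedged companion sketch of the framing validated above: four signature bytes, then a version word, then the TLV records. The "RG23" string and version 1 below are stand-ins for LPFC_REGION23_SIGNATURE and LPFC_REGION23_VERSION, whose actual values are defined elsewhere in the driver.

/*
 * User-space sketch of the config region 23 framing checks performed
 * by lpfc_parse_fcoe_conf(). Signature and version values here are
 * stand-ins, not the driver's LPFC_REGION23_* constants.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buff[64];
	uint32_t offset = 0;

	memset(buff, 0, sizeof(buff));
	memcpy(buff, "RG23", 4);	/* stand-in signature */
	buff[4] = 1;			/* stand-in version */
	buff[8] = 0xff;			/* terminating TLV record */

	if (memcmp(buff, "RG23", 4)) {
		printf("bad region 23 signature\n");
		return 1;
	}
	offset += 4;			/* signature is 4 bytes */
	if (buff[offset] != 1) {
		printf("bad region 23 version\n");
		return 1;
	}
	offset += 4;			/* version occupies a full word */
	printf("TLV records begin at offset %u\n", offset);
	return 0;
}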
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b8..8a3a026667e4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */
471}; 471};
472 472
473/* 473/*
474 * Virtual Fabric Tagging Header
475 */
476struct fc_vft_header {
477 uint32_t word0;
478#define fc_vft_hdr_r_ctl_SHIFT 24
479#define fc_vft_hdr_r_ctl_MASK 0xFF
480#define fc_vft_hdr_r_ctl_WORD word0
481#define fc_vft_hdr_ver_SHIFT 22
482#define fc_vft_hdr_ver_MASK 0x3
483#define fc_vft_hdr_ver_WORD word0
484#define fc_vft_hdr_type_SHIFT 18
485#define fc_vft_hdr_type_MASK 0xF
486#define fc_vft_hdr_type_WORD word0
487#define fc_vft_hdr_e_SHIFT 16
488#define fc_vft_hdr_e_MASK 0x1
489#define fc_vft_hdr_e_WORD word0
490#define fc_vft_hdr_priority_SHIFT 13
491#define fc_vft_hdr_priority_MASK 0x7
492#define fc_vft_hdr_priority_WORD word0
493#define fc_vft_hdr_vf_id_SHIFT 1
494#define fc_vft_hdr_vf_id_MASK 0xFFF
495#define fc_vft_hdr_vf_id_WORD word0
496 uint32_t word1;
497#define fc_vft_hdr_hopct_SHIFT 24
498#define fc_vft_hdr_hopct_MASK 0xFF
499#define fc_vft_hdr_hopct_WORD word1
500};
501
502/*
474 * Extended Link Service LS_COMMAND codes (Payload Word 0) 503 * Extended Link Service LS_COMMAND codes (Payload Word 0)
475 */ 504 */
476#ifdef __BIG_ENDIAN_BITFIELD 505#ifdef __BIG_ENDIAN_BITFIELD
@@ -1152,6 +1181,8 @@ typedef struct {
1152#define PCI_DEVICE_ID_HORNET 0xfe05 1181#define PCI_DEVICE_ID_HORNET 0xfe05
1153#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1182#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1154#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1155 1186
1156#define JEDEC_ID_ADDRESS 0x0080001c 1187#define JEDEC_ID_ADDRESS 0x0080001c
1157#define FIREFLY_JEDEC_ID 0x1ACC 1188#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1342,15 +1373,21 @@ typedef struct { /* FireFly BIU registers */
1342#define MBX_READ_LA64 0x95 1373#define MBX_READ_LA64 0x95
1343#define MBX_REG_VPI 0x96 1374#define MBX_REG_VPI 0x96
1344#define MBX_UNREG_VPI 0x97 1375#define MBX_UNREG_VPI 0x97
1345#define MBX_REG_VNPID 0x96
1346#define MBX_UNREG_VNPID 0x97
1347 1376
1348#define MBX_WRITE_WWN 0x98 1377#define MBX_WRITE_WWN 0x98
1349#define MBX_SET_DEBUG 0x99 1378#define MBX_SET_DEBUG 0x99
1350#define MBX_LOAD_EXP_ROM 0x9C 1379#define MBX_LOAD_EXP_ROM 0x9C
1351 1380#define MBX_SLI4_CONFIG 0x9B
1352#define MBX_MAX_CMDS 0x9D 1381#define MBX_SLI4_REQ_FTRS 0x9D
1382#define MBX_MAX_CMDS 0x9E
1383#define MBX_RESUME_RPI 0x9E
1353#define MBX_SLI2_CMD_MASK 0x80 1384#define MBX_SLI2_CMD_MASK 0x80
1385#define MBX_REG_VFI 0x9F
1386#define MBX_REG_FCFI 0xA0
1387#define MBX_UNREG_VFI 0xA1
1388#define MBX_UNREG_FCFI 0xA2
1389#define MBX_INIT_VFI 0xA3
1390#define MBX_INIT_VPI 0xA4
1354 1391
1355/* IOCB Commands */ 1392/* IOCB Commands */
1356 1393
@@ -1440,6 +1477,16 @@ typedef struct { /* FireFly BIU registers */
1440#define CMD_IOCB_LOGENTRY_CN 0x94 1477#define CMD_IOCB_LOGENTRY_CN 0x94
1441#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1478#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1442 1479
1480/* Unhandled Data Security SLI Commands */
1481#define DSSCMD_IWRITE64_CR 0xD8
1482#define DSSCMD_IWRITE64_CX 0xD9
1483#define DSSCMD_IREAD64_CR 0xDA
1484#define DSSCMD_IREAD64_CX 0xDB
1485#define DSSCMD_INVALIDATE_DEK 0xDC
1486#define DSSCMD_SET_KEK 0xDD
1487#define DSSCMD_GET_KEK_ID 0xDE
1488#define DSSCMD_GEN_XFER 0xDF
1489
1443#define CMD_MAX_IOCB_CMD 0xE6 1490#define CMD_MAX_IOCB_CMD 0xE6
1444#define CMD_IOCB_MASK 0xff 1491#define CMD_IOCB_MASK 0xff
1445 1492
@@ -1466,6 +1513,7 @@ typedef struct { /* FireFly BIU registers */
1466#define MBXERR_BAD_RCV_LENGTH 14 1513#define MBXERR_BAD_RCV_LENGTH 14
1467#define MBXERR_DMA_ERROR 15 1514#define MBXERR_DMA_ERROR 15
1468#define MBXERR_ERROR 16 1515#define MBXERR_ERROR 16
1516#define MBXERR_LINK_DOWN 0x33
1469#define MBX_NOT_FINISHED 255 1517#define MBX_NOT_FINISHED 255
1470 1518
1471#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ 1519#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1552,6 @@ struct ulp_bde {
1504#endif 1552#endif
1505}; 1553};
1506 1554
1507struct ulp_bde64 { /* SLI-2 */
1508 union ULP_BDE_TUS {
1509 uint32_t w;
1510 struct {
1511#ifdef __BIG_ENDIAN_BITFIELD
1512 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1513 VALUE !! */
1514 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1515#else /* __LITTLE_ENDIAN_BITFIELD */
1516 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1517 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1518 VALUE !! */
1519#endif
1520#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
1521#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
1522#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
1523#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
1524#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
1525#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
1526#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
1527 } f;
1528 } tus;
1529 uint32_t addrLow;
1530 uint32_t addrHigh;
1531};
1532
1533typedef struct ULP_BDL { /* SLI-2 */ 1555typedef struct ULP_BDL { /* SLI-2 */
1534#ifdef __BIG_ENDIAN_BITFIELD 1556#ifdef __BIG_ENDIAN_BITFIELD
1535 uint32_t bdeFlags:8; /* BDL Flags */ 1557 uint32_t bdeFlags:8; /* BDL Flags */
@@ -2287,7 +2309,7 @@ typedef struct {
2287 uint32_t rsvd3; 2309 uint32_t rsvd3;
2288 uint32_t rsvd4; 2310 uint32_t rsvd4;
2289 uint32_t rsvd5; 2311 uint32_t rsvd5;
2290 uint16_t rsvd6; 2312 uint16_t vfi;
2291 uint16_t vpi; 2313 uint16_t vpi;
2292#else /* __LITTLE_ENDIAN */ 2314#else /* __LITTLE_ENDIAN */
2293 uint32_t rsvd1; 2315 uint32_t rsvd1;
@@ -2297,7 +2319,7 @@ typedef struct {
2297 uint32_t rsvd4; 2319 uint32_t rsvd4;
2298 uint32_t rsvd5; 2320 uint32_t rsvd5;
2299 uint16_t vpi; 2321 uint16_t vpi;
2300 uint16_t rsvd6; 2322 uint16_t vfi;
2301#endif 2323#endif
2302} REG_VPI_VAR; 2324} REG_VPI_VAR;
2303 2325
@@ -2457,7 +2479,7 @@ typedef struct {
2457 uint32_t entry_index:16; 2479 uint32_t entry_index:16;
2458#endif 2480#endif
2459 2481
2460 uint32_t rsvd1; 2482 uint32_t sli4_length;
2461 uint32_t word_cnt; 2483 uint32_t word_cnt;
2462 uint32_t resp_offset; 2484 uint32_t resp_offset;
2463} DUMP_VAR; 2485} DUMP_VAR;
@@ -2470,9 +2492,32 @@ typedef struct {
2470#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2492#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2471#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2493#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2472 2494
2495#define DMP_REGION_VPORT 0x16 /* VPort info region */
2496#define DMP_VPORT_REGION_SIZE 0x200
2497#define DMP_MBOX_OFFSET_WORD 0x5
2498
2499#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
2500#define DMP_FCOEPARAM_RGN_SIZE 0x400
2501
2473#define WAKE_UP_PARMS_REGION_ID 4 2502#define WAKE_UP_PARMS_REGION_ID 4
2474#define WAKE_UP_PARMS_WORD_SIZE 15 2503#define WAKE_UP_PARMS_WORD_SIZE 15
2475 2504
2505struct vport_rec {
2506 uint8_t wwpn[8];
2507 uint8_t wwnn[8];
2508};
2509
2510#define VPORT_INFO_SIG 0x32324752
2511#define VPORT_INFO_REV_MASK 0xff
2512#define VPORT_INFO_REV 0x1
2513#define MAX_STATIC_VPORT_COUNT 16
2514struct static_vport_info {
2515 uint32_t signature;
2516 uint32_t rev;
2517 struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
2518 uint32_t resvd[66];
2519};
2520
2476/* Option rom version structure */ 2521/* Option rom version structure */
2477struct prog_id { 2522struct prog_id {
2478#ifdef __BIG_ENDIAN_BITFIELD 2523#ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2742,9 @@ typedef struct {
2697#endif 2742#endif
2698 2743
2699#ifdef __BIG_ENDIAN_BITFIELD 2744#ifdef __BIG_ENDIAN_BITFIELD
2700 uint32_t rsvd1 : 23; /* Reserved */ 2745 uint32_t rsvd1 : 19; /* Reserved */
2746 uint32_t cdss : 1; /* Configure Data Security SLI */
2747 uint32_t rsvd2 : 3; /* Reserved */
2701 uint32_t cbg : 1; /* Configure BlockGuard */ 2748 uint32_t cbg : 1; /* Configure BlockGuard */
2702 uint32_t cmv : 1; /* Configure Max VPIs */ 2749 uint32_t cmv : 1; /* Configure Max VPIs */
2703 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2750 uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2717,10 +2764,14 @@ typedef struct {
2717 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2764 uint32_t ccrp : 1; /* Config Command Ring Polling */
2718 uint32_t cmv : 1; /* Configure Max VPIs */ 2765 uint32_t cmv : 1; /* Configure Max VPIs */
2719 uint32_t cbg : 1; /* Configure BlockGuard */ 2766 uint32_t cbg : 1; /* Configure BlockGuard */
2720 uint32_t rsvd1 : 23; /* Reserved */ 2767 uint32_t rsvd2 : 3; /* Reserved */
2768 uint32_t cdss : 1; /* Configure Data Security SLI */
2769 uint32_t rsvd1 : 19; /* Reserved */
2721#endif 2770#endif
2722#ifdef __BIG_ENDIAN_BITFIELD 2771#ifdef __BIG_ENDIAN_BITFIELD
2723 uint32_t rsvd2 : 23; /* Reserved */ 2772 uint32_t rsvd3 : 19; /* Reserved */
 2773 uint32_t gdss : 1; /* Grant Data Security SLI */
2774 uint32_t rsvd4 : 3; /* Reserved */
2724 uint32_t gbg : 1; /* Grant BlockGuard */ 2775 uint32_t gbg : 1; /* Grant BlockGuard */
2725 uint32_t gmv : 1; /* Grant Max VPIs */ 2776 uint32_t gmv : 1; /* Grant Max VPIs */
2726 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2777 uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2740,7 +2791,9 @@ typedef struct {
2740 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2791 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2741 uint32_t gmv : 1; /* Grant Max VPIs */ 2792 uint32_t gmv : 1; /* Grant Max VPIs */
2742 uint32_t gbg : 1; /* Grant BlockGuard */ 2793 uint32_t gbg : 1; /* Grant BlockGuard */
2743 uint32_t rsvd2 : 23; /* Reserved */ 2794 uint32_t rsvd4 : 3; /* Reserved */
 2795 uint32_t gdss : 1; /* Grant Data Security SLI */
2796 uint32_t rsvd3 : 19; /* Reserved */
2744#endif 2797#endif
2745 2798
2746#ifdef __BIG_ENDIAN_BITFIELD 2799#ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2806,20 @@ typedef struct {
2753 2806
2754#ifdef __BIG_ENDIAN_BITFIELD 2807#ifdef __BIG_ENDIAN_BITFIELD
2755 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2808 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
 2756 uint32_t rsvd3 : 16; /* Reserved */ 2809 uint32_t rsvd5 : 16; /* Reserved */
2757#else /* __LITTLE_ENDIAN */ 2810#else /* __LITTLE_ENDIAN */
 2758 uint32_t rsvd3 : 16; /* Reserved */ 2811 uint32_t rsvd5 : 16; /* Reserved */
2759 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2812 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2760#endif 2813#endif
2761 2814
2762 uint32_t rsvd4; /* Reserved */ 2815 uint32_t rsvd6; /* Reserved */
2763 2816
2764#ifdef __BIG_ENDIAN_BITFIELD 2817#ifdef __BIG_ENDIAN_BITFIELD
2765 uint32_t rsvd5 : 16; /* Reserved */ 2818 uint32_t rsvd7 : 16; /* Reserved */
2766 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2819 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2767#else /* __LITTLE_ENDIAN */ 2820#else /* __LITTLE_ENDIAN */
2768 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2821 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2769 uint32_t rsvd5 : 16; /* Reserved */ 2822 uint32_t rsvd7 : 16; /* Reserved */
2770#endif 2823#endif
2771 2824
2772} CONFIG_PORT_VAR; 2825} CONFIG_PORT_VAR;
@@ -3666,3 +3719,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
3666#define MENLO_TIMEOUT 30 3719#define MENLO_TIMEOUT 30
3667#define SETVAR_MLOMNT 0x103107 3720#define SETVAR_MLOMNT 0x103107
3668#define SETVAR_MLORST 0x103007 3721#define SETVAR_MLORST 0x103007
3722
3723#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 000000000000..2995d128f07f
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21/* Macros to deal with bit fields. Each bit field must have 3 #defines
22 * associated with it (_SHIFT, _MASK, and _WORD).
23 * EG. For a bit field that is in the 7th bit of the "field4" field of a
24 * structure and is 2 bits in size the following #defines must exist:
25 * struct temp {
26 * uint32_t field1;
27 * uint32_t field2;
28 * uint32_t field3;
29 * uint32_t field4;
30 * #define example_bit_field_SHIFT 7
31 * #define example_bit_field_MASK 0x03
32 * #define example_bit_field_WORD field4
33 * uint32_t field5;
34 * };
35 * Then the macros below may be used to get or set the value of that field.
36 * EG. To get the value of the bit field from the above example:
37 * struct temp t1;
38 * value = bf_get(example_bit_field, &t1);
39 * And then to set that bit field:
40 * bf_set(example_bit_field, &t1, 2);
41 * Or clear that bit field:
42 * bf_set(example_bit_field, &t1, 0);
43 */
44#define bf_get(name, ptr) \
45 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
46#define bf_set(name, ptr, value) \
47 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
48 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
49
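Since every register and queue-entry structure below leans on this SHIFT/MASK/WORD convention, here is a minimal self-contained sketch, assuming only standard C, that compiles the temp example from the comment above and exercises bf_get()/bf_set().

/* Standalone demo of the bf_get()/bf_set() pattern, using the
 * example_bit_field layout from the comment above. */
#include <stdint.h>
#include <stdio.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct temp {
	uint32_t field1;
	uint32_t field2;
	uint32_t field3;
	uint32_t field4;
#define example_bit_field_SHIFT 7
#define example_bit_field_MASK  0x03
#define example_bit_field_WORD  field4
	uint32_t field5;
};

int main(void)
{
	struct temp t1 = { 0 };

	bf_set(example_bit_field, &t1, 2);	/* set the 2-bit field to 2 */
	printf("field4 = %#x, bit field = %u\n",
	       (unsigned)t1.field4,
	       (unsigned)bf_get(example_bit_field, &t1));
	/* prints: field4 = 0x100 (2 << 7), bit field = 2 */
	return 0;
}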
50struct dma_address {
51 uint32_t addr_lo;
52 uint32_t addr_hi;
53};
54
55#define LPFC_SLI4_BAR0 1
56#define LPFC_SLI4_BAR1 2
57#define LPFC_SLI4_BAR2 4
58
59#define LPFC_SLI4_MBX_EMBED true
60#define LPFC_SLI4_MBX_NEMBED false
61
62#define LPFC_SLI4_MB_WORD_COUNT 64
63#define LPFC_MAX_MQ_PAGE 8
64#define LPFC_MAX_WQ_PAGE 8
65#define LPFC_MAX_CQ_PAGE 4
66#define LPFC_MAX_EQ_PAGE 8
67
68#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */
69#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */
70#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */
71
72/* Define SLI4 Alignment requirements. */
73#define LPFC_ALIGN_16_BYTE 16
74#define LPFC_ALIGN_64_BYTE 64
75
76/* Define SLI4 specific definitions. */
77#define LPFC_MQ_CQE_BYTE_OFFSET 256
78#define LPFC_MBX_CMD_HDR_LENGTH 16
79#define LPFC_MBX_ERROR_RANGE 0x4000
80#define LPFC_BMBX_BIT1_ADDR_HI 0x2
81#define LPFC_BMBX_BIT1_ADDR_LO 0
82#define LPFC_RPI_HDR_COUNT 64
83#define LPFC_HDR_TEMPLATE_SIZE 4096
84#define LPFC_RPI_ALLOC_ERROR 0xFFFF
85#define LPFC_FCF_RECORD_WD_CNT 132
86#define LPFC_ENTIRE_FCF_DATABASE 0
87#define LPFC_DFLT_FCF_INDEX 0
88
89/* Virtual function numbers */
90#define LPFC_VF0 0
91#define LPFC_VF1 1
92#define LPFC_VF2 2
93#define LPFC_VF3 3
94#define LPFC_VF4 4
95#define LPFC_VF5 5
96#define LPFC_VF6 6
97#define LPFC_VF7 7
98#define LPFC_VF8 8
99#define LPFC_VF9 9
100#define LPFC_VF10 10
101#define LPFC_VF11 11
102#define LPFC_VF12 12
103#define LPFC_VF13 13
104#define LPFC_VF14 14
105#define LPFC_VF15 15
106#define LPFC_VF16 16
107#define LPFC_VF17 17
108#define LPFC_VF18 18
109#define LPFC_VF19 19
110#define LPFC_VF20 20
111#define LPFC_VF21 21
112#define LPFC_VF22 22
113#define LPFC_VF23 23
114#define LPFC_VF24 24
115#define LPFC_VF25 25
116#define LPFC_VF26 26
117#define LPFC_VF27 27
118#define LPFC_VF28 28
119#define LPFC_VF29 29
120#define LPFC_VF30 30
121#define LPFC_VF31 31
122
123/* PCI function numbers */
124#define LPFC_PCI_FUNC0 0
125#define LPFC_PCI_FUNC1 1
126#define LPFC_PCI_FUNC2 2
127#define LPFC_PCI_FUNC3 3
128#define LPFC_PCI_FUNC4 4
129
130/* Active interrupt test count */
131#define LPFC_ACT_INTR_CNT 4
132
133/* Delay Multiplier constant */
134#define LPFC_DMULT_CONST 651042
135#define LPFC_MIM_IMAX 636
136#define LPFC_FP_DEF_IMAX 10000
137#define LPFC_SP_DEF_IMAX 10000
138
139struct ulp_bde64 {
140 union ULP_BDE_TUS {
141 uint32_t w;
142 struct {
143#ifdef __BIG_ENDIAN_BITFIELD
144 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
145 VALUE !! */
146 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
147#else /* __LITTLE_ENDIAN_BITFIELD */
148 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
149 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
150 VALUE !! */
151#endif
152#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
153#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
154#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
155#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
156#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
157#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
158#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
159 } f;
160 } tus;
161 uint32_t addrLow;
162 uint32_t addrHigh;
163};
164
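A hedged sketch of filling one 64-bit buffer descriptor follows. Only the little-endian branch of the bitfield union is reproduced (as a hypothetical ulp_bde64_demo), the bus address is invented, and the driver's DMA mapping and endian handling are omitted so the sketch runs in user space.

/* User-space sketch: fill one BDE64 for a host-resident buffer. */
#include <stdint.h>
#include <stdio.h>

struct ulp_bde64_demo {		/* little-endian layout only */
	union {
		uint32_t w;
		struct {
			uint32_t bdeSize:24;	/* buffer size in bytes */
			uint32_t bdeFlags:8;	/* buffer type */
		} f;
	} tus;
	uint32_t addrLow;
	uint32_t addrHigh;
};

#define BUFF_TYPE_BDE_64 0x00	/* host-resident BDE, as defined above */

int main(void)
{
	uint64_t dma = 0x00000001f0000000ULL;	/* invented bus address */
	struct ulp_bde64_demo bde;

	bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bde.tus.f.bdeSize = 4096;
	bde.addrLow = (uint32_t)dma;
	bde.addrHigh = (uint32_t)(dma >> 32);
	printf("tus.w=%#010x low=%#010x high=%#010x\n",
	       (unsigned)bde.tus.w, (unsigned)bde.addrLow,
	       (unsigned)bde.addrHigh);
	return 0;
}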
165struct lpfc_sli4_flags {
166 uint32_t word0;
167#define lpfc_fip_flag_SHIFT 0
168#define lpfc_fip_flag_MASK 0x00000001
169#define lpfc_fip_flag_WORD word0
170};
171
172/* event queue entry structure */
173struct lpfc_eqe {
174 uint32_t word0;
175#define lpfc_eqe_resource_id_SHIFT 16
176#define lpfc_eqe_resource_id_MASK 0x000000FF
177#define lpfc_eqe_resource_id_WORD word0
178#define lpfc_eqe_minor_code_SHIFT 4
179#define lpfc_eqe_minor_code_MASK 0x00000FFF
180#define lpfc_eqe_minor_code_WORD word0
181#define lpfc_eqe_major_code_SHIFT 1
182#define lpfc_eqe_major_code_MASK 0x00000007
183#define lpfc_eqe_major_code_WORD word0
184#define lpfc_eqe_valid_SHIFT 0
185#define lpfc_eqe_valid_MASK 0x00000001
186#define lpfc_eqe_valid_WORD word0
187};
188
189/* completion queue entry structure (common fields for all cqe types) */
190struct lpfc_cqe {
191 uint32_t reserved0;
192 uint32_t reserved1;
193 uint32_t reserved2;
194 uint32_t word3;
195#define lpfc_cqe_valid_SHIFT 31
196#define lpfc_cqe_valid_MASK 0x00000001
197#define lpfc_cqe_valid_WORD word3
198#define lpfc_cqe_code_SHIFT 16
199#define lpfc_cqe_code_MASK 0x000000FF
200#define lpfc_cqe_code_WORD word3
201};
202
203/* Completion Queue Entry Status Codes */
204#define CQE_STATUS_SUCCESS 0x0
205#define CQE_STATUS_FCP_RSP_FAILURE 0x1
206#define CQE_STATUS_REMOTE_STOP 0x2
207#define CQE_STATUS_LOCAL_REJECT 0x3
208#define CQE_STATUS_NPORT_RJT 0x4
209#define CQE_STATUS_FABRIC_RJT 0x5
210#define CQE_STATUS_NPORT_BSY 0x6
211#define CQE_STATUS_FABRIC_BSY 0x7
212#define CQE_STATUS_INTERMED_RSP 0x8
213#define CQE_STATUS_LS_RJT 0x9
214#define CQE_STATUS_CMD_REJECT 0xb
215#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
216#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
217
218/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
219#define CQE_HW_STATUS_NO_ERR 0x0
220#define CQE_HW_STATUS_UNDERRUN 0x1
221#define CQE_HW_STATUS_OVERRUN 0x2
222
223/* Completion Queue Entry Codes */
224#define CQE_CODE_COMPL_WQE 0x1
225#define CQE_CODE_RELEASE_WQE 0x2
226#define CQE_CODE_RECEIVE 0x4
227#define CQE_CODE_XRI_ABORTED 0x5
228
229/* completion queue entry for wqe completions */
230struct lpfc_wcqe_complete {
231 uint32_t word0;
232#define lpfc_wcqe_c_request_tag_SHIFT 16
233#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF
234#define lpfc_wcqe_c_request_tag_WORD word0
235#define lpfc_wcqe_c_status_SHIFT 8
236#define lpfc_wcqe_c_status_MASK 0x000000FF
237#define lpfc_wcqe_c_status_WORD word0
238#define lpfc_wcqe_c_hw_status_SHIFT 0
239#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
240#define lpfc_wcqe_c_hw_status_WORD word0
241 uint32_t total_data_placed;
242 uint32_t parameter;
243 uint32_t word3;
244#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
245#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
246#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD
247#define lpfc_wcqe_c_xb_SHIFT 28
248#define lpfc_wcqe_c_xb_MASK 0x00000001
249#define lpfc_wcqe_c_xb_WORD word3
250#define lpfc_wcqe_c_pv_SHIFT 27
251#define lpfc_wcqe_c_pv_MASK 0x00000001
252#define lpfc_wcqe_c_pv_WORD word3
253#define lpfc_wcqe_c_priority_SHIFT 24
254#define lpfc_wcqe_c_priority_MASK 0x00000007
255#define lpfc_wcqe_c_priority_WORD word3
256#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
257#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
258#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
259};
260
261/* completion queue entry for wqe release */
262struct lpfc_wcqe_release {
263 uint32_t reserved0;
264 uint32_t reserved1;
265 uint32_t word2;
266#define lpfc_wcqe_r_wq_id_SHIFT 16
267#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF
268#define lpfc_wcqe_r_wq_id_WORD word2
269#define lpfc_wcqe_r_wqe_index_SHIFT 0
270#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF
271#define lpfc_wcqe_r_wqe_index_WORD word2
272 uint32_t word3;
273#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT
274#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK
275#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD
276#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT
277#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK
278#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD
279};
280
281struct sli4_wcqe_xri_aborted {
282 uint32_t word0;
283#define lpfc_wcqe_xa_status_SHIFT 8
284#define lpfc_wcqe_xa_status_MASK 0x000000FF
285#define lpfc_wcqe_xa_status_WORD word0
286 uint32_t parameter;
287 uint32_t word2;
288#define lpfc_wcqe_xa_remote_xid_SHIFT 16
289#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF
290#define lpfc_wcqe_xa_remote_xid_WORD word2
291#define lpfc_wcqe_xa_xri_SHIFT 0
292#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF
293#define lpfc_wcqe_xa_xri_WORD word2
294 uint32_t word3;
295#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT
296#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK
297#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD
298#define lpfc_wcqe_xa_ia_SHIFT 30
299#define lpfc_wcqe_xa_ia_MASK 0x00000001
300#define lpfc_wcqe_xa_ia_WORD word3
301#define CQE_XRI_ABORTED_IA_REMOTE 0
302#define CQE_XRI_ABORTED_IA_LOCAL 1
303#define lpfc_wcqe_xa_br_SHIFT 29
304#define lpfc_wcqe_xa_br_MASK 0x00000001
305#define lpfc_wcqe_xa_br_WORD word3
306#define CQE_XRI_ABORTED_BR_BA_ACC 0
307#define CQE_XRI_ABORTED_BR_BA_RJT 1
308#define lpfc_wcqe_xa_eo_SHIFT 28
309#define lpfc_wcqe_xa_eo_MASK 0x00000001
310#define lpfc_wcqe_xa_eo_WORD word3
311#define CQE_XRI_ABORTED_EO_REMOTE 0
312#define CQE_XRI_ABORTED_EO_LOCAL 1
313#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT
314#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK
315#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD
316};
317
318/* completion queue entry structure for rqe completion */
319struct lpfc_rcqe {
320 uint32_t word0;
321#define lpfc_rcqe_bindex_SHIFT 16
322#define lpfc_rcqe_bindex_MASK 0x0000FFF
323#define lpfc_rcqe_bindex_WORD word0
324#define lpfc_rcqe_status_SHIFT 8
325#define lpfc_rcqe_status_MASK 0x000000FF
326#define lpfc_rcqe_status_WORD word0
327#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */
328#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
329#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
330#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
331 uint32_t reserved1;
332 uint32_t word2;
333#define lpfc_rcqe_length_SHIFT 16
334#define lpfc_rcqe_length_MASK 0x0000FFFF
335#define lpfc_rcqe_length_WORD word2
336#define lpfc_rcqe_rq_id_SHIFT 6
337#define lpfc_rcqe_rq_id_MASK 0x000003FF
338#define lpfc_rcqe_rq_id_WORD word2
339#define lpfc_rcqe_fcf_id_SHIFT 0
340#define lpfc_rcqe_fcf_id_MASK 0x0000003F
341#define lpfc_rcqe_fcf_id_WORD word2
342 uint32_t word3;
343#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
344#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
345#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD
346#define lpfc_rcqe_port_SHIFT 30
347#define lpfc_rcqe_port_MASK 0x00000001
348#define lpfc_rcqe_port_WORD word3
349#define lpfc_rcqe_hdr_length_SHIFT 24
350#define lpfc_rcqe_hdr_length_MASK 0x0000001F
351#define lpfc_rcqe_hdr_length_WORD word3
352#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT
353#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK
354#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD
355#define lpfc_rcqe_eof_SHIFT 8
356#define lpfc_rcqe_eof_MASK 0x000000FF
357#define lpfc_rcqe_eof_WORD word3
358#define FCOE_EOFn 0x41
359#define FCOE_EOFt 0x42
360#define FCOE_EOFni 0x49
361#define FCOE_EOFa 0x50
362#define lpfc_rcqe_sof_SHIFT 0
363#define lpfc_rcqe_sof_MASK 0x000000FF
364#define lpfc_rcqe_sof_WORD word3
365#define FCOE_SOFi2 0x2d
366#define FCOE_SOFi3 0x2e
367#define FCOE_SOFn2 0x35
368#define FCOE_SOFn3 0x36
369};
370
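A hedged sketch of decoding a received-frame CQE with bf_get(). Only the status, length, and valid fields of lpfc_rcqe are reproduced (in a hypothetical reduced struct), and the populated CQE contents are invented.

/* User-space sketch: check and decode one receive-queue CQE. */
#include <stdint.h>
#include <stdio.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct lpfc_rcqe_demo {		/* reduced: only fields used below */
	uint32_t word0;
#define lpfc_rcqe_status_SHIFT	8
#define lpfc_rcqe_status_MASK	0x000000FF
#define lpfc_rcqe_status_WORD	word0
	uint32_t reserved1;
	uint32_t word2;
#define lpfc_rcqe_length_SHIFT	16
#define lpfc_rcqe_length_MASK	0x0000FFFF
#define lpfc_rcqe_length_WORD	word2
	uint32_t word3;
#define lpfc_rcqe_valid_SHIFT	31
#define lpfc_rcqe_valid_MASK	0x00000001
#define lpfc_rcqe_valid_WORD	word3
};

int main(void)
{
	/* Invented CQE: valid, FC_STATUS_RQ_SUCCESS (0x10), 2048 bytes */
	struct lpfc_rcqe_demo rcqe = {
		.word0 = 0x10 << 8,
		.word2 = 2048u << 16,
		.word3 = 1u << 31,
	};

	if (bf_get(lpfc_rcqe_valid, &rcqe) &&
	    bf_get(lpfc_rcqe_status, &rcqe) == 0x10)
		printf("frame received, payload length %u\n",
		       (unsigned)bf_get(lpfc_rcqe_length, &rcqe));
	return 0;
}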
371struct lpfc_wqe_generic{
372 struct ulp_bde64 bde;
373 uint32_t word3;
374 uint32_t word4;
375 uint32_t word5;
376 uint32_t word6;
377#define lpfc_wqe_gen_context_SHIFT 16
378#define lpfc_wqe_gen_context_MASK 0x0000FFFF
379#define lpfc_wqe_gen_context_WORD word6
380#define lpfc_wqe_gen_xri_SHIFT 0
381#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
382#define lpfc_wqe_gen_xri_WORD word6
383 uint32_t word7;
384#define lpfc_wqe_gen_lnk_SHIFT 23
385#define lpfc_wqe_gen_lnk_MASK 0x00000001
386#define lpfc_wqe_gen_lnk_WORD word7
387#define lpfc_wqe_gen_erp_SHIFT 22
388#define lpfc_wqe_gen_erp_MASK 0x00000001
389#define lpfc_wqe_gen_erp_WORD word7
390#define lpfc_wqe_gen_pu_SHIFT 20
391#define lpfc_wqe_gen_pu_MASK 0x00000003
392#define lpfc_wqe_gen_pu_WORD word7
393#define lpfc_wqe_gen_class_SHIFT 16
394#define lpfc_wqe_gen_class_MASK 0x00000007
395#define lpfc_wqe_gen_class_WORD word7
396#define lpfc_wqe_gen_command_SHIFT 8
397#define lpfc_wqe_gen_command_MASK 0x000000FF
398#define lpfc_wqe_gen_command_WORD word7
399#define lpfc_wqe_gen_status_SHIFT 4
400#define lpfc_wqe_gen_status_MASK 0x0000000F
401#define lpfc_wqe_gen_status_WORD word7
402#define lpfc_wqe_gen_ct_SHIFT 2
403#define lpfc_wqe_gen_ct_MASK 0x00000007
404#define lpfc_wqe_gen_ct_WORD word7
405 uint32_t abort_tag;
406 uint32_t word9;
407#define lpfc_wqe_gen_request_tag_SHIFT 0
408#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
409#define lpfc_wqe_gen_request_tag_WORD word9
410 uint32_t word10;
411#define lpfc_wqe_gen_ccp_SHIFT 24
412#define lpfc_wqe_gen_ccp_MASK 0x000000FF
413#define lpfc_wqe_gen_ccp_WORD word10
414#define lpfc_wqe_gen_ccpe_SHIFT 23
415#define lpfc_wqe_gen_ccpe_MASK 0x00000001
416#define lpfc_wqe_gen_ccpe_WORD word10
417#define lpfc_wqe_gen_pv_SHIFT 19
418#define lpfc_wqe_gen_pv_MASK 0x00000001
419#define lpfc_wqe_gen_pv_WORD word10
420#define lpfc_wqe_gen_pri_SHIFT 16
421#define lpfc_wqe_gen_pri_MASK 0x00000007
422#define lpfc_wqe_gen_pri_WORD word10
423 uint32_t word11;
424#define lpfc_wqe_gen_cq_id_SHIFT 16
425#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
426#define lpfc_wqe_gen_cq_id_WORD word11
427#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
428#define lpfc_wqe_gen_wqec_SHIFT 7
429#define lpfc_wqe_gen_wqec_MASK 0x00000001
430#define lpfc_wqe_gen_wqec_WORD word11
431#define lpfc_wqe_gen_cmd_type_SHIFT 0
432#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
433#define lpfc_wqe_gen_cmd_type_WORD word11
434 uint32_t payload[4];
435};
436
437struct lpfc_rqe {
438 uint32_t address_hi;
439 uint32_t address_lo;
440};
441
442/* buffer descriptors */
443struct lpfc_bde4 {
444 uint32_t addr_hi;
445 uint32_t addr_lo;
446 uint32_t word2;
447#define lpfc_bde4_last_SHIFT 31
448#define lpfc_bde4_last_MASK 0x00000001
449#define lpfc_bde4_last_WORD word2
450#define lpfc_bde4_sge_offset_SHIFT 0
451#define lpfc_bde4_sge_offset_MASK 0x000003FF
452#define lpfc_bde4_sge_offset_WORD word2
453 uint32_t word3;
454#define lpfc_bde4_length_SHIFT 0
455#define lpfc_bde4_length_MASK 0x000000FF
456#define lpfc_bde4_length_WORD word3
457};
458
459struct lpfc_register {
460 uint32_t word0;
461};
462
463#define LPFC_UERR_STATUS_HI 0x00A4
464#define LPFC_UERR_STATUS_LO 0x00A0
465#define LPFC_ONLINE0 0x00B0
466#define LPFC_ONLINE1 0x00B4
467#define LPFC_SCRATCHPAD 0x0058
468
469/* BAR0 Registers */
470#define LPFC_HST_STATE 0x00AC
471#define lpfc_hst_state_perr_SHIFT 31
472#define lpfc_hst_state_perr_MASK 0x1
473#define lpfc_hst_state_perr_WORD word0
474#define lpfc_hst_state_sfi_SHIFT 30
475#define lpfc_hst_state_sfi_MASK 0x1
476#define lpfc_hst_state_sfi_WORD word0
477#define lpfc_hst_state_nip_SHIFT 29
478#define lpfc_hst_state_nip_MASK 0x1
479#define lpfc_hst_state_nip_WORD word0
480#define lpfc_hst_state_ipc_SHIFT 28
481#define lpfc_hst_state_ipc_MASK 0x1
482#define lpfc_hst_state_ipc_WORD word0
483#define lpfc_hst_state_xrom_SHIFT 27
484#define lpfc_hst_state_xrom_MASK 0x1
485#define lpfc_hst_state_xrom_WORD word0
486#define lpfc_hst_state_dl_SHIFT 26
487#define lpfc_hst_state_dl_MASK 0x1
488#define lpfc_hst_state_dl_WORD word0
489#define lpfc_hst_state_port_status_SHIFT 0
490#define lpfc_hst_state_port_status_MASK 0xFFFF
491#define lpfc_hst_state_port_status_WORD word0
492
493#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
494#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
495#define LPFC_POST_STAGE_HOST_RDY 0x0002
496#define LPFC_POST_STAGE_BE_RESET 0x0003
497#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100
498#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101
499#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200
500#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201
501#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300
502#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301
503#define LPFC_POST_STAGE_DDR_TEST_START 0x0400
504#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401
505#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600
506#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601
507#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700
508#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701
509#define LPFC_POST_STAGE_ARMFW_START 0x0800
510#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900
511#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901
512#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00
513#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01
514#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00
515#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01
516#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02
517#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03
518#define LPFC_POST_STAGE_PARSE_XML 0x0B04
519#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05
520#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06
521#define LPFC_POST_STAGE_RC_DONE 0x0B07
522#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
523#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
524#define LPFC_POST_STAGE_ARMFW_READY 0xC000
525#define LPFC_POST_STAGE_ARMFW_UE 0xF000
526
527#define lpfc_scratchpad_slirev_SHIFT 4
528#define lpfc_scratchpad_slirev_MASK 0xF
529#define lpfc_scratchpad_slirev_WORD word0
530#define lpfc_scratchpad_chiptype_SHIFT 8
531#define lpfc_scratchpad_chiptype_MASK 0xFF
532#define lpfc_scratchpad_chiptype_WORD word0
533#define lpfc_scratchpad_featurelevel1_SHIFT 16
534#define lpfc_scratchpad_featurelevel1_MASK 0xFF
535#define lpfc_scratchpad_featurelevel1_WORD word0
536#define lpfc_scratchpad_featurelevel2_SHIFT 24
537#define lpfc_scratchpad_featurelevel2_MASK 0xFF
538#define lpfc_scratchpad_featurelevel2_WORD word0
539
540/* BAR1 Registers */
541#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
542#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
543
544#define LPFC_HST_ISR0 0x0C18
545#define LPFC_HST_ISR1 0x0C1C
546#define LPFC_HST_ISR2 0x0C20
547#define LPFC_HST_ISR3 0x0C24
548#define LPFC_HST_ISR4 0x0C28
549
550#define LPFC_HST_IMR0 0x0C48
551#define LPFC_HST_IMR1 0x0C4C
552#define LPFC_HST_IMR2 0x0C50
553#define LPFC_HST_IMR3 0x0C54
554#define LPFC_HST_IMR4 0x0C58
555
556#define LPFC_HST_ISCR0 0x0C78
557#define LPFC_HST_ISCR1 0x0C7C
558#define LPFC_HST_ISCR2 0x0C80
559#define LPFC_HST_ISCR3 0x0C84
560#define LPFC_HST_ISCR4 0x0C88
561
562#define LPFC_SLI4_INTR0 BIT0
563#define LPFC_SLI4_INTR1 BIT1
564#define LPFC_SLI4_INTR2 BIT2
565#define LPFC_SLI4_INTR3 BIT3
566#define LPFC_SLI4_INTR4 BIT4
567#define LPFC_SLI4_INTR5 BIT5
568#define LPFC_SLI4_INTR6 BIT6
569#define LPFC_SLI4_INTR7 BIT7
570#define LPFC_SLI4_INTR8 BIT8
571#define LPFC_SLI4_INTR9 BIT9
572#define LPFC_SLI4_INTR10 BIT10
573#define LPFC_SLI4_INTR11 BIT11
574#define LPFC_SLI4_INTR12 BIT12
575#define LPFC_SLI4_INTR13 BIT13
576#define LPFC_SLI4_INTR14 BIT14
577#define LPFC_SLI4_INTR15 BIT15
578#define LPFC_SLI4_INTR16 BIT16
579#define LPFC_SLI4_INTR17 BIT17
580#define LPFC_SLI4_INTR18 BIT18
581#define LPFC_SLI4_INTR19 BIT19
582#define LPFC_SLI4_INTR20 BIT20
583#define LPFC_SLI4_INTR21 BIT21
584#define LPFC_SLI4_INTR22 BIT22
585#define LPFC_SLI4_INTR23 BIT23
586#define LPFC_SLI4_INTR24 BIT24
587#define LPFC_SLI4_INTR25 BIT25
588#define LPFC_SLI4_INTR26 BIT26
589#define LPFC_SLI4_INTR27 BIT27
590#define LPFC_SLI4_INTR28 BIT28
591#define LPFC_SLI4_INTR29 BIT29
592#define LPFC_SLI4_INTR30 BIT30
593#define LPFC_SLI4_INTR31 BIT31
594
595/* BAR2 Registers */
596#define LPFC_RQ_DOORBELL 0x00A0
597#define lpfc_rq_doorbell_num_posted_SHIFT 16
598#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
599#define lpfc_rq_doorbell_num_posted_WORD word0
600#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
601#define lpfc_rq_doorbell_id_SHIFT 0
602#define lpfc_rq_doorbell_id_MASK 0x03FF
603#define lpfc_rq_doorbell_id_WORD word0
604
605#define LPFC_WQ_DOORBELL 0x0040
606#define lpfc_wq_doorbell_num_posted_SHIFT 24
607#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
608#define lpfc_wq_doorbell_num_posted_WORD word0
609#define lpfc_wq_doorbell_index_SHIFT 16
610#define lpfc_wq_doorbell_index_MASK 0x00FF
611#define lpfc_wq_doorbell_index_WORD word0
612#define lpfc_wq_doorbell_id_SHIFT 0
613#define lpfc_wq_doorbell_id_MASK 0xFFFF
614#define lpfc_wq_doorbell_id_WORD word0
615
616#define LPFC_EQCQ_DOORBELL 0x0120
617#define lpfc_eqcq_doorbell_arm_SHIFT 29
618#define lpfc_eqcq_doorbell_arm_MASK 0x0001
619#define lpfc_eqcq_doorbell_arm_WORD word0
620#define lpfc_eqcq_doorbell_num_released_SHIFT 16
621#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF
622#define lpfc_eqcq_doorbell_num_released_WORD word0
623#define lpfc_eqcq_doorbell_qt_SHIFT 10
624#define lpfc_eqcq_doorbell_qt_MASK 0x0001
625#define lpfc_eqcq_doorbell_qt_WORD word0
626#define LPFC_QUEUE_TYPE_COMPLETION 0
627#define LPFC_QUEUE_TYPE_EVENT 1
628#define lpfc_eqcq_doorbell_eqci_SHIFT 9
629#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
630#define lpfc_eqcq_doorbell_eqci_WORD word0
631#define lpfc_eqcq_doorbell_cqid_SHIFT 0
632#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
633#define lpfc_eqcq_doorbell_cqid_WORD word0
634#define lpfc_eqcq_doorbell_eqid_SHIFT 0
635#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
636#define lpfc_eqcq_doorbell_eqid_WORD word0
637
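A hedged sketch of composing the 32-bit value a driver would write to BAR2 + LPFC_EQCQ_DOORBELL to release four CQEs and re-arm a completion queue. The CQ id and count are invented, and the actual writel() through the BAR2 mapping is omitted so the sketch runs in user space.

/* User-space sketch: build one EQCQ doorbell word with bf_set(). */
#include <stdint.h>
#include <stdio.h>

#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct lpfc_register { uint32_t word0; };

/* Field layout reproduced from the LPFC_EQCQ_DOORBELL defines above. */
#define lpfc_eqcq_doorbell_arm_SHIFT		29
#define lpfc_eqcq_doorbell_arm_MASK		0x0001
#define lpfc_eqcq_doorbell_arm_WORD		word0
#define lpfc_eqcq_doorbell_num_released_SHIFT	16
#define lpfc_eqcq_doorbell_num_released_MASK	0x1FFF
#define lpfc_eqcq_doorbell_num_released_WORD	word0
#define lpfc_eqcq_doorbell_qt_SHIFT		10
#define lpfc_eqcq_doorbell_qt_MASK		0x0001
#define lpfc_eqcq_doorbell_qt_WORD		word0
#define LPFC_QUEUE_TYPE_COMPLETION		0
#define lpfc_eqcq_doorbell_cqid_SHIFT		0
#define lpfc_eqcq_doorbell_cqid_MASK		0x03FF
#define lpfc_eqcq_doorbell_cqid_WORD		word0

int main(void)
{
	struct lpfc_register doorbell = { 0 };

	bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, 4);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, 0x22);

	/* 0x20040022: arm | 4 released | completion-queue type | cqid */
	printf("doorbell word0 = %#010x\n", (unsigned)doorbell.word0);
	return 0;
}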
638#define LPFC_BMBX 0x0160
639#define lpfc_bmbx_addr_SHIFT 2
640#define lpfc_bmbx_addr_MASK 0x3FFFFFFF
641#define lpfc_bmbx_addr_WORD word0
642#define lpfc_bmbx_hi_SHIFT 1
643#define lpfc_bmbx_hi_MASK 0x0001
644#define lpfc_bmbx_hi_WORD word0
645#define lpfc_bmbx_rdy_SHIFT 0
646#define lpfc_bmbx_rdy_MASK 0x0001
647#define lpfc_bmbx_rdy_WORD word0
648
649#define LPFC_MQ_DOORBELL 0x0140
650#define lpfc_mq_doorbell_num_posted_SHIFT 16
651#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
652#define lpfc_mq_doorbell_num_posted_WORD word0
653#define lpfc_mq_doorbell_id_SHIFT 0
654#define lpfc_mq_doorbell_id_MASK 0x03FF
655#define lpfc_mq_doorbell_id_WORD word0
656
657struct lpfc_sli4_cfg_mhdr {
658 uint32_t word1;
659#define lpfc_mbox_hdr_emb_SHIFT 0
660#define lpfc_mbox_hdr_emb_MASK 0x00000001
661#define lpfc_mbox_hdr_emb_WORD word1
662#define lpfc_mbox_hdr_sge_cnt_SHIFT 3
663#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F
664#define lpfc_mbox_hdr_sge_cnt_WORD word1
665 uint32_t payload_length;
666 uint32_t tag_lo;
667 uint32_t tag_hi;
668 uint32_t reserved5;
669};
670
671union lpfc_sli4_cfg_shdr {
672 struct {
673 uint32_t word6;
674#define lpfc_mbox_hdr_opcode_SHIFT 0
675#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
676#define lpfc_mbox_hdr_opcode_WORD word6
677#define lpfc_mbox_hdr_subsystem_SHIFT 8
678#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
679#define lpfc_mbox_hdr_subsystem_WORD word6
680#define lpfc_mbox_hdr_port_number_SHIFT 16
681#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
682#define lpfc_mbox_hdr_port_number_WORD word6
683#define lpfc_mbox_hdr_domain_SHIFT 24
684#define lpfc_mbox_hdr_domain_MASK 0x000000FF
685#define lpfc_mbox_hdr_domain_WORD word6
686 uint32_t timeout;
687 uint32_t request_length;
688 uint32_t reserved9;
689 } request;
690 struct {
691 uint32_t word6;
692#define lpfc_mbox_hdr_opcode_SHIFT 0
693#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
694#define lpfc_mbox_hdr_opcode_WORD word6
695#define lpfc_mbox_hdr_subsystem_SHIFT 8
696#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
697#define lpfc_mbox_hdr_subsystem_WORD word6
698#define lpfc_mbox_hdr_domain_SHIFT 24
699#define lpfc_mbox_hdr_domain_MASK 0x000000FF
700#define lpfc_mbox_hdr_domain_WORD word6
701 uint32_t word7;
702#define lpfc_mbox_hdr_status_SHIFT 0
703#define lpfc_mbox_hdr_status_MASK 0x000000FF
704#define lpfc_mbox_hdr_status_WORD word7
705#define lpfc_mbox_hdr_add_status_SHIFT 8
706#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
707#define lpfc_mbox_hdr_add_status_WORD word7
708 uint32_t response_length;
709 uint32_t actual_response_length;
710 } response;
711};
712
713/* Mailbox structures */
714struct mbox_header {
715 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
716 union lpfc_sli4_cfg_shdr cfg_shdr;
717};
718
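A hedged sketch of how a completed SLI_CONFIG response would be checked through the response side of the shared header above. Only word7 is reproduced (in a hypothetical reduced struct), and the failure values are invented, chosen to match STATUS_FAILED and STATUS_FCF_IN_USE defined further below.

/* User-space sketch: read status fields from a response shdr. */
#include <stdint.h>
#include <stdio.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct cfg_shdr_response_demo {	/* reduced response-side layout */
	uint32_t word6;
	uint32_t word7;
#define lpfc_mbox_hdr_status_SHIFT	0
#define lpfc_mbox_hdr_status_MASK	0x000000FF
#define lpfc_mbox_hdr_status_WORD	word7
#define lpfc_mbox_hdr_add_status_SHIFT	8
#define lpfc_mbox_hdr_add_status_MASK	0x000000FF
#define lpfc_mbox_hdr_add_status_WORD	word7
	uint32_t response_length;
	uint32_t actual_response_length;
};

int main(void)
{
	/* Invented response: STATUS_FAILED with FCF-in-use add_status */
	struct cfg_shdr_response_demo rsp = { .word7 = (0x3a << 8) | 0x1 };
	uint32_t status = bf_get(lpfc_mbox_hdr_status, &rsp);
	uint32_t add_status = bf_get(lpfc_mbox_hdr_add_status, &rsp);

	if (status != 0 /* STATUS_SUCCESS */)
		printf("mailbox failed: status %#x add_status %#x\n",
		       (unsigned)status, (unsigned)add_status);
	return 0;
}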
719/* Subsystem Definitions */
720#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
721#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
722
723/* Device Specific Definitions */
724
725/* The HOST ENDIAN defines are in Big Endian format. */
726#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF
727#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
728
729/* Common Opcodes */
730#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
731#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
732#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
733#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
734#define LPFC_MBOX_OPCODE_NOP 0x21
735#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
736#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
737#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
738#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
739
740/* FCoE Opcodes */
741#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
742#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02
743#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03
744#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04
745#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05
746#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06
747#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08
748#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
749#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
750#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
751
752/* Mailbox command structures */
753struct eq_context {
754 uint32_t word0;
755#define lpfc_eq_context_size_SHIFT 31
756#define lpfc_eq_context_size_MASK 0x00000001
757#define lpfc_eq_context_size_WORD word0
758#define LPFC_EQE_SIZE_4 0x0
759#define LPFC_EQE_SIZE_16 0x1
760#define lpfc_eq_context_valid_SHIFT 29
761#define lpfc_eq_context_valid_MASK 0x00000001
762#define lpfc_eq_context_valid_WORD word0
763 uint32_t word1;
764#define lpfc_eq_context_count_SHIFT 26
765#define lpfc_eq_context_count_MASK 0x00000003
766#define lpfc_eq_context_count_WORD word1
767#define LPFC_EQ_CNT_256 0x0
768#define LPFC_EQ_CNT_512 0x1
769#define LPFC_EQ_CNT_1024 0x2
770#define LPFC_EQ_CNT_2048 0x3
771#define LPFC_EQ_CNT_4096 0x4
772 uint32_t word2;
773#define lpfc_eq_context_delay_multi_SHIFT 13
774#define lpfc_eq_context_delay_multi_MASK 0x000003FF
775#define lpfc_eq_context_delay_multi_WORD word2
776 uint32_t reserved3;
777};
778
779struct sgl_page_pairs {
780 uint32_t sgl_pg0_addr_lo;
781 uint32_t sgl_pg0_addr_hi;
782 uint32_t sgl_pg1_addr_lo;
783 uint32_t sgl_pg1_addr_hi;
784};
785
786struct lpfc_mbx_post_sgl_pages {
787 struct mbox_header header;
788 uint32_t word0;
789#define lpfc_post_sgl_pages_xri_SHIFT 0
790#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF
791#define lpfc_post_sgl_pages_xri_WORD word0
792#define lpfc_post_sgl_pages_xricnt_SHIFT 16
793#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF
794#define lpfc_post_sgl_pages_xricnt_WORD word0
795 struct sgl_page_pairs sgl_pg_pairs[1];
796};
797
798/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
799struct lpfc_mbx_post_uembed_sgl_page1 {
800 union lpfc_sli4_cfg_shdr cfg_shdr;
801 uint32_t word0;
802 struct sgl_page_pairs sgl_pg_pairs;
803};
804
805struct lpfc_mbx_sge {
806 uint32_t pa_lo;
807 uint32_t pa_hi;
808 uint32_t length;
809};
810
811struct lpfc_mbx_nembed_cmd {
812 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
813#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
814 struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
815};
816
817struct lpfc_mbx_nembed_sge_virt {
818 void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
819};
820
821struct lpfc_mbx_eq_create {
822 struct mbox_header header;
823 union {
824 struct {
825 uint32_t word0;
826#define lpfc_mbx_eq_create_num_pages_SHIFT 0
827#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF
828#define lpfc_mbx_eq_create_num_pages_WORD word0
829 struct eq_context context;
830 struct dma_address page[LPFC_MAX_EQ_PAGE];
831 } request;
832 struct {
833 uint32_t word0;
834#define lpfc_mbx_eq_create_q_id_SHIFT 0
835#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF
836#define lpfc_mbx_eq_create_q_id_WORD word0
837 } response;
838 } u;
839};
840
841struct lpfc_mbx_eq_destroy {
842 struct mbox_header header;
843 union {
844 struct {
845 uint32_t word0;
846#define lpfc_mbx_eq_destroy_q_id_SHIFT 0
847#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF
848#define lpfc_mbx_eq_destroy_q_id_WORD word0
849 } request;
850 struct {
851 uint32_t word0;
852 } response;
853 } u;
854};
855
856struct lpfc_mbx_nop {
857 struct mbox_header header;
858 uint32_t context[2];
859};
860
861struct cq_context {
862 uint32_t word0;
863#define lpfc_cq_context_event_SHIFT 31
864#define lpfc_cq_context_event_MASK 0x00000001
865#define lpfc_cq_context_event_WORD word0
866#define lpfc_cq_context_valid_SHIFT 29
867#define lpfc_cq_context_valid_MASK 0x00000001
868#define lpfc_cq_context_valid_WORD word0
869#define lpfc_cq_context_count_SHIFT 27
870#define lpfc_cq_context_count_MASK 0x00000003
871#define lpfc_cq_context_count_WORD word0
872#define LPFC_CQ_CNT_256 0x0
873#define LPFC_CQ_CNT_512 0x1
874#define LPFC_CQ_CNT_1024 0x2
875 uint32_t word1;
876#define lpfc_cq_eq_id_SHIFT 22
877#define lpfc_cq_eq_id_MASK 0x000000FF
878#define lpfc_cq_eq_id_WORD word1
879 uint32_t reserved0;
880 uint32_t reserved1;
881};
882
883struct lpfc_mbx_cq_create {
884 struct mbox_header header;
885 union {
886 struct {
887 uint32_t word0;
888#define lpfc_mbx_cq_create_num_pages_SHIFT 0
889#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
890#define lpfc_mbx_cq_create_num_pages_WORD word0
891 struct cq_context context;
892 struct dma_address page[LPFC_MAX_CQ_PAGE];
893 } request;
894 struct {
895 uint32_t word0;
896#define lpfc_mbx_cq_create_q_id_SHIFT 0
897#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF
898#define lpfc_mbx_cq_create_q_id_WORD word0
899 } response;
900 } u;
901};
902
903struct lpfc_mbx_cq_destroy {
904 struct mbox_header header;
905 union {
906 struct {
907 uint32_t word0;
908#define lpfc_mbx_cq_destroy_q_id_SHIFT 0
909#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF
910#define lpfc_mbx_cq_destroy_q_id_WORD word0
911 } request;
912 struct {
913 uint32_t word0;
914 } response;
915 } u;
916};
917
918struct wq_context {
919 uint32_t reserved0;
920 uint32_t reserved1;
921 uint32_t reserved2;
922 uint32_t reserved3;
923};
924
925struct lpfc_mbx_wq_create {
926 struct mbox_header header;
927 union {
928 struct {
929 uint32_t word0;
930#define lpfc_mbx_wq_create_num_pages_SHIFT 0
931#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
932#define lpfc_mbx_wq_create_num_pages_WORD word0
933#define lpfc_mbx_wq_create_cq_id_SHIFT 16
934#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
935#define lpfc_mbx_wq_create_cq_id_WORD word0
936 struct dma_address page[LPFC_MAX_WQ_PAGE];
937 } request;
938 struct {
939 uint32_t word0;
940#define lpfc_mbx_wq_create_q_id_SHIFT 0
941#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
942#define lpfc_mbx_wq_create_q_id_WORD word0
943 } response;
944 } u;
945};
946
947struct lpfc_mbx_wq_destroy {
948 struct mbox_header header;
949 union {
950 struct {
951 uint32_t word0;
952#define lpfc_mbx_wq_destroy_q_id_SHIFT 0
953#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF
954#define lpfc_mbx_wq_destroy_q_id_WORD word0
955 } request;
956 struct {
957 uint32_t word0;
958 } response;
959 } u;
960};
961
962#define LPFC_HDR_BUF_SIZE 128
963#define LPFC_DATA_BUF_SIZE 4096
964struct rq_context {
965 uint32_t word0;
966#define lpfc_rq_context_rq_size_SHIFT 16
967#define lpfc_rq_context_rq_size_MASK 0x0000000F
968#define lpfc_rq_context_rq_size_WORD word0
969#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
970#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
971#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
972#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
973 uint32_t reserved1;
974 uint32_t word2;
975#define lpfc_rq_context_cq_id_SHIFT 16
976#define lpfc_rq_context_cq_id_MASK 0x000003FF
977#define lpfc_rq_context_cq_id_WORD word2
978#define lpfc_rq_context_buf_size_SHIFT 0
979#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
980#define lpfc_rq_context_buf_size_WORD word2
981 uint32_t reserved3;
982};
983
984struct lpfc_mbx_rq_create {
985 struct mbox_header header;
986 union {
987 struct {
988 uint32_t word0;
989#define lpfc_mbx_rq_create_num_pages_SHIFT 0
990#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
991#define lpfc_mbx_rq_create_num_pages_WORD word0
992 struct rq_context context;
993 struct dma_address page[LPFC_MAX_WQ_PAGE];
994 } request;
995 struct {
996 uint32_t word0;
997#define lpfc_mbx_rq_create_q_id_SHIFT 0
998#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
999#define lpfc_mbx_rq_create_q_id_WORD word0
1000 } response;
1001 } u;
1002};
1003
1004struct lpfc_mbx_rq_destroy {
1005 struct mbox_header header;
1006 union {
1007 struct {
1008 uint32_t word0;
1009#define lpfc_mbx_rq_destroy_q_id_SHIFT 0
1010#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF
1011#define lpfc_mbx_rq_destroy_q_id_WORD word0
1012 } request;
1013 struct {
1014 uint32_t word0;
1015 } response;
1016 } u;
1017};
1018
1019struct mq_context {
1020 uint32_t word0;
1021#define lpfc_mq_context_cq_id_SHIFT 22
1022#define lpfc_mq_context_cq_id_MASK 0x000003FF
1023#define lpfc_mq_context_cq_id_WORD word0
1024#define lpfc_mq_context_count_SHIFT 16
1025#define lpfc_mq_context_count_MASK 0x0000000F
1026#define lpfc_mq_context_count_WORD word0
1027#define LPFC_MQ_CNT_16 0x5
1028#define LPFC_MQ_CNT_32 0x6
1029#define LPFC_MQ_CNT_64 0x7
1030#define LPFC_MQ_CNT_128 0x8
1031 uint32_t word1;
1032#define lpfc_mq_context_valid_SHIFT 31
1033#define lpfc_mq_context_valid_MASK 0x00000001
1034#define lpfc_mq_context_valid_WORD word1
1035 uint32_t reserved2;
1036 uint32_t reserved3;
1037};
1038
1039struct lpfc_mbx_mq_create {
1040 struct mbox_header header;
1041 union {
1042 struct {
1043 uint32_t word0;
1044#define lpfc_mbx_mq_create_num_pages_SHIFT 0
1045#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF
1046#define lpfc_mbx_mq_create_num_pages_WORD word0
1047 struct mq_context context;
1048 struct dma_address page[LPFC_MAX_MQ_PAGE];
1049 } request;
1050 struct {
1051 uint32_t word0;
1052#define lpfc_mbx_mq_create_q_id_SHIFT 0
1053#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
1054#define lpfc_mbx_mq_create_q_id_WORD word0
1055 } response;
1056 } u;
1057};
1058
1059struct lpfc_mbx_mq_destroy {
1060 struct mbox_header header;
1061 union {
1062 struct {
1063 uint32_t word0;
1064#define lpfc_mbx_mq_destroy_q_id_SHIFT 0
1065#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF
1066#define lpfc_mbx_mq_destroy_q_id_WORD word0
1067 } request;
1068 struct {
1069 uint32_t word0;
1070 } response;
1071 } u;
1072};
1073
1074struct lpfc_mbx_post_hdr_tmpl {
1075 struct mbox_header header;
1076 uint32_t word10;
1077#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0
1078#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF
1079#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10
1080#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16
1081#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF
1082#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10
1083 uint32_t rpi_paddr_lo;
1084 uint32_t rpi_paddr_hi;
1085};
1086
1087struct sli4_sge { /* SLI-4 */
1088 uint32_t addr_hi;
1089 uint32_t addr_lo;
1090
1091 uint32_t word2;
1092#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
1093#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
1094#define lpfc_sli4_sge_offset_WORD word2
1095#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
1096 this flag !! */
1097#define lpfc_sli4_sge_last_MASK 0x00000001
1098#define lpfc_sli4_sge_last_WORD word2
1099 uint32_t word3;
1100#define lpfc_sli4_sge_len_SHIFT 0
1101#define lpfc_sli4_sge_len_MASK 0x0001FFFF
1102#define lpfc_sli4_sge_len_WORD word3
1103};
1104
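A hedged sketch of initializing one SLI-4 SGE for a single 512-byte buffer and marking it the last entry in its SGL. The bus address is invented, only the last and len fields are reproduced (in a hypothetical reduced struct), and the little-endian conversion the driver applies to these words is omitted.

/* User-space sketch: fill one scatter-gather entry with bf_set(). */
#include <stdint.h>
#include <stdio.h>

#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct sli4_sge_demo {		/* reduced: only fields used below */
	uint32_t addr_hi;
	uint32_t addr_lo;
	uint32_t word2;
#define lpfc_sli4_sge_last_SHIFT	31
#define lpfc_sli4_sge_last_MASK		0x00000001
#define lpfc_sli4_sge_last_WORD		word2
	uint32_t word3;
#define lpfc_sli4_sge_len_SHIFT		0
#define lpfc_sli4_sge_len_MASK		0x0001FFFF
#define lpfc_sli4_sge_len_WORD		word3
};

int main(void)
{
	uint64_t dma = 0x12345000ULL;	/* invented bus address */
	struct sli4_sge_demo sge = { 0 };

	sge.addr_hi = (uint32_t)(dma >> 32);
	sge.addr_lo = (uint32_t)dma;
	bf_set(lpfc_sli4_sge_last, &sge, 1);	/* only SGE in the SGL */
	bf_set(lpfc_sli4_sge_len, &sge, 512);
	printf("sge: hi %#x lo %#x w2 %#x w3 %#x\n",
	       (unsigned)sge.addr_hi, (unsigned)sge.addr_lo,
	       (unsigned)sge.word2, (unsigned)sge.word3);
	return 0;
}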
1105struct fcf_record {
1106 uint32_t max_rcv_size;
1107 uint32_t fka_adv_period;
1108 uint32_t fip_priority;
1109 uint32_t word3;
1110#define lpfc_fcf_record_mac_0_SHIFT 0
1111#define lpfc_fcf_record_mac_0_MASK 0x000000FF
1112#define lpfc_fcf_record_mac_0_WORD word3
1113#define lpfc_fcf_record_mac_1_SHIFT 8
1114#define lpfc_fcf_record_mac_1_MASK 0x000000FF
1115#define lpfc_fcf_record_mac_1_WORD word3
1116#define lpfc_fcf_record_mac_2_SHIFT 16
1117#define lpfc_fcf_record_mac_2_MASK 0x000000FF
1118#define lpfc_fcf_record_mac_2_WORD word3
1119#define lpfc_fcf_record_mac_3_SHIFT 24
1120#define lpfc_fcf_record_mac_3_MASK 0x000000FF
1121#define lpfc_fcf_record_mac_3_WORD word3
1122 uint32_t word4;
1123#define lpfc_fcf_record_mac_4_SHIFT 0
1124#define lpfc_fcf_record_mac_4_MASK 0x000000FF
1125#define lpfc_fcf_record_mac_4_WORD word4
1126#define lpfc_fcf_record_mac_5_SHIFT 8
1127#define lpfc_fcf_record_mac_5_MASK 0x000000FF
1128#define lpfc_fcf_record_mac_5_WORD word4
1129#define lpfc_fcf_record_fcf_avail_SHIFT 16
1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
1131#define lpfc_fcf_record_fcf_avail_WORD word4
1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
1134#define lpfc_fcf_record_mac_addr_prov_WORD word4
1135#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */
1136#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */
1137 uint32_t word5;
1138#define lpfc_fcf_record_fab_name_0_SHIFT 0
1139#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF
1140#define lpfc_fcf_record_fab_name_0_WORD word5
1141#define lpfc_fcf_record_fab_name_1_SHIFT 8
1142#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF
1143#define lpfc_fcf_record_fab_name_1_WORD word5
1144#define lpfc_fcf_record_fab_name_2_SHIFT 16
1145#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF
1146#define lpfc_fcf_record_fab_name_2_WORD word5
1147#define lpfc_fcf_record_fab_name_3_SHIFT 24
1148#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF
1149#define lpfc_fcf_record_fab_name_3_WORD word5
1150 uint32_t word6;
1151#define lpfc_fcf_record_fab_name_4_SHIFT 0
1152#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF
1153#define lpfc_fcf_record_fab_name_4_WORD word6
1154#define lpfc_fcf_record_fab_name_5_SHIFT 8
1155#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF
1156#define lpfc_fcf_record_fab_name_5_WORD word6
1157#define lpfc_fcf_record_fab_name_6_SHIFT 16
1158#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF
1159#define lpfc_fcf_record_fab_name_6_WORD word6
1160#define lpfc_fcf_record_fab_name_7_SHIFT 24
1161#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF
1162#define lpfc_fcf_record_fab_name_7_WORD word6
1163 uint32_t word7;
1164#define lpfc_fcf_record_fc_map_0_SHIFT 0
1165#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF
1166#define lpfc_fcf_record_fc_map_0_WORD word7
1167#define lpfc_fcf_record_fc_map_1_SHIFT 8
1168#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF
1169#define lpfc_fcf_record_fc_map_1_WORD word7
1170#define lpfc_fcf_record_fc_map_2_SHIFT 16
1171#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
1172#define lpfc_fcf_record_fc_map_2_WORD word7
1173#define lpfc_fcf_record_fcf_valid_SHIFT 24
1174#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF
1175#define lpfc_fcf_record_fcf_valid_WORD word7
1176 uint32_t word8;
1177#define lpfc_fcf_record_fcf_index_SHIFT 0
1178#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
1179#define lpfc_fcf_record_fcf_index_WORD word8
1180#define lpfc_fcf_record_fcf_state_SHIFT 16
1181#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
1182#define lpfc_fcf_record_fcf_state_WORD word8
1183 uint8_t vlan_bitmap[512];
1184};
1185
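A hedged sketch of reassembling the FCF MAC address that struct fcf_record spreads across word3 and word4, mirroring the bf_get() pattern. The record contents are invented and only the two MAC words are reproduced in a hypothetical reduced struct.

/* User-space sketch: extract the six MAC bytes from an FCF record. */
#include <stdint.h>
#include <stdio.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct fcf_record_mac {		/* reduced: word3/word4 only */
	uint32_t word3;
#define lpfc_fcf_record_mac_0_SHIFT	0
#define lpfc_fcf_record_mac_0_MASK	0x000000FF
#define lpfc_fcf_record_mac_0_WORD	word3
#define lpfc_fcf_record_mac_1_SHIFT	8
#define lpfc_fcf_record_mac_1_MASK	0x000000FF
#define lpfc_fcf_record_mac_1_WORD	word3
#define lpfc_fcf_record_mac_2_SHIFT	16
#define lpfc_fcf_record_mac_2_MASK	0x000000FF
#define lpfc_fcf_record_mac_2_WORD	word3
#define lpfc_fcf_record_mac_3_SHIFT	24
#define lpfc_fcf_record_mac_3_MASK	0x000000FF
#define lpfc_fcf_record_mac_3_WORD	word3
	uint32_t word4;
#define lpfc_fcf_record_mac_4_SHIFT	0
#define lpfc_fcf_record_mac_4_MASK	0x000000FF
#define lpfc_fcf_record_mac_4_WORD	word4
#define lpfc_fcf_record_mac_5_SHIFT	8
#define lpfc_fcf_record_mac_5_MASK	0x000000FF
#define lpfc_fcf_record_mac_5_WORD	word4
};

int main(void)
{
	/* Invented record encoding MAC 0e:fc:00:01:02:03 */
	struct fcf_record_mac rec = { .word3 = 0x0100fc0e, .word4 = 0x0302 };
	uint8_t mac[6] = {
		bf_get(lpfc_fcf_record_mac_0, &rec),
		bf_get(lpfc_fcf_record_mac_1, &rec),
		bf_get(lpfc_fcf_record_mac_2, &rec),
		bf_get(lpfc_fcf_record_mac_3, &rec),
		bf_get(lpfc_fcf_record_mac_4, &rec),
		bf_get(lpfc_fcf_record_mac_5, &rec),
	};

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}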
1186struct lpfc_mbx_read_fcf_tbl {
1187 union lpfc_sli4_cfg_shdr cfg_shdr;
1188 union {
1189 struct {
1190 uint32_t word10;
1191#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0
1192#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF
1193#define lpfc_mbx_read_fcf_tbl_indx_WORD word10
1194 } request;
1195 struct {
1196 uint32_t eventag;
1197 } response;
1198 } u;
1199 uint32_t word11;
1200#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0
1201#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF
1202#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11
1203};
1204
1205struct lpfc_mbx_add_fcf_tbl_entry {
1206 union lpfc_sli4_cfg_shdr cfg_shdr;
1207 uint32_t word10;
1208#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0
1209#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF
1210#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10
1211 struct lpfc_mbx_sge fcf_sge;
1212};
1213
1214struct lpfc_mbx_del_fcf_tbl_entry {
1215 struct mbox_header header;
1216 uint32_t word10;
1217#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0
1218#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF
1219#define lpfc_mbx_del_fcf_tbl_count_WORD word10
1220#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16
1221#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF
1222#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1223};
1224
1225/* Status field for embedded SLI_CONFIG mailbox command */
1226#define STATUS_SUCCESS 0x0
1227#define STATUS_FAILED 0x1
1228#define STATUS_ILLEGAL_REQUEST 0x2
1229#define STATUS_ILLEGAL_FIELD 0x3
1230#define STATUS_INSUFFICIENT_BUFFER 0x4
1231#define STATUS_UNAUTHORIZED_REQUEST 0x5
1232#define STATUS_FLASHROM_SAVE_FAILED 0x17
1233#define STATUS_FLASHROM_RESTORE_FAILED 0x18
1234#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a
1235#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b
1236#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c
1237#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d
1238#define STATUS_ASSERT_FAILED 0x1e
1239#define STATUS_INVALID_SESSION 0x1f
1240#define STATUS_INVALID_CONNECTION 0x20
1241#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21
1242#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24
1243#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25
1244#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26
1245#define STATUS_FLASHROM_READ_FAILED 0x27
1246#define STATUS_POLL_IOCTL_TIMEOUT 0x28
1247#define STATUS_ERROR_ACITMAIN 0x2a
1248#define STATUS_REBOOT_REQUIRED 0x2c
1249#define STATUS_FCF_IN_USE 0x3a
1250
1251struct lpfc_mbx_sli4_config {
1252 struct mbox_header header;
1253};
1254
1255struct lpfc_mbx_init_vfi {
1256 uint32_t word1;
1257#define lpfc_init_vfi_vr_SHIFT 31
1258#define lpfc_init_vfi_vr_MASK 0x00000001
1259#define lpfc_init_vfi_vr_WORD word1
1260#define lpfc_init_vfi_vt_SHIFT 30
1261#define lpfc_init_vfi_vt_MASK 0x00000001
1262#define lpfc_init_vfi_vt_WORD word1
1263#define lpfc_init_vfi_vf_SHIFT 29
1264#define lpfc_init_vfi_vf_MASK 0x00000001
1265#define lpfc_init_vfi_vf_WORD word1
1266#define lpfc_init_vfi_vfi_SHIFT 0
1267#define lpfc_init_vfi_vfi_MASK 0x0000FFFF
1268#define lpfc_init_vfi_vfi_WORD word1
1269 uint32_t word2;
1270#define lpfc_init_vfi_fcfi_SHIFT 0
1271#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF
1272#define lpfc_init_vfi_fcfi_WORD word2
1273 uint32_t word3;
1274#define lpfc_init_vfi_pri_SHIFT 13
1275#define lpfc_init_vfi_pri_MASK 0x00000007
1276#define lpfc_init_vfi_pri_WORD word3
1277#define lpfc_init_vfi_vf_id_SHIFT 1
1278#define lpfc_init_vfi_vf_id_MASK 0x00000FFF
1279#define lpfc_init_vfi_vf_id_WORD word3
1280 uint32_t word4;
1281#define lpfc_init_vfi_hop_count_SHIFT 24
1282#define lpfc_init_vfi_hop_count_MASK 0x000000FF
1283#define lpfc_init_vfi_hop_count_WORD word4
1284};
1285
1286struct lpfc_mbx_reg_vfi {
1287 uint32_t word1;
1288#define lpfc_reg_vfi_vp_SHIFT 28
1289#define lpfc_reg_vfi_vp_MASK 0x00000001
1290#define lpfc_reg_vfi_vp_WORD word1
1291#define lpfc_reg_vfi_vfi_SHIFT 0
1292#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF
1293#define lpfc_reg_vfi_vfi_WORD word1
1294 uint32_t word2;
1295#define lpfc_reg_vfi_vpi_SHIFT 16
1296#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF
1297#define lpfc_reg_vfi_vpi_WORD word2
1298#define lpfc_reg_vfi_fcfi_SHIFT 0
1299#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
1300#define lpfc_reg_vfi_fcfi_WORD word2
1301 uint32_t word3_rsvd;
1302 uint32_t word4_rsvd;
1303 struct ulp_bde64 bde;
1304 uint32_t word8_rsvd;
1305 uint32_t word9_rsvd;
1306 uint32_t word10;
1307#define lpfc_reg_vfi_nport_id_SHIFT 0
1308#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
1309#define lpfc_reg_vfi_nport_id_WORD word10
1310};
1311
1312struct lpfc_mbx_init_vpi {
1313 uint32_t word1;
1314#define lpfc_init_vpi_vfi_SHIFT 16
1315#define lpfc_init_vpi_vfi_MASK 0x0000FFFF
1316#define lpfc_init_vpi_vfi_WORD word1
1317#define lpfc_init_vpi_vpi_SHIFT 0
1318#define lpfc_init_vpi_vpi_MASK 0x0000FFFF
1319#define lpfc_init_vpi_vpi_WORD word1
1320};
1321
1322struct lpfc_mbx_read_vpi {
1323 uint32_t word1_rsvd;
1324 uint32_t word2;
1325#define lpfc_mbx_read_vpi_vnportid_SHIFT 0
1326#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF
1327#define lpfc_mbx_read_vpi_vnportid_WORD word2
1328 uint32_t word3_rsvd;
1329 uint32_t word4;
1330#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0
1331#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF
1332#define lpfc_mbx_read_vpi_acq_alpa_WORD word4
1333#define lpfc_mbx_read_vpi_pb_SHIFT 15
1334#define lpfc_mbx_read_vpi_pb_MASK 0x00000001
1335#define lpfc_mbx_read_vpi_pb_WORD word4
1336#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16
1337#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF
1338#define lpfc_mbx_read_vpi_spec_alpa_WORD word4
1339#define lpfc_mbx_read_vpi_ns_SHIFT 30
1340#define lpfc_mbx_read_vpi_ns_MASK 0x00000001
1341#define lpfc_mbx_read_vpi_ns_WORD word4
1342#define lpfc_mbx_read_vpi_hl_SHIFT 31
1343#define lpfc_mbx_read_vpi_hl_MASK 0x00000001
1344#define lpfc_mbx_read_vpi_hl_WORD word4
1345 uint32_t word5_rsvd;
1346 uint32_t word6;
1347#define lpfc_mbx_read_vpi_vpi_SHIFT 0
1348#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF
1349#define lpfc_mbx_read_vpi_vpi_WORD word6
1350 uint32_t word7;
1351#define lpfc_mbx_read_vpi_mac_0_SHIFT 0
1352#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF
1353#define lpfc_mbx_read_vpi_mac_0_WORD word7
1354#define lpfc_mbx_read_vpi_mac_1_SHIFT 8
1355#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF
1356#define lpfc_mbx_read_vpi_mac_1_WORD word7
1357#define lpfc_mbx_read_vpi_mac_2_SHIFT 16
1358#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF
1359#define lpfc_mbx_read_vpi_mac_2_WORD word7
1360#define lpfc_mbx_read_vpi_mac_3_SHIFT 24
1361#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF
1362#define lpfc_mbx_read_vpi_mac_3_WORD word7
1363 uint32_t word8;
1364#define lpfc_mbx_read_vpi_mac_4_SHIFT 0
1365#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF
1366#define lpfc_mbx_read_vpi_mac_4_WORD word8
1367#define lpfc_mbx_read_vpi_mac_5_SHIFT 8
1368#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF
1369#define lpfc_mbx_read_vpi_mac_5_WORD word8
1370#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16
1371#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF
1372#define lpfc_mbx_read_vpi_vlan_tag_WORD word8
1373#define lpfc_mbx_read_vpi_vv_SHIFT 28
1374#define lpfc_mbx_read_vpi_vv_MASK 0x00000001
1375#define lpfc_mbx_read_vpi_vv_WORD word8
1376};
1377
1378struct lpfc_mbx_unreg_vfi {
1379 uint32_t word1_rsvd;
1380 uint32_t word2;
1381#define lpfc_unreg_vfi_vfi_SHIFT 0
1382#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF
1383#define lpfc_unreg_vfi_vfi_WORD word2
1384};
1385
1386struct lpfc_mbx_resume_rpi {
1387 uint32_t word1;
1388#define lpfc_resume_rpi_rpi_SHIFT 0
1389#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF
1390#define lpfc_resume_rpi_rpi_WORD word1
1391 uint32_t event_tag;
1392 uint32_t word3_rsvd;
1393 uint32_t word4_rsvd;
1394 uint32_t word5_rsvd;
1395 uint32_t word6;
1396#define lpfc_resume_rpi_vpi_SHIFT 0
1397#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
1398#define lpfc_resume_rpi_vpi_WORD word6
1399#define lpfc_resume_rpi_vfi_SHIFT 16
1400#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
1401#define lpfc_resume_rpi_vfi_WORD word6
1402};
1403
1404#define REG_FCF_INVALID_QID 0xFFFF
1405struct lpfc_mbx_reg_fcfi {
1406 uint32_t word1;
1407#define lpfc_reg_fcfi_info_index_SHIFT 0
1408#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF
1409#define lpfc_reg_fcfi_info_index_WORD word1
1410#define lpfc_reg_fcfi_fcfi_SHIFT 16
1411#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF
1412#define lpfc_reg_fcfi_fcfi_WORD word1
1413 uint32_t word2;
1414#define lpfc_reg_fcfi_rq_id1_SHIFT 0
1415#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF
1416#define lpfc_reg_fcfi_rq_id1_WORD word2
1417#define lpfc_reg_fcfi_rq_id0_SHIFT 16
1418#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF
1419#define lpfc_reg_fcfi_rq_id0_WORD word2
1420 uint32_t word3;
1421#define lpfc_reg_fcfi_rq_id3_SHIFT 0
1422#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF
1423#define lpfc_reg_fcfi_rq_id3_WORD word3
1424#define lpfc_reg_fcfi_rq_id2_SHIFT 16
1425#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF
1426#define lpfc_reg_fcfi_rq_id2_WORD word3
1427 uint32_t word4;
1428#define lpfc_reg_fcfi_type_match0_SHIFT 24
1429#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF
1430#define lpfc_reg_fcfi_type_match0_WORD word4
1431#define lpfc_reg_fcfi_type_mask0_SHIFT 16
1432#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF
1433#define lpfc_reg_fcfi_type_mask0_WORD word4
1434#define lpfc_reg_fcfi_rctl_match0_SHIFT 8
1435#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF
1436#define lpfc_reg_fcfi_rctl_match0_WORD word4
1437#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0
1438#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF
1439#define lpfc_reg_fcfi_rctl_mask0_WORD word4
1440 uint32_t word5;
1441#define lpfc_reg_fcfi_type_match1_SHIFT 24
1442#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF
1443#define lpfc_reg_fcfi_type_match1_WORD word5
1444#define lpfc_reg_fcfi_type_mask1_SHIFT 16
1445#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF
1446#define lpfc_reg_fcfi_type_mask1_WORD word5
1447#define lpfc_reg_fcfi_rctl_match1_SHIFT 8
1448#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF
1449#define lpfc_reg_fcfi_rctl_match1_WORD word5
1450#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0
1451#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF
1452#define lpfc_reg_fcfi_rctl_mask1_WORD word5
1453 uint32_t word6;
1454#define lpfc_reg_fcfi_type_match2_SHIFT 24
1455#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF
1456#define lpfc_reg_fcfi_type_match2_WORD word6
1457#define lpfc_reg_fcfi_type_mask2_SHIFT 16
1458#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF
1459#define lpfc_reg_fcfi_type_mask2_WORD word6
1460#define lpfc_reg_fcfi_rctl_match2_SHIFT 8
1461#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF
1462#define lpfc_reg_fcfi_rctl_match2_WORD word6
1463#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0
1464#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF
1465#define lpfc_reg_fcfi_rctl_mask2_WORD word6
1466 uint32_t word7;
1467#define lpfc_reg_fcfi_type_match3_SHIFT 24
1468#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF
1469#define lpfc_reg_fcfi_type_match3_WORD word7
1470#define lpfc_reg_fcfi_type_mask3_SHIFT 16
1471#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF
1472#define lpfc_reg_fcfi_type_mask3_WORD word7
1473#define lpfc_reg_fcfi_rctl_match3_SHIFT 8
1474#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF
1475#define lpfc_reg_fcfi_rctl_match3_WORD word7
1476#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0
1477#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF
1478#define lpfc_reg_fcfi_rctl_mask3_WORD word7
1479 uint32_t word8;
1480#define lpfc_reg_fcfi_mam_SHIFT 13
1481#define lpfc_reg_fcfi_mam_MASK 0x00000003
1482#define lpfc_reg_fcfi_mam_WORD word8
1483#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */
1484#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */
1485#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */
1486#define lpfc_reg_fcfi_vv_SHIFT 12
1487#define lpfc_reg_fcfi_vv_MASK 0x00000001
1488#define lpfc_reg_fcfi_vv_WORD word8
1489#define lpfc_reg_fcfi_vlan_tag_SHIFT 0
1490#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF
1491#define lpfc_reg_fcfi_vlan_tag_WORD word8
1492};
1493
1494struct lpfc_mbx_unreg_fcfi {
1495 uint32_t word1_rsv;
1496 uint32_t word2;
1497#define lpfc_unreg_fcfi_SHIFT 0
1498#define lpfc_unreg_fcfi_MASK 0x0000FFFF
1499#define lpfc_unreg_fcfi_WORD word2
1500};
1501
1502struct lpfc_mbx_read_rev {
1503 uint32_t word1;
1504#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16
1505#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F
1506#define lpfc_mbx_rd_rev_sli_lvl_WORD word1
1507#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
1508#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
1509#define lpfc_mbx_rd_rev_fcoe_WORD word1
1510#define lpfc_mbx_rd_rev_vpd_SHIFT 29
1511#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
1512#define lpfc_mbx_rd_rev_vpd_WORD word1
1513 uint32_t first_hw_rev;
1514 uint32_t second_hw_rev;
1515 uint32_t word4_rsvd;
1516 uint32_t third_hw_rev;
1517 uint32_t word6;
1518#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0
1519#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF
1520#define lpfc_mbx_rd_rev_fcph_low_WORD word6
1521#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8
1522#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF
1523#define lpfc_mbx_rd_rev_fcph_high_WORD word6
1524#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16
1525#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF
1526#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6
1527#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24
1528#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF
1529#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6
1530 uint32_t word7_rsvd;
1531 uint32_t fw_id_rev;
1532 uint8_t fw_name[16];
1533 uint32_t ulp_fw_id_rev;
1534 uint8_t ulp_fw_name[16];
1535 uint32_t word18_47_rsvd[30];
1536 uint32_t word48;
1537#define lpfc_mbx_rd_rev_avail_len_SHIFT 0
1538#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF
1539#define lpfc_mbx_rd_rev_avail_len_WORD word48
1540 uint32_t vpd_paddr_low;
1541 uint32_t vpd_paddr_high;
1542 uint32_t avail_vpd_len;
1543 uint32_t rsvd_52_63[12];
1544};
1545
1546struct lpfc_mbx_read_config {
1547 uint32_t word1;
1548#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
1549#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
1550#define lpfc_mbx_rd_conf_max_bbc_WORD word1
1551#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
1552#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
1553#define lpfc_mbx_rd_conf_init_bbc_WORD word1
1554 uint32_t word2;
1555#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
1556#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
1557#define lpfc_mbx_rd_conf_nport_did_WORD word2
1558#define lpfc_mbx_rd_conf_topology_SHIFT 24
1559#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
1560#define lpfc_mbx_rd_conf_topology_WORD word2
1561 uint32_t word3;
1562#define lpfc_mbx_rd_conf_ao_SHIFT 0
1563#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
1564#define lpfc_mbx_rd_conf_ao_WORD word3
1565#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
1566#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
1567#define lpfc_mbx_rd_conf_bb_scn_WORD word3
1568#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
1569#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
1570#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
1571#define lpfc_mbx_rd_conf_mc_SHIFT 29
1572#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
1573#define lpfc_mbx_rd_conf_mc_WORD word3
1574 uint32_t word4;
1575#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
1576#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
1577#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
1578 uint32_t word5;
1579#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
1580#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
1581#define lpfc_mbx_rd_conf_lp_tov_WORD word5
1582 uint32_t word6;
1583#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
1584#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
1585#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
1586 uint32_t word7;
1587#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
1588#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
1589#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
1590 uint32_t word8;
1591#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
1592#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
1593#define lpfc_mbx_rd_conf_al_tov_WORD word8
1594 uint32_t word9;
1595#define lpfc_mbx_rd_conf_lmt_SHIFT 0
1596#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
1597#define lpfc_mbx_rd_conf_lmt_WORD word9
1598 uint32_t word10;
1599#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
1600#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
1601#define lpfc_mbx_rd_conf_max_alpa_WORD word10
1602 uint32_t word11_rsvd;
1603 uint32_t word12;
1604#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
1605#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
1606#define lpfc_mbx_rd_conf_xri_base_WORD word12
1607#define lpfc_mbx_rd_conf_xri_count_SHIFT 16
1608#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF
1609#define lpfc_mbx_rd_conf_xri_count_WORD word12
1610 uint32_t word13;
1611#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0
1612#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF
1613#define lpfc_mbx_rd_conf_rpi_base_WORD word13
1614#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16
1615#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF
1616#define lpfc_mbx_rd_conf_rpi_count_WORD word13
1617 uint32_t word14;
1618#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0
1619#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF
1620#define lpfc_mbx_rd_conf_vpi_base_WORD word14
1621#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16
1622#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF
1623#define lpfc_mbx_rd_conf_vpi_count_WORD word14
1624 uint32_t word15;
1625#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0
1626#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF
1627#define lpfc_mbx_rd_conf_vfi_base_WORD word15
1628#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16
1629#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
1630#define lpfc_mbx_rd_conf_vfi_count_WORD word15
1631 uint32_t word16;
1632#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
1633#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
1634#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
1635#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
1636#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
1637#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
1638 uint32_t word17;
1639#define lpfc_mbx_rd_conf_rq_count_SHIFT 0
1640#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF
1641#define lpfc_mbx_rd_conf_rq_count_WORD word17
1642#define lpfc_mbx_rd_conf_eq_count_SHIFT 16
1643#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF
1644#define lpfc_mbx_rd_conf_eq_count_WORD word17
1645 uint32_t word18;
1646#define lpfc_mbx_rd_conf_wq_count_SHIFT 0
1647#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF
1648#define lpfc_mbx_rd_conf_wq_count_WORD word18
1649#define lpfc_mbx_rd_conf_cq_count_SHIFT 16
1650#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF
1651#define lpfc_mbx_rd_conf_cq_count_WORD word18
1652};
1653
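The base/count pairs in words 12 through 16 tell the driver which XRI, RPI, VPI, VFI, and FCFI ranges this function owns; the driver presumably caches them once READ_CONFIG completes, roughly as below (field accessors from the defines above; the max_cfg_param destination is an assumption from lpfc_sli4.h):

/* Sketch: harvest resource ranges from a completed READ_CONFIG. */
struct lpfc_mbx_read_config *rd_config = &pmb->u.mqe.un.rd_config;

phba->sli4_hba.max_cfg_param.xri_base =
	bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
	bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
/* ...and likewise for the rpi/vpi/vfi/fcfi base and count pairs. */
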
1654struct lpfc_mbx_request_features {
1655 uint32_t word1;
1656#define lpfc_mbx_rq_ftr_qry_SHIFT 0
1657#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001
1658#define lpfc_mbx_rq_ftr_qry_WORD word1
1659 uint32_t word2;
1660#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0
1661#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001
1662#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2
1663#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1
1664#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001
1665#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2
1666#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2
1667#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001
1668#define lpfc_mbx_rq_ftr_rq_dif_WORD word2
1669#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3
1670#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001
1671#define lpfc_mbx_rq_ftr_rq_vf_WORD word2
1672#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4
1673#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001
1674#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2
1675#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5
1676#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001
1677#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2
1678#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6
1679#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001
1680#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2
1681#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
1682#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
1683#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
1684 uint32_t word3;
1685#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
1686#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
1687#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3
1688#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1
1689#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001
1690#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3
1691#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2
1692#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001
1693#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3
1694#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3
1695#define lpfc_mbx_rq_ftr_rsp_vf_MASK 0x00000001
1696#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3
1697#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4
1698#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001
1699#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3
1700#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5
1701#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001
1702#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3
1703#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6
1704#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001
1705#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3
1706#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
1707#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
1708#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
1709};
1710
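REQUEST_FEATURES is a negotiation: word2 carries what the driver asks for and word3 what the port granted, so every rq_* bit has an rsp_* twin. A sketch of requesting NPIV and FCP initiator mode and then verifying the grant (mailbox submission plumbing elided):

struct lpfc_mbx_request_features *ftrs = &mboxq->u.mqe.un.req_ftrs;

bf_set(lpfc_mbx_rq_ftr_rq_npiv, ftrs, 1);	/* ask for NPIV */
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, ftrs, 1);	/* ask for FCP initiator */
/* ...issue the mailbox, then check the response bits... */
if (!bf_get(lpfc_mbx_rq_ftr_rsp_npiv, ftrs))
	/* port refused NPIV; fall back to the physical port only */
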
1711/* Mailbox Completion Queue Error Messages */
1712#define MB_CQE_STATUS_SUCCESS 0x0
1713#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
1714#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
1715#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
1716#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
1717#define MB_CQE_STATUS_DMA_FAILED 0x5
1718
1719/* mailbox queue entry structure */
1720struct lpfc_mqe {
1721 uint32_t word0;
1722#define lpfc_mqe_status_SHIFT 16
1723#define lpfc_mqe_status_MASK 0x0000FFFF
1724#define lpfc_mqe_status_WORD word0
1725#define lpfc_mqe_command_SHIFT 8
1726#define lpfc_mqe_command_MASK 0x000000FF
1727#define lpfc_mqe_command_WORD word0
1728 union {
1729 uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
1730 /* sli4 mailbox commands */
1731 struct lpfc_mbx_sli4_config sli4_config;
1732 struct lpfc_mbx_init_vfi init_vfi;
1733 struct lpfc_mbx_reg_vfi reg_vfi;
1734	struct lpfc_mbx_unreg_vfi unreg_vfi;
1735 struct lpfc_mbx_init_vpi init_vpi;
1736 struct lpfc_mbx_resume_rpi resume_rpi;
1737 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
1738 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
1739 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
1740 struct lpfc_mbx_reg_fcfi reg_fcfi;
1741 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
1742 struct lpfc_mbx_mq_create mq_create;
1743 struct lpfc_mbx_eq_create eq_create;
1744 struct lpfc_mbx_cq_create cq_create;
1745 struct lpfc_mbx_wq_create wq_create;
1746 struct lpfc_mbx_rq_create rq_create;
1747 struct lpfc_mbx_mq_destroy mq_destroy;
1748 struct lpfc_mbx_eq_destroy eq_destroy;
1749 struct lpfc_mbx_cq_destroy cq_destroy;
1750 struct lpfc_mbx_wq_destroy wq_destroy;
1751 struct lpfc_mbx_rq_destroy rq_destroy;
1752 struct lpfc_mbx_post_sgl_pages post_sgl_pages;
1753 struct lpfc_mbx_nembed_cmd nembed_cmd;
1754 struct lpfc_mbx_read_rev read_rev;
1755 struct lpfc_mbx_read_vpi read_vpi;
1756 struct lpfc_mbx_read_config rd_config;
1757 struct lpfc_mbx_request_features req_ftrs;
1758 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
1759 struct lpfc_mbx_nop nop;
1760 } un;
1761};
1762
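Word 0 of the MQE multiplexes the opcode on submission and the status on completion; everything after it is the command-specific union. A sketch of both sides (MBX_SLI4_CONFIG is the SLI_CONFIG opcode added elsewhere in this patch):

/* Submission: place the opcode in word0. */
bf_set(lpfc_mqe_command, mqe, MBX_SLI4_CONFIG);

/* Completion: the port writes the status back into the same word. */
if (bf_get(lpfc_mqe_status, mqe) != MBX_SUCCESS)
	/* command failed; the embedded shdr carries the detailed status */
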
1763struct lpfc_mcqe {
1764 uint32_t word0;
1765#define lpfc_mcqe_status_SHIFT 0
1766#define lpfc_mcqe_status_MASK 0x0000FFFF
1767#define lpfc_mcqe_status_WORD word0
1768#define lpfc_mcqe_ext_status_SHIFT 16
1769#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
1770#define lpfc_mcqe_ext_status_WORD word0
1771 uint32_t mcqe_tag0;
1772 uint32_t mcqe_tag1;
1773 uint32_t trailer;
1774#define lpfc_trailer_valid_SHIFT 31
1775#define lpfc_trailer_valid_MASK 0x00000001
1776#define lpfc_trailer_valid_WORD trailer
1777#define lpfc_trailer_async_SHIFT 30
1778#define lpfc_trailer_async_MASK 0x00000001
1779#define lpfc_trailer_async_WORD trailer
1780#define lpfc_trailer_hpi_SHIFT 29
1781#define lpfc_trailer_hpi_MASK 0x00000001
1782#define lpfc_trailer_hpi_WORD trailer
1783#define lpfc_trailer_completed_SHIFT 28
1784#define lpfc_trailer_completed_MASK 0x00000001
1785#define lpfc_trailer_completed_WORD trailer
1786#define lpfc_trailer_consumed_SHIFT 27
1787#define lpfc_trailer_consumed_MASK 0x00000001
1788#define lpfc_trailer_consumed_WORD trailer
1789#define lpfc_trailer_type_SHIFT 16
1790#define lpfc_trailer_type_MASK 0x000000FF
1791#define lpfc_trailer_type_WORD trailer
1792#define lpfc_trailer_code_SHIFT 8
1793#define lpfc_trailer_code_MASK 0x000000FF
1794#define lpfc_trailer_code_WORD trailer
1795#define LPFC_TRAILER_CODE_LINK 0x1
1796#define LPFC_TRAILER_CODE_FCOE 0x2
1797#define LPFC_TRAILER_CODE_DCBX 0x3
1798};
1799
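A consumer of the mailbox CQ first checks the trailer's valid bit, then branches on whether the entry is a command completion or an asynchronous event; async events dispatch on the trailer code. Sketch, using only the accessors and codes defined above:

if (!bf_get(lpfc_trailer_valid, mcqe))
	return;	/* stale entry, nothing to consume */
if (bf_get(lpfc_trailer_async, mcqe)) {
	switch (bf_get(lpfc_trailer_code, mcqe)) {
	case LPFC_TRAILER_CODE_LINK:	/* struct lpfc_acqe_link */
	case LPFC_TRAILER_CODE_FCOE:	/* struct lpfc_acqe_fcoe */
	case LPFC_TRAILER_CODE_DCBX:	/* struct lpfc_acqe_dcbx */
		break;
	}
} else if (bf_get(lpfc_trailer_completed, mcqe)) {
	/* synchronous mailbox command completion */
}
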
1800struct lpfc_acqe_link {
1801 uint32_t word0;
1802#define lpfc_acqe_link_speed_SHIFT 24
1803#define lpfc_acqe_link_speed_MASK 0x000000FF
1804#define lpfc_acqe_link_speed_WORD word0
1805#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0
1806#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1
1807#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
1808#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
1809#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
1810#define lpfc_acqe_link_duplex_SHIFT 16
1811#define lpfc_acqe_link_duplex_MASK 0x000000FF
1812#define lpfc_acqe_link_duplex_WORD word0
1813#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0
1814#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1
1815#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2
1816#define lpfc_acqe_link_status_SHIFT 8
1817#define lpfc_acqe_link_status_MASK 0x000000FF
1818#define lpfc_acqe_link_status_WORD word0
1819#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0
1820#define LPFC_ASYNC_LINK_STATUS_UP 0x1
1821#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
1822#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
1823#define lpfc_acqe_link_physical_SHIFT 0
1824#define lpfc_acqe_link_physical_MASK 0x000000FF
1825#define lpfc_acqe_link_physical_WORD word0
1826#define LPFC_ASYNC_LINK_PORT_A 0x0
1827#define LPFC_ASYNC_LINK_PORT_B 0x1
1828 uint32_t word1;
1829#define lpfc_acqe_link_fault_SHIFT 0
1830#define lpfc_acqe_link_fault_MASK 0x000000FF
1831#define lpfc_acqe_link_fault_WORD word1
1832#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
1833#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
1834#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
1835 uint32_t event_tag;
1836 uint32_t trailer;
1837};
1838
1839struct lpfc_acqe_fcoe {
1840 uint32_t fcf_index;
1841 uint32_t word1;
1842#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
1843#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
1844#define lpfc_acqe_fcoe_fcf_count_WORD word1
1845#define lpfc_acqe_fcoe_event_type_SHIFT 16
1846#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF
1847#define lpfc_acqe_fcoe_event_type_WORD word1
1848#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
1849#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1850#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
1851 uint32_t event_tag;
1852 uint32_t trailer;
1853};
1854
1855struct lpfc_acqe_dcbx {
1856 uint32_t tlv_ttl;
1857 uint32_t reserved;
1858 uint32_t event_tag;
1859 uint32_t trailer;
1860};
1861
1862/*
1863 * Define the bootstrap mailbox (bmbx) region used to communicate
 1864 * mailbox commands between the host and port. The mailbox consists
1865 * of a payload area of 256 bytes and a completion queue of length
1866 * 16 bytes.
1867 */
1868struct lpfc_bmbx_create {
1869 struct lpfc_mqe mqe;
1870 struct lpfc_mcqe mcqe;
1871};
1872
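The sizes in the comment follow from the structures: lpfc_mcqe is four 32-bit words (16 bytes), and lpfc_mqe is word0 plus LPFC_SLI4_MB_WORD_COUNT - 1 payload words, i.e. 256 bytes assuming the count is 64. A compile-time check would make that assumption explicit (sketch):

/* Sketch: the bmbx region is one 256-byte MQE followed by a 16-byte MCQE. */
BUILD_BUG_ON(sizeof(struct lpfc_mqe) != 256);
BUILD_BUG_ON(sizeof(struct lpfc_mcqe) != 16);
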
1873#define SGL_ALIGN_SZ 64
1874#define SGL_PAGE_SIZE 4096
1875/* align SGL addr on a size boundary - adjust address up */
1876#define NO_XRI ((uint16_t)-1)
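SGL_ALIGN_SZ and the comment above describe rounding a DMA address up to the next 64-byte boundary before posting SGL pages; the usual power-of-two idiom:

/* Round addr up to the next SGL_ALIGN_SZ boundary (adjust address up). */
dma_addr_t aligned = (addr + SGL_ALIGN_SZ - 1) &
		     ~((dma_addr_t)SGL_ALIGN_SZ - 1);
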
1877struct wqe_common {
1878 uint32_t word6;
1879#define wqe_xri_SHIFT 0
1880#define wqe_xri_MASK 0x0000FFFF
1881#define wqe_xri_WORD word6
1882#define wqe_ctxt_tag_SHIFT 16
1883#define wqe_ctxt_tag_MASK 0x0000FFFF
1884#define wqe_ctxt_tag_WORD word6
1885 uint32_t word7;
1886#define wqe_ct_SHIFT 2
1887#define wqe_ct_MASK 0x00000003
1888#define wqe_ct_WORD word7
1889#define wqe_status_SHIFT 4
1890#define wqe_status_MASK 0x0000000f
1891#define wqe_status_WORD word7
1892#define wqe_cmnd_SHIFT 8
1893#define wqe_cmnd_MASK 0x000000ff
1894#define wqe_cmnd_WORD word7
1895#define wqe_class_SHIFT 16
1896#define wqe_class_MASK 0x00000007
1897#define wqe_class_WORD word7
1898#define wqe_pu_SHIFT 20
1899#define wqe_pu_MASK 0x00000003
1900#define wqe_pu_WORD word7
1901#define wqe_erp_SHIFT 22
1902#define wqe_erp_MASK 0x00000001
1903#define wqe_erp_WORD word7
1904#define wqe_lnk_SHIFT 23
1905#define wqe_lnk_MASK 0x00000001
1906#define wqe_lnk_WORD word7
1907#define wqe_tmo_SHIFT 24
1908#define wqe_tmo_MASK 0x000000ff
1909#define wqe_tmo_WORD word7
1910 uint32_t abort_tag; /* word 8 in WQE */
1911 uint32_t word9;
1912#define wqe_reqtag_SHIFT 0
1913#define wqe_reqtag_MASK 0x0000FFFF
1914#define wqe_reqtag_WORD word9
1915#define wqe_rcvoxid_SHIFT 16
1916#define wqe_rcvoxid_MASK 0x0000FFFF
1917#define wqe_rcvoxid_WORD word9
1918 uint32_t word10;
1919#define wqe_pri_SHIFT 16
1920#define wqe_pri_MASK 0x00000007
1921#define wqe_pri_WORD word10
1922#define wqe_pv_SHIFT 19
1923#define wqe_pv_MASK 0x00000001
1924#define wqe_pv_WORD word10
1925#define wqe_xc_SHIFT 21
1926#define wqe_xc_MASK 0x00000001
1927#define wqe_xc_WORD word10
1928#define wqe_ccpe_SHIFT 23
1929#define wqe_ccpe_MASK 0x00000001
1930#define wqe_ccpe_WORD word10
1931#define wqe_ccp_SHIFT 24
1932#define wqe_ccp_MASK 0x000000ff
1933#define wqe_ccp_WORD word10
1934 uint32_t word11;
1935#define wqe_cmd_type_SHIFT 0
1936#define wqe_cmd_type_MASK 0x0000000f
1937#define wqe_cmd_type_WORD word11
1938#define wqe_wqec_SHIFT 7
1939#define wqe_wqec_MASK 0x00000001
1940#define wqe_wqec_WORD word11
1941#define wqe_cqid_SHIFT 16
1942#define wqe_cqid_MASK 0x000003ff
1943#define wqe_cqid_WORD word11
1944};
1945
1946struct wqe_did {
1947 uint32_t word5;
1948#define wqe_els_did_SHIFT 0
1949#define wqe_els_did_MASK 0x00FFFFFF
1950#define wqe_els_did_WORD word5
1951#define wqe_xmit_bls_ar_SHIFT 30
1952#define wqe_xmit_bls_ar_MASK 0x00000001
1953#define wqe_xmit_bls_ar_WORD word5
1954#define wqe_xmit_bls_xo_SHIFT 31
1955#define wqe_xmit_bls_xo_MASK 0x00000001
1956#define wqe_xmit_bls_xo_WORD word5
1957};
1958
1959struct els_request64_wqe {
1960 struct ulp_bde64 bde;
1961 uint32_t payload_len;
1962 uint32_t word4;
1963#define els_req64_sid_SHIFT 0
1964#define els_req64_sid_MASK 0x00FFFFFF
1965#define els_req64_sid_WORD word4
1966#define els_req64_sp_SHIFT 24
1967#define els_req64_sp_MASK 0x00000001
1968#define els_req64_sp_WORD word4
1969#define els_req64_vf_SHIFT 25
1970#define els_req64_vf_MASK 0x00000001
1971#define els_req64_vf_WORD word4
1972 struct wqe_did wqe_dest;
1973 struct wqe_common wqe_com; /* words 6-11 */
1974 uint32_t word12;
1975#define els_req64_vfid_SHIFT 1
1976#define els_req64_vfid_MASK 0x00000FFF
1977#define els_req64_vfid_WORD word12
1978#define els_req64_pri_SHIFT 13
1979#define els_req64_pri_MASK 0x00000007
1980#define els_req64_pri_WORD word12
1981 uint32_t word13;
1982#define els_req64_hopcnt_SHIFT 24
1983#define els_req64_hopcnt_MASK 0x000000ff
1984#define els_req64_hopcnt_WORD word13
1985 uint32_t reserved[2];
1986};
1987
1988struct xmit_els_rsp64_wqe {
1989 struct ulp_bde64 bde;
1990 uint32_t rsvd3;
1991 uint32_t rsvd4;
1992 struct wqe_did wqe_dest;
1993 struct wqe_common wqe_com; /* words 6-11 */
1994 uint32_t rsvd_12_15[4];
1995};
1996
1997struct xmit_bls_rsp64_wqe {
1998 uint32_t payload0;
1999 uint32_t word1;
2000#define xmit_bls_rsp64_rxid_SHIFT 0
2001#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
2002#define xmit_bls_rsp64_rxid_WORD word1
2003#define xmit_bls_rsp64_oxid_SHIFT 16
2004#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
2005#define xmit_bls_rsp64_oxid_WORD word1
2006 uint32_t word2;
2007#define xmit_bls_rsp64_seqcntlo_SHIFT 0
2008#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2009#define xmit_bls_rsp64_seqcntlo_WORD word2
2010#define xmit_bls_rsp64_seqcnthi_SHIFT 16
2011#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
2012#define xmit_bls_rsp64_seqcnthi_WORD word2
2013 uint32_t rsrvd3;
2014 uint32_t rsrvd4;
2015 struct wqe_did wqe_dest;
2016 struct wqe_common wqe_com; /* words 6-11 */
2017 uint32_t rsvd_12_15[4];
2018};
2019struct wqe_rctl_dfctl {
2020 uint32_t word5;
2021#define wqe_si_SHIFT 2
2022#define wqe_si_MASK 0x00000001
2023#define wqe_si_WORD word5
2024#define wqe_la_SHIFT 3
2025#define wqe_la_MASK 0x00000001
2026#define wqe_la_WORD word5
2027#define wqe_ls_SHIFT 7
2028#define wqe_ls_MASK 0x00000001
2029#define wqe_ls_WORD word5
2030#define wqe_dfctl_SHIFT 8
2031#define wqe_dfctl_MASK 0x000000ff
2032#define wqe_dfctl_WORD word5
2033#define wqe_type_SHIFT 16
2034#define wqe_type_MASK 0x000000ff
2035#define wqe_type_WORD word5
2036#define wqe_rctl_SHIFT 24
2037#define wqe_rctl_MASK 0x000000ff
2038#define wqe_rctl_WORD word5
2039};
2040
2041struct xmit_seq64_wqe {
2042 struct ulp_bde64 bde;
2043 uint32_t paylaod_offset;
2044 uint32_t relative_offset;
2045 struct wqe_rctl_dfctl wge_ctl;
2046 struct wqe_common wqe_com; /* words 6-11 */
2047 /* Note: word10 different REVISIT */
2048 uint32_t xmit_len;
2049 uint32_t rsvd_12_15[3];
2050};
2051struct xmit_bcast64_wqe {
2052 struct ulp_bde64 bde;
2053 uint32_t paylaod_len;
2054 uint32_t rsvd4;
2055 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2056 struct wqe_common wqe_com; /* words 6-11 */
2057 uint32_t rsvd_12_15[4];
2058};
2059
2060struct gen_req64_wqe {
2061 struct ulp_bde64 bde;
2062 uint32_t command_len;
2063 uint32_t payload_len;
2064 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2065 struct wqe_common wqe_com; /* words 6-11 */
2066 uint32_t rsvd_12_15[4];
2067};
2068
2069struct create_xri_wqe {
2070 uint32_t rsrvd[5]; /* words 0-4 */
2071 struct wqe_did wqe_dest; /* word 5 */
2072 struct wqe_common wqe_com; /* words 6-11 */
2073 uint32_t rsvd_12_15[4]; /* word 12-15 */
2074};
2075
2076#define T_REQUEST_TAG 3
2077#define T_XRI_TAG 1
2078
2079struct abort_cmd_wqe {
2080 uint32_t rsrvd[3];
2081 uint32_t word3;
2082#define abort_cmd_ia_SHIFT 0
2083#define abort_cmd_ia_MASK 0x00000001
2084#define abort_cmd_ia_WORD word3
2085#define abort_cmd_criteria_SHIFT 8
2086#define abort_cmd_criteria_MASK 0x000000ff
2087#define abort_cmd_criteria_WORD word3
2088 uint32_t rsrvd4;
2089 uint32_t rsrvd5;
2090 struct wqe_common wqe_com; /* words 6-11 */
2091 uint32_t rsvd_12_15[4]; /* word 12-15 */
2092};
2093
2094struct fcp_iwrite64_wqe {
2095 struct ulp_bde64 bde;
2096 uint32_t payload_len;
2097 uint32_t total_xfer_len;
2098 uint32_t initial_xfer_len;
2099 struct wqe_common wqe_com; /* words 6-11 */
2100 uint32_t rsvd_12_15[4]; /* word 12-15 */
2101};
2102
2103struct fcp_iread64_wqe {
2104 struct ulp_bde64 bde;
2105 uint32_t payload_len; /* word 3 */
2106 uint32_t total_xfer_len; /* word 4 */
2107 uint32_t rsrvd5; /* word 5 */
2108 struct wqe_common wqe_com; /* words 6-11 */
2109 uint32_t rsvd_12_15[4]; /* word 12-15 */
2110};
2111
2112struct fcp_icmnd64_wqe {
2113 struct ulp_bde64 bde; /* words 0-2 */
2114 uint32_t rsrvd[3]; /* words 3-5 */
2115 struct wqe_common wqe_com; /* words 6-11 */
2116 uint32_t rsvd_12_15[4]; /* word 12-15 */
2117};
2118
2119
2120union lpfc_wqe {
2121 uint32_t words[16];
2122 struct lpfc_wqe_generic generic;
2123 struct fcp_icmnd64_wqe fcp_icmd;
2124 struct fcp_iread64_wqe fcp_iread;
2125 struct fcp_iwrite64_wqe fcp_iwrite;
2126 struct abort_cmd_wqe abort_cmd;
2127 struct create_xri_wqe create_xri;
2128 struct xmit_bcast64_wqe xmit_bcast64;
2129 struct xmit_seq64_wqe xmit_sequence;
2130 struct xmit_bls_rsp64_wqe xmit_bls_rsp;
2131 struct xmit_els_rsp64_wqe xmit_els_rsp;
2132 struct els_request64_wqe els_req;
2133 struct gen_req64_wqe gen_req;
2134};
2135
2136#define FCP_COMMAND 0x0
2137#define FCP_COMMAND_DATA_OUT 0x1
2138#define ELS_COMMAND_NON_FIP 0xC
2139#define ELS_COMMAND_FIP 0xD
2140#define OTHER_COMMAND 0x8
2141
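All of the typed WQEs above embed wqe_common in words 6-11, so an I/O is built by filling the type-specific words first and then the shared ones; the command-type codes directly above go into the wqe_cmd_type field. A hedged sketch for an FCP read (the xritag, iotag, and command locals are illustrative):

union lpfc_wqe wqe;

memset(&wqe, 0, sizeof(wqe));
/* type-specific words 0-4 (BDE, lengths) would be filled here */
bf_set(wqe_xri, &wqe.fcp_iread.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe.fcp_iread.wqe_com, iotag);
bf_set(wqe_cmnd, &wqe.fcp_iread.wqe_com, command);
bf_set(wqe_cmd_type, &wqe.fcp_iread.wqe_com, FCP_COMMAND);
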
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86d1bdcbf2d8..fc67cc65c63b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -34,8 +34,10 @@
34#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h> 35#include <scsi/scsi_transport_fc.h>
36 36
37#include "lpfc_hw4.h"
37#include "lpfc_hw.h" 38#include "lpfc_hw.h"
38#include "lpfc_sli.h" 39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
39#include "lpfc_nl.h" 41#include "lpfc_nl.h"
40#include "lpfc_disc.h" 42#include "lpfc_disc.h"
41#include "lpfc_scsi.h" 43#include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
51unsigned long _dump_buf_dif_order; 53unsigned long _dump_buf_dif_order;
52spinlock_t _dump_buf_lock; 54spinlock_t _dump_buf_lock;
53 55
54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
56static int lpfc_post_rcv_buf(struct lpfc_hba *); 57static int lpfc_post_rcv_buf(struct lpfc_hba *);
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
57 73
58static struct scsi_transport_template *lpfc_transport_template = NULL; 74static struct scsi_transport_template *lpfc_transport_template = NULL;
59static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
92 return -ENOMEM; 108 return -ENOMEM;
93 } 109 }
94 110
95 mb = &pmb->mb; 111 mb = &pmb->u.mb;
96 phba->link_state = LPFC_INIT_MBX_CMDS; 112 phba->link_state = LPFC_INIT_MBX_CMDS;
97 113
98 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 114 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
205 mb->mbxCommand, mb->mbxStatus); 221 mb->mbxCommand, mb->mbxStatus);
206 mb->un.varDmp.word_cnt = 0; 222 mb->un.varDmp.word_cnt = 0;
207 } 223 }
224 /* dump mem may return zero when finished, or we may have hit a
225 * mailbox error; either way, we are done.
226 */
227 if (mb->un.varDmp.word_cnt == 0)
228 break;
208 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 229 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
209 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 230 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
210 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 231 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -233,7 +254,7 @@ out_free_mbox:
233static void 254static void
234lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
235{ 256{
236 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 257 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
237 phba->temp_sensor_support = 1; 258 phba->temp_sensor_support = 1;
238 else 259 else
239 phba->temp_sensor_support = 0; 260 phba->temp_sensor_support = 0;
@@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
260 /* character array used for decoding dist type. */ 281 /* character array used for decoding dist type. */
261 char dist_char[] = "nabx"; 282 char dist_char[] = "nabx";
262 283
263 if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { 284 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
264 mempool_free(pmboxq, phba->mbox_mem_pool); 285 mempool_free(pmboxq, phba->mbox_mem_pool);
265 return; 286 return;
266 } 287 }
@@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
268 prg = (struct prog_id *) &prog_id_word; 289 prg = (struct prog_id *) &prog_id_word;
269 290
270 /* word 7 contain option rom version */ 291 /* word 7 contain option rom version */
271 prog_id_word = pmboxq->mb.un.varWords[7]; 292 prog_id_word = pmboxq->u.mb.un.varWords[7];
272 293
273 /* Decode the Option rom version word to a readable string */ 294 /* Decode the Option rom version word to a readable string */
274 if (prg->dist < 4) 295 if (prg->dist < 4)
@@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
325 phba->link_state = LPFC_HBA_ERROR; 346 phba->link_state = LPFC_HBA_ERROR;
326 return -ENOMEM; 347 return -ENOMEM;
327 } 348 }
328 mb = &pmb->mb; 349 mb = &pmb->u.mb;
329 350
330 /* Get login parameters for NID. */ 351 /* Get login parameters for NID. */
331 lpfc_read_sparam(phba, pmb, 0); 352 lpfc_read_sparam(phba, pmb, 0);
@@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
364 /* Update the fc_host data structures with new wwn. */ 385 /* Update the fc_host data structures with new wwn. */
365 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
366 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388 fc_host_max_npiv_vports(shost) = phba->max_vpi;
367 389
368 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 390 /* If no serial number in VPD data, use low 6 bytes of WWNN */
369 /* This should be consolidated into parse_vpd ? - mr */ 391 /* This should be consolidated into parse_vpd ? - mr */
@@ -406,7 +428,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
406 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
407 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
408 phba->cfg_hba_queue_depth = 430 phba->cfg_hba_queue_depth =
409 mb->un.varRdConfig.max_xri + 1; 431 (mb->un.varRdConfig.max_xri + 1) -
432 lpfc_sli4_get_els_iocb_cnt(phba);
410 433
411 phba->lmt = mb->un.varRdConfig.lmt; 434 phba->lmt = mb->un.varRdConfig.lmt;
412 435
@@ -460,17 +483,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 483 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
461 "0352 Config MSI mailbox command " 484 "0352 Config MSI mailbox command "
462 "failed, mbxCmd x%x, mbxStatus x%x\n", 485 "failed, mbxCmd x%x, mbxStatus x%x\n",
463 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 486 pmb->u.mb.mbxCommand,
487 pmb->u.mb.mbxStatus);
464 mempool_free(pmb, phba->mbox_mem_pool); 488 mempool_free(pmb, phba->mbox_mem_pool);
465 return -EIO; 489 return -EIO;
466 } 490 }
467 } 491 }
468 492
493 spin_lock_irq(&phba->hbalock);
469 /* Initialize ERATT handling flag */ 494 /* Initialize ERATT handling flag */
470 phba->hba_flag &= ~HBA_ERATT_HANDLED; 495 phba->hba_flag &= ~HBA_ERATT_HANDLED;
471 496
472 /* Enable appropriate host interrupts */ 497 /* Enable appropriate host interrupts */
473 spin_lock_irq(&phba->hbalock);
474 status = readl(phba->HCregaddr); 498 status = readl(phba->HCregaddr);
475 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 499 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
476 if (psli->num_rings > 0) 500 if (psli->num_rings > 0)
@@ -571,16 +595,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
571{ 595{
572 struct lpfc_vport **vports; 596 struct lpfc_vport **vports;
573 int i; 597 int i;
574 /* Disable interrupts */ 598
575 writel(0, phba->HCregaddr); 599 if (phba->sli_rev <= LPFC_SLI_REV3) {
576 readl(phba->HCregaddr); /* flush */ 600 /* Disable interrupts */
601 writel(0, phba->HCregaddr);
602 readl(phba->HCregaddr); /* flush */
603 }
577 604
578 if (phba->pport->load_flag & FC_UNLOADING) 605 if (phba->pport->load_flag & FC_UNLOADING)
579 lpfc_cleanup_discovery_resources(phba->pport); 606 lpfc_cleanup_discovery_resources(phba->pport);
580 else { 607 else {
581 vports = lpfc_create_vport_work_array(phba); 608 vports = lpfc_create_vport_work_array(phba);
582 if (vports != NULL) 609 if (vports != NULL)
583 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 610 for (i = 0; i <= phba->max_vports &&
611 vports[i] != NULL; i++)
584 lpfc_cleanup_discovery_resources(vports[i]); 612 lpfc_cleanup_discovery_resources(vports[i]);
585 lpfc_destroy_vport_work_array(phba, vports); 613 lpfc_destroy_vport_work_array(phba, vports);
586 } 614 }
@@ -588,7 +616,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
588} 616}
589 617
590/** 618/**
591 * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset 619 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
592 * @phba: pointer to lpfc HBA data structure. 620 * @phba: pointer to lpfc HBA data structure.
593 * 621 *
594 * This routine will do uninitialization after the HBA is reset when bringing 622 * This routine will do uninitialization after the HBA is reset when bringing
@@ -598,8 +626,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
598 * 0 - success. 626 * 0 - success.
599 * Any other value - error. 627 * Any other value - error.
600 **/ 628 **/
601int 629static int
602lpfc_hba_down_post(struct lpfc_hba *phba) 630lpfc_hba_down_post_s3(struct lpfc_hba *phba)
603{ 631{
604 struct lpfc_sli *psli = &phba->sli; 632 struct lpfc_sli *psli = &phba->sli;
605 struct lpfc_sli_ring *pring; 633 struct lpfc_sli_ring *pring;
@@ -642,6 +670,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
642 670
643 return 0; 671 return 0;
644} 672}
673/**
674 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
675 * @phba: pointer to lpfc HBA data structure.
676 *
677 * This routine will do uninitialization after the HBA is reset when bringing
678 * down the SLI Layer.
679 *
680 * Return codes
681 * 0 - success.
682 * Any other value - error.
683 **/
684static int
685lpfc_hba_down_post_s4(struct lpfc_hba *phba)
686{
687 struct lpfc_scsi_buf *psb, *psb_next;
688 LIST_HEAD(aborts);
689 int ret;
690 unsigned long iflag = 0;
691 ret = lpfc_hba_down_post_s3(phba);
692 if (ret)
693 return ret;
694 /* At this point in time the HBA is either reset or DOA. Either
695 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
696 * on the lpfc_sgl_list so that it can either be freed if the
697 * driver is unloading or reposted if the driver is restarting
698 * the port.
699 */
700 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
701 /* scsi_buf_list */
702 /* abts_sgl_list_lock required because worker thread uses this
703 * list.
704 */
705 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
706 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
707 &phba->sli4_hba.lpfc_sgl_list);
708 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
709 /* abts_scsi_buf_list_lock required because worker thread uses this
710 * list.
711 */
712 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
713 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
714 &aborts);
715 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
716 spin_unlock_irq(&phba->hbalock);
717
718 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
719 psb->pCmd = NULL;
720 psb->status = IOSTAT_SUCCESS;
721 }
722 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
723 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
724 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
725 return 0;
726}
727
728/**
729 * lpfc_hba_down_post - Wrapper func for hba down post routine
730 * @phba: pointer to lpfc HBA data structure.
731 *
732 * This routine wraps the actual SLI3 or SLI4 routine for performing
733 * uninitialization after the HBA is reset when bringing down the SLI Layer.
734 *
735 * Return codes
736 * 0 - success.
737 * Any other value - error.
738 **/
739int
740lpfc_hba_down_post(struct lpfc_hba *phba)
741{
742 return (*phba->lpfc_hba_down_post)(phba);
743}
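The _s3/_s4 split above is the patch's general pattern: per-SLI-revision implementations sit behind function pointers in struct lpfc_hba, and thin wrappers like this one dispatch through them. A hedged sketch of how such pointers would be bound once the device group is known (the surrounding setup function is assumed; the dev_grp values are the LPFC_PCI_DEV_* defines this patch adds to lpfc.h):

/* Sketch only: bind the per-revision handlers during API setup. */
switch (dev_grp) {
case LPFC_PCI_DEV_LP:	/* SLI-3 class device */
	phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
	phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
	break;
case LPFC_PCI_DEV_OC:	/* SLI-4 / OneConnect class device */
	phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
	phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
	break;
}
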
645 744
646/** 745/**
647 * lpfc_hb_timeout - The HBA-timer timeout handler 746 * lpfc_hb_timeout - The HBA-timer timeout handler
@@ -809,7 +908,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
809 "taking this port offline.\n"); 908 "taking this port offline.\n");
810 909
811 spin_lock_irq(&phba->hbalock); 910 spin_lock_irq(&phba->hbalock);
812 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 911 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
813 spin_unlock_irq(&phba->hbalock); 912 spin_unlock_irq(&phba->hbalock);
814 913
815 lpfc_offline_prep(phba); 914 lpfc_offline_prep(phba);
@@ -834,13 +933,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
834 struct lpfc_sli *psli = &phba->sli; 933 struct lpfc_sli *psli = &phba->sli;
835 934
836 spin_lock_irq(&phba->hbalock); 935 spin_lock_irq(&phba->hbalock);
837 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 936 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
838 spin_unlock_irq(&phba->hbalock); 937 spin_unlock_irq(&phba->hbalock);
839 lpfc_offline_prep(phba); 938 lpfc_offline_prep(phba);
840 939
841 lpfc_offline(phba); 940 lpfc_offline(phba);
842 lpfc_reset_barrier(phba); 941 lpfc_reset_barrier(phba);
942 spin_lock_irq(&phba->hbalock);
843 lpfc_sli_brdreset(phba); 943 lpfc_sli_brdreset(phba);
944 spin_unlock_irq(&phba->hbalock);
844 lpfc_hba_down_post(phba); 945 lpfc_hba_down_post(phba);
845 lpfc_sli_brdready(phba, HS_MBRDY); 946 lpfc_sli_brdready(phba, HS_MBRDY);
846 lpfc_unblock_mgmt_io(phba); 947 lpfc_unblock_mgmt_io(phba);
@@ -849,6 +950,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
849} 950}
850 951
851/** 952/**
953 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
954 * @phba: pointer to lpfc hba data structure.
955 *
956 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
957 * other than Port Error 6 has been detected.
958 **/
959static void
960lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
961{
962 lpfc_offline_prep(phba);
963 lpfc_offline(phba);
964 lpfc_sli4_brdreset(phba);
965 lpfc_hba_down_post(phba);
966 lpfc_sli4_post_status_check(phba);
967 lpfc_unblock_mgmt_io(phba);
968 phba->link_state = LPFC_HBA_ERROR;
969}
970
971/**
852 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 972 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
853 * @phba: pointer to lpfc hba data structure. 973 * @phba: pointer to lpfc hba data structure.
854 * 974 *
@@ -864,6 +984,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
864 struct lpfc_sli_ring *pring; 984 struct lpfc_sli_ring *pring;
865 struct lpfc_sli *psli = &phba->sli; 985 struct lpfc_sli *psli = &phba->sli;
866 986
987 /* If the pci channel is offline, ignore possible errors,
988 * since we cannot communicate with the pci card anyway.
989 */
990 if (pci_channel_offline(phba->pcidev)) {
991 spin_lock_irq(&phba->hbalock);
992 phba->hba_flag &= ~DEFER_ERATT;
993 spin_unlock_irq(&phba->hbalock);
994 return;
995 }
996
867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
868 "0479 Deferred Adapter Hardware Error " 998 "0479 Deferred Adapter Hardware Error "
869 "Data: x%x x%x x%x\n", 999 "Data: x%x x%x x%x\n",
@@ -871,7 +1001,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
871 phba->work_status[0], phba->work_status[1]); 1001 phba->work_status[0], phba->work_status[1]);
872 1002
873 spin_lock_irq(&phba->hbalock); 1003 spin_lock_irq(&phba->hbalock);
874 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1004 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
875 spin_unlock_irq(&phba->hbalock); 1005 spin_unlock_irq(&phba->hbalock);
876 1006
877 1007
@@ -909,13 +1039,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
909 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1039 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
910 phba->work_hs = old_host_status & ~HS_FFER1; 1040 phba->work_hs = old_host_status & ~HS_FFER1;
911 1041
1042 spin_lock_irq(&phba->hbalock);
912 phba->hba_flag &= ~DEFER_ERATT; 1043 phba->hba_flag &= ~DEFER_ERATT;
1044 spin_unlock_irq(&phba->hbalock);
913 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1045 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
914 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1046 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
915} 1047}
916 1048
1049static void
1050lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1051{
1052 struct lpfc_board_event_header board_event;
1053 struct Scsi_Host *shost;
1054
1055 board_event.event_type = FC_REG_BOARD_EVENT;
1056 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1057 shost = lpfc_shost_from_vport(phba->pport);
1058 fc_host_post_vendor_event(shost, fc_get_event_number(),
1059 sizeof(board_event),
1060 (char *) &board_event,
1061 LPFC_NL_VENDOR_ID);
1062}
1063
917/** 1064/**
918 * lpfc_handle_eratt - The HBA hardware error handler 1065 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
919 * @phba: pointer to lpfc hba data structure. 1066 * @phba: pointer to lpfc hba data structure.
920 * 1067 *
921 * This routine is invoked to handle the following HBA hardware error 1068 * This routine is invoked to handle the following HBA hardware error
@@ -924,8 +1071,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
924 * 2 - DMA ring index out of range 1071 * 2 - DMA ring index out of range
925 * 3 - Mailbox command came back as unknown 1072 * 3 - Mailbox command came back as unknown
926 **/ 1073 **/
927void 1074static void
928lpfc_handle_eratt(struct lpfc_hba *phba) 1075lpfc_handle_eratt_s3(struct lpfc_hba *phba)
929{ 1076{
930 struct lpfc_vport *vport = phba->pport; 1077 struct lpfc_vport *vport = phba->pport;
931 struct lpfc_sli *psli = &phba->sli; 1078 struct lpfc_sli *psli = &phba->sli;
@@ -934,24 +1081,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
934 unsigned long temperature; 1081 unsigned long temperature;
935 struct temp_event temp_event_data; 1082 struct temp_event temp_event_data;
936 struct Scsi_Host *shost; 1083 struct Scsi_Host *shost;
937 struct lpfc_board_event_header board_event;
938 1084
939 /* If the pci channel is offline, ignore possible errors, 1085 /* If the pci channel is offline, ignore possible errors,
940 * since we cannot communicate with the pci card anyway. */ 1086 * since we cannot communicate with the pci card anyway.
941 if (pci_channel_offline(phba->pcidev)) 1087 */
1088 if (pci_channel_offline(phba->pcidev)) {
1089 spin_lock_irq(&phba->hbalock);
1090 phba->hba_flag &= ~DEFER_ERATT;
1091 spin_unlock_irq(&phba->hbalock);
942 return; 1092 return;
1093 }
1094
943 /* If resets are disabled then leave the HBA alone and return */ 1095 /* If resets are disabled then leave the HBA alone and return */
944 if (!phba->cfg_enable_hba_reset) 1096 if (!phba->cfg_enable_hba_reset)
945 return; 1097 return;
946 1098
947 /* Send an internal error event to mgmt application */ 1099 /* Send an internal error event to mgmt application */
948 board_event.event_type = FC_REG_BOARD_EVENT; 1100 lpfc_board_errevt_to_mgmt(phba);
949 board_event.subcategory = LPFC_EVENT_PORTINTERR;
950 shost = lpfc_shost_from_vport(phba->pport);
951 fc_host_post_vendor_event(shost, fc_get_event_number(),
952 sizeof(board_event),
953 (char *) &board_event,
954 LPFC_NL_VENDOR_ID);
955 1101
956 if (phba->hba_flag & DEFER_ERATT) 1102 if (phba->hba_flag & DEFER_ERATT)
957 lpfc_handle_deferred_eratt(phba); 1103 lpfc_handle_deferred_eratt(phba);
@@ -965,7 +1111,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
965 phba->work_status[0], phba->work_status[1]); 1111 phba->work_status[0], phba->work_status[1]);
966 1112
967 spin_lock_irq(&phba->hbalock); 1113 spin_lock_irq(&phba->hbalock);
968 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1114 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
969 spin_unlock_irq(&phba->hbalock); 1115 spin_unlock_irq(&phba->hbalock);
970 1116
971 /* 1117 /*
@@ -1037,6 +1183,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
1037} 1183}
1038 1184
1039/** 1185/**
1186 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1187 * @phba: pointer to lpfc hba data structure.
1188 *
1189 * This routine is invoked to handle the SLI4 HBA hardware error attention
1190 * conditions.
1191 **/
1192static void
1193lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1194{
1195 struct lpfc_vport *vport = phba->pport;
1196 uint32_t event_data;
1197 struct Scsi_Host *shost;
1198
1199 /* If the pci channel is offline, ignore possible errors, since
1200 * we cannot communicate with the pci card anyway.
1201 */
1202 if (pci_channel_offline(phba->pcidev))
1203 return;
1204 /* If resets are disabled then leave the HBA alone and return */
1205 if (!phba->cfg_enable_hba_reset)
1206 return;
1207
1208 /* Send an internal error event to mgmt application */
1209 lpfc_board_errevt_to_mgmt(phba);
1210
1211 /* For now, the actual action for SLI4 device handling is not
1212 * specified yet; just treat it as an adapter hardware failure.
1213 */
1214 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1215 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1216 phba->work_status[0], phba->work_status[1]);
1217
1218 event_data = FC_REG_DUMP_EVENT;
1219 shost = lpfc_shost_from_vport(vport);
1220 fc_host_post_vendor_event(shost, fc_get_event_number(),
1221 sizeof(event_data), (char *) &event_data,
1222 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1223
1224 lpfc_sli4_offline_eratt(phba);
1225}
1226
1227/**
1228 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1229 * @phba: pointer to lpfc HBA data structure.
1230 *
1231 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1232 * routine via the API jump table function pointer in the lpfc_hba struct.
1233 *
1234 * Return codes
1235 * 0 - success.
1236 * Any other value - error.
1237 **/
1238void
1239lpfc_handle_eratt(struct lpfc_hba *phba)
1240{
1241 (*phba->lpfc_handle_eratt)(phba);
1242}
1243
1244/**
1040 * lpfc_handle_latt - The HBA link event handler 1245 * lpfc_handle_latt - The HBA link event handler
1041 * @phba: pointer to lpfc hba data structure. 1246 * @phba: pointer to lpfc hba data structure.
1042 * 1247 *
@@ -1137,7 +1342,7 @@ lpfc_handle_latt_err_exit:
1137 * 0 - pointer to the VPD passed in is NULL 1342 * 0 - pointer to the VPD passed in is NULL
1138 * 1 - success 1343 * 1 - success
1139 **/ 1344 **/
1140static int 1345int
1141lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1346lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1142{ 1347{
1143 uint8_t lenlo, lenhi; 1348 uint8_t lenlo, lenhi;
@@ -1292,6 +1497,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1292 uint16_t dev_id = phba->pcidev->device; 1497 uint16_t dev_id = phba->pcidev->device;
1293 int max_speed; 1498 int max_speed;
1294 int GE = 0; 1499 int GE = 0;
1500 int oneConnect = 0; /* default is not a oneConnect */
1295 struct { 1501 struct {
1296 char * name; 1502 char * name;
1297 int max_speed; 1503 int max_speed;
@@ -1437,6 +1643,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1437 case PCI_DEVICE_ID_PROTEUS_S: 1643 case PCI_DEVICE_ID_PROTEUS_S:
1438 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1644 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1439 break; 1645 break;
1646 case PCI_DEVICE_ID_TIGERSHARK:
1647 oneConnect = 1;
1648 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1649 break;
1440 default: 1650 default:
1441 m = (typeof(m)){ NULL }; 1651 m = (typeof(m)){ NULL };
1442 break; 1652 break;
@@ -1444,13 +1654,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1444 1654
1445 if (mdp && mdp[0] == '\0') 1655 if (mdp && mdp[0] == '\0')
1446 snprintf(mdp, 79,"%s", m.name); 1656 snprintf(mdp, 79,"%s", m.name);
1447 if (descp && descp[0] == '\0') 1657 /* oneConnect HBAs require special processing; they are all initiators
1448 snprintf(descp, 255, 1658 * and we put the port number on the end
1449 "Emulex %s %d%s %s %s", 1659 */
1450 m.name, m.max_speed, 1660 if (descp && descp[0] == '\0') {
1451 (GE) ? "GE" : "Gb", 1661 if (oneConnect)
1452 m.bus, 1662 snprintf(descp, 255,
1453 (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); 1663 "Emulex OneConnect %s, FCoE Initiator, Port %s",
1664 m.name,
1665 phba->Port);
1666 else
1667 snprintf(descp, 255,
1668 "Emulex %s %d%s %s %s",
1669 m.name, m.max_speed,
1670 (GE) ? "GE" : "Gb",
1671 m.bus,
1672 (GE) ? "FCoE Adapter" :
1673 "Fibre Channel Adapter");
1674 }
1454} 1675}
1455 1676
1456/** 1677/**
@@ -1533,7 +1754,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1533 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1754 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1534 icmd->ulpLe = 1; 1755 icmd->ulpLe = 1;
1535 1756
1536 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1757 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1758 IOCB_ERROR) {
1537 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1759 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1538 kfree(mp1); 1760 kfree(mp1);
1539 cnt++; 1761 cnt++;
@@ -1761,7 +1983,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1761 * Lets wait for this to happen, if needed. 1983 * Lets wait for this to happen, if needed.
1762 */ 1984 */
1763 while (!list_empty(&vport->fc_nodes)) { 1985 while (!list_empty(&vport->fc_nodes)) {
1764
1765 if (i++ > 3000) { 1986 if (i++ > 3000) {
1766 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1987 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1767 "0233 Nodelist not empty\n"); 1988 "0233 Nodelist not empty\n");
@@ -1782,7 +2003,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1782 /* Wait for any activity on ndlps to settle */ 2003 /* Wait for any activity on ndlps to settle */
1783 msleep(10); 2004 msleep(10);
1784 } 2005 }
1785 return;
1786} 2006}
1787 2007
1788/** 2008/**
@@ -1803,22 +2023,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
1803} 2023}
1804 2024
1805/** 2025/**
1806 * lpfc_stop_phba_timers - Stop all the timers associated with an HBA 2026 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
1807 * @phba: pointer to lpfc hba data structure. 2027 * @phba: pointer to lpfc hba data structure.
1808 * 2028 *
1809 * This routine stops all the timers associated with a HBA. This function is 2029 * This routine stops all the timers associated with a HBA. This function is
1810 * invoked before either putting a HBA offline or unloading the driver. 2030 * invoked before either putting a HBA offline or unloading the driver.
1811 **/ 2031 **/
1812static void 2032void
1813lpfc_stop_phba_timers(struct lpfc_hba *phba) 2033lpfc_stop_hba_timers(struct lpfc_hba *phba)
1814{ 2034{
1815 del_timer_sync(&phba->fcp_poll_timer);
1816 lpfc_stop_vport_timers(phba->pport); 2035 lpfc_stop_vport_timers(phba->pport);
1817 del_timer_sync(&phba->sli.mbox_tmo); 2036 del_timer_sync(&phba->sli.mbox_tmo);
1818 del_timer_sync(&phba->fabric_block_timer); 2037 del_timer_sync(&phba->fabric_block_timer);
1819 phba->hb_outstanding = 0;
1820 del_timer_sync(&phba->hb_tmofunc);
1821 del_timer_sync(&phba->eratt_poll); 2038 del_timer_sync(&phba->eratt_poll);
2039 del_timer_sync(&phba->hb_tmofunc);
2040 phba->hb_outstanding = 0;
2041
2042 switch (phba->pci_dev_grp) {
2043 case LPFC_PCI_DEV_LP:
2044 /* Stop any LightPulse device specific driver timers */
2045 del_timer_sync(&phba->fcp_poll_timer);
2046 break;
2047 case LPFC_PCI_DEV_OC:
 2048 /* Stop any OneConnect device specific driver timers */
2049 break;
2050 default:
2051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2052 "0297 Invalid device group (x%x)\n",
2053 phba->pci_dev_grp);
2054 break;
2055 }
1822 return; 2056 return;
1823} 2057}
1824 2058
@@ -1878,14 +2112,21 @@ lpfc_online(struct lpfc_hba *phba)
1878 return 1; 2112 return 1;
1879 } 2113 }
1880 2114
1881 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 2115 if (phba->sli_rev == LPFC_SLI_REV4) {
1882 lpfc_unblock_mgmt_io(phba); 2116 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
1883 return 1; 2117 lpfc_unblock_mgmt_io(phba);
2118 return 1;
2119 }
2120 } else {
2121 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2122 lpfc_unblock_mgmt_io(phba);
2123 return 1;
2124 }
1884 } 2125 }
1885 2126
1886 vports = lpfc_create_vport_work_array(phba); 2127 vports = lpfc_create_vport_work_array(phba);
1887 if (vports != NULL) 2128 if (vports != NULL)
1888 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2129 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1889 struct Scsi_Host *shost; 2130 struct Scsi_Host *shost;
1890 shost = lpfc_shost_from_vport(vports[i]); 2131 shost = lpfc_shost_from_vport(vports[i]);
1891 spin_lock_irq(shost->host_lock); 2132 spin_lock_irq(shost->host_lock);
@@ -1947,11 +2188,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1947 /* Issue an unreg_login to all nodes on all vports */ 2188 /* Issue an unreg_login to all nodes on all vports */
1948 vports = lpfc_create_vport_work_array(phba); 2189 vports = lpfc_create_vport_work_array(phba);
1949 if (vports != NULL) { 2190 if (vports != NULL) {
1950 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2191 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1951 struct Scsi_Host *shost; 2192 struct Scsi_Host *shost;
1952 2193
1953 if (vports[i]->load_flag & FC_UNLOADING) 2194 if (vports[i]->load_flag & FC_UNLOADING)
1954 continue; 2195 continue;
2196 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
1955 shost = lpfc_shost_from_vport(vports[i]); 2197 shost = lpfc_shost_from_vport(vports[i]);
1956 list_for_each_entry_safe(ndlp, next_ndlp, 2198 list_for_each_entry_safe(ndlp, next_ndlp,
1957 &vports[i]->fc_nodes, 2199 &vports[i]->fc_nodes,
@@ -1975,7 +2217,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1975 } 2217 }
1976 lpfc_destroy_vport_work_array(phba, vports); 2218 lpfc_destroy_vport_work_array(phba, vports);
1977 2219
1978 lpfc_sli_flush_mbox_queue(phba); 2220 lpfc_sli_mbox_sys_shutdown(phba);
1979} 2221}
1980 2222
1981/** 2223/**
@@ -1996,11 +2238,11 @@ lpfc_offline(struct lpfc_hba *phba)
1996 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2238 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1997 return; 2239 return;
1998 2240
1999 /* stop all timers associated with this hba */ 2241 /* stop port and all timers associated with this hba */
2000 lpfc_stop_phba_timers(phba); 2242 lpfc_stop_port(phba);
2001 vports = lpfc_create_vport_work_array(phba); 2243 vports = lpfc_create_vport_work_array(phba);
2002 if (vports != NULL) 2244 if (vports != NULL)
2003 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 2245 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2004 lpfc_stop_vport_timers(vports[i]); 2246 lpfc_stop_vport_timers(vports[i]);
2005 lpfc_destroy_vport_work_array(phba, vports); 2247 lpfc_destroy_vport_work_array(phba, vports);
2006 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2248 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2013,7 +2255,7 @@ lpfc_offline(struct lpfc_hba *phba)
2013 spin_unlock_irq(&phba->hbalock); 2255 spin_unlock_irq(&phba->hbalock);
2014 vports = lpfc_create_vport_work_array(phba); 2256 vports = lpfc_create_vport_work_array(phba);
2015 if (vports != NULL) 2257 if (vports != NULL)
2016 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2258 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2017 shost = lpfc_shost_from_vport(vports[i]); 2259 shost = lpfc_shost_from_vport(vports[i]);
2018 spin_lock_irq(shost->host_lock); 2260 spin_lock_irq(shost->host_lock);
2019 vports[i]->work_port_events = 0; 2261 vports[i]->work_port_events = 0;
@@ -2106,6 +2348,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2106 shost->max_lun = vport->cfg_max_luns; 2348 shost->max_lun = vport->cfg_max_luns;
2107 shost->this_id = -1; 2349 shost->this_id = -1;
2108 shost->max_cmd_len = 16; 2350 shost->max_cmd_len = 16;
2351 if (phba->sli_rev == LPFC_SLI_REV4) {
2352 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2353 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2354 }
2109 2355
2110 /* 2356 /*
2111 * Set initial can_queue value since 0 is no longer supported and 2357 * Set initial can_queue value since 0 is no longer supported and
@@ -2123,6 +2369,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2123 2369
2124 /* Initialize all internally managed lists. */ 2370 /* Initialize all internally managed lists. */
2125 INIT_LIST_HEAD(&vport->fc_nodes); 2371 INIT_LIST_HEAD(&vport->fc_nodes);
2372 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2126 spin_lock_init(&vport->work_port_lock); 2373 spin_lock_init(&vport->work_port_lock);
2127 2374
2128 init_timer(&vport->fc_disctmo); 2375 init_timer(&vport->fc_disctmo);
@@ -2314,15 +2561,3462 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2314} 2561}
2315 2562
2316/** 2563/**
2317 * lpfc_enable_msix - Enable MSI-X interrupt mode 2564 * lpfc_stop_port_s3 - Stop SLI3 device port
2565 * @phba: pointer to lpfc hba data structure.
2566 *
2567 * This routine is invoked to stop an SLI3 device port, it stops the device
2568 * from generating interrupts and stops the device driver's timers for the
2569 * device.
2570 **/
2571static void
2572lpfc_stop_port_s3(struct lpfc_hba *phba)
2573{
2574 /* Clear all interrupt enable conditions */
2575 writel(0, phba->HCregaddr);
2576 readl(phba->HCregaddr); /* flush */
2577 /* Clear all pending interrupts */
2578 writel(0xffffffff, phba->HAregaddr);
2579 readl(phba->HAregaddr); /* flush */
2580
2581 /* Reset some HBA SLI setup states */
2582 lpfc_stop_hba_timers(phba);
2583 phba->pport->work_port_events = 0;
2584}
2585
2586/**
2587 * lpfc_stop_port_s4 - Stop SLI4 device port
2588 * @phba: pointer to lpfc hba data structure.
2589 *
2590 * This routine is invoked to stop an SLI4 device port, it stops the device
2591 * from generating interrupts and stops the device driver's timers for the
2592 * device.
2593 **/
2594static void
2595lpfc_stop_port_s4(struct lpfc_hba *phba)
2596{
2597 /* Reset some HBA SLI4 setup states */
2598 lpfc_stop_hba_timers(phba);
2599 phba->pport->work_port_events = 0;
2600 phba->sli4_hba.intr_enable = 0;
2601 /* Hard clear it for now, shall have more graceful way to wait later */
2602 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2603}
2604
2605/**
2606 * lpfc_stop_port - Wrapper function for stopping hba port
2607 * @phba: Pointer to HBA context object.
2608 *
2609 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2610 * the API jump table function pointer from the lpfc_hba struct.
2611 **/
2612void
2613lpfc_stop_port(struct lpfc_hba *phba)
2614{
2615 phba->lpfc_stop_port(phba);
2616}
2617
2618/**
 2619 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2620 * @phba: pointer to lpfc hba data structure.
2621 *
2622 * This routine is invoked to remove the driver default fcf record from
2623 * the port. This routine currently acts on FCF Index 0.
2624 *
2625 **/
2626void
2627lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2628{
2629 int rc = 0;
2630 LPFC_MBOXQ_t *mboxq;
2631 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2632 uint32_t mbox_tmo, req_len;
2633 uint32_t shdr_status, shdr_add_status;
2634
2635 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2636 if (!mboxq) {
2637 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 2638 "2020 Failed to allocate mbox for DEL_FCF cmd\n");
2639 return;
2640 }
2641
2642 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2643 sizeof(struct lpfc_sli4_cfg_mhdr);
2644 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2645 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2646 req_len, LPFC_SLI4_MBX_EMBED);
2647 /*
2648 * In phase 1, there is a single FCF index, 0. In phase2, the driver
2649 * supports multiple FCF indices.
2650 */
2651 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2652 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2653 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2654 phba->fcf.fcf_indx);
2655
2656 if (!phba->sli4_hba.intr_enable)
2657 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2658 else {
2659 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2660 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2661 }
2662 /* The IOCTL status is embedded in the mailbox subheader. */
2663 shdr_status = bf_get(lpfc_mbox_hdr_status,
2664 &del_fcf_record->header.cfg_shdr.response);
2665 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2666 &del_fcf_record->header.cfg_shdr.response);
2667 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2669 "2516 DEL FCF of default FCF Index failed "
2670 "mbx status x%x, status x%x add_status x%x\n",
2671 rc, shdr_status, shdr_add_status);
2672 }
2673 if (rc != MBX_TIMEOUT)
2674 mempool_free(mboxq, phba->mbox_mem_pool);
2675}
2676
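The delete-FCF path above shows the recurring SLI4 config-mailbox idiom: issue the command (polled when interrupts are off, waited otherwise), then require the issue return code and both embedded sub-header status words to be clean before declaring success. A standalone sketch of that check, with a simplified stand-in for the real cfg_shdr response layout:

/*
 * Sketch of the SLI4 embedded-mailbox completion check. The shdr
 * fields here are simplified stand-ins for the lpfc_sli4 bitfields
 * read via bf_get() above.
 */
#include <stdio.h>

struct shdr_response {
	unsigned int status;		/* stands in for lpfc_mbox_hdr_status */
	unsigned int add_status;	/* stands in for lpfc_mbox_hdr_add_status */
};

static int cfg_mbox_ok(int rc, const struct shdr_response *rsp)
{
	/* all three must be clean: issue rc, shdr status, add_status */
	if (rsp->status || rsp->add_status || rc != 0) {
		fprintf(stderr, "mbx rc x%x, status x%x add_status x%x\n",
			rc, rsp->status, rsp->add_status);
		return 0;
	}
	return 1;
}

int main(void)
{
	struct shdr_response rsp = { 0, 0 };

	printf("ok=%d\n", cfg_mbox_ok(0, &rsp));	/* prints ok=1 */
	return 0;
}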
2677/**
2678 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2679 * @phba: pointer to lpfc hba data structure.
2680 * @acqe_link: pointer to the async link completion queue entry.
2681 *
2682 * This routine is to parse the SLI4 link-attention link fault code and
2683 * translate it into the base driver's read link attention mailbox command
2684 * status.
2685 *
2686 * Return: Link-attention status in terms of base driver's coding.
2687 **/
2688static uint16_t
2689lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2690 struct lpfc_acqe_link *acqe_link)
2691{
2692 uint16_t latt_fault;
2693
2694 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2695 case LPFC_ASYNC_LINK_FAULT_NONE:
2696 case LPFC_ASYNC_LINK_FAULT_LOCAL:
2697 case LPFC_ASYNC_LINK_FAULT_REMOTE:
2698 latt_fault = 0;
2699 break;
2700 default:
2701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2702 "0398 Invalid link fault code: x%x\n",
2703 bf_get(lpfc_acqe_link_fault, acqe_link));
2704 latt_fault = MBXERR_ERROR;
2705 break;
2706 }
2707 return latt_fault;
2708}
2709
2710/**
2711 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2712 * @phba: pointer to lpfc hba data structure.
2713 * @acqe_link: pointer to the async link completion queue entry.
2714 *
2715 * This routine is to parse the SLI4 link attention type and translate it
2716 * into the base driver's link attention type coding.
2717 *
2718 * Return: Link attention type in terms of base driver's coding.
2719 **/
2720static uint8_t
2721lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2722 struct lpfc_acqe_link *acqe_link)
2723{
2724 uint8_t att_type;
2725
2726 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2727 case LPFC_ASYNC_LINK_STATUS_DOWN:
2728 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2729 att_type = AT_LINK_DOWN;
2730 break;
2731 case LPFC_ASYNC_LINK_STATUS_UP:
2732 /* Ignore physical link up events - wait for logical link up */
2733 att_type = AT_RESERVED;
2734 break;
2735 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2736 att_type = AT_LINK_UP;
2737 break;
2738 default:
2739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2740 "0399 Invalid link attention type: x%x\n",
2741 bf_get(lpfc_acqe_link_status, acqe_link));
2742 att_type = AT_RESERVED;
2743 break;
2744 }
2745 return att_type;
2746}
2747
2748/**
2749 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2750 * @phba: pointer to lpfc hba data structure.
2751 * @acqe_link: pointer to the async link completion queue entry.
2752 *
2753 * This routine is to parse the SLI4 link-attention link speed and translate
2754 * it into the base driver's link-attention link speed coding.
2755 *
2756 * Return: Link-attention link speed in terms of base driver's coding.
2757 **/
2758static uint8_t
2759lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2760 struct lpfc_acqe_link *acqe_link)
2761{
2762 uint8_t link_speed;
2763
2764 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2765 case LPFC_ASYNC_LINK_SPEED_ZERO:
2766 link_speed = LA_UNKNW_LINK;
2767 break;
2768 case LPFC_ASYNC_LINK_SPEED_10MBPS:
2769 link_speed = LA_UNKNW_LINK;
2770 break;
2771 case LPFC_ASYNC_LINK_SPEED_100MBPS:
2772 link_speed = LA_UNKNW_LINK;
2773 break;
2774 case LPFC_ASYNC_LINK_SPEED_1GBPS:
2775 link_speed = LA_1GHZ_LINK;
2776 break;
2777 case LPFC_ASYNC_LINK_SPEED_10GBPS:
2778 link_speed = LA_10GHZ_LINK;
2779 break;
2780 default:
2781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2782 "0483 Invalid link-attention link speed: x%x\n",
2783 bf_get(lpfc_acqe_link_speed, acqe_link));
2784 link_speed = LA_UNKNW_LINK;
2785 break;
2786 }
2787 return link_speed;
2788}
2789
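Note that the sub-gigabit ACQE speeds (zero, 10 Mbps, 100 Mbps) all collapse to LA_UNKNW_LINK because the base driver's link-attention coding has no values below 1 Gb. A standalone sketch of that mapping, using illustrative enum values rather than the real lpfc constants:

/*
 * Illustrative model of the speed translation above; the enum values
 * are stand-ins, not the real LPFC_ASYNC_LINK_SPEED_*/LA_* codes.
 */
#include <stdio.h>

enum acqe_speed { SPD_ZERO, SPD_10M, SPD_100M, SPD_1G, SPD_10G };
enum la_speed   { LA_UNKNW, LA_1GHZ, LA_10GHZ };

static enum la_speed parse_speed(enum acqe_speed s)
{
	switch (s) {
	case SPD_1G:  return LA_1GHZ;
	case SPD_10G: return LA_10GHZ;
	default:      return LA_UNKNW;	/* zero/10M/100M and bad codes */
	}
}

int main(void)
{
	for (enum acqe_speed s = SPD_ZERO; s <= SPD_10G; s++)
		printf("acqe speed %d -> la speed %d\n", s, parse_speed(s));
	return 0;
}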
2790/**
2791 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2792 * @phba: pointer to lpfc hba data structure.
2793 * @acqe_link: pointer to the async link completion queue entry.
2794 *
2795 * This routine is to handle the SLI4 asynchronous link event.
2796 **/
2797static void
2798lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2799 struct lpfc_acqe_link *acqe_link)
2800{
2801 struct lpfc_dmabuf *mp;
2802 LPFC_MBOXQ_t *pmb;
2803 MAILBOX_t *mb;
2804 READ_LA_VAR *la;
2805 uint8_t att_type;
2806
2807 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2808 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2809 return;
2810 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2811 if (!pmb) {
2812 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2813 "0395 The mboxq allocation failed\n");
2814 return;
2815 }
2816 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2817 if (!mp) {
2818 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2819 "0396 The lpfc_dmabuf allocation failed\n");
2820 goto out_free_pmb;
2821 }
2822 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2823 if (!mp->virt) {
2824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2825 "0397 The mbuf allocation failed\n");
2826 goto out_free_dmabuf;
2827 }
2828
2829 /* Cleanup any outstanding ELS commands */
2830 lpfc_els_flush_all_cmd(phba);
2831
 2832 /* Block ELS IOCBs until we are done processing the link event */
2833 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2834
2835 /* Update link event statistics */
2836 phba->sli.slistat.link_event++;
2837
2838 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2839 lpfc_read_la(phba, pmb, mp);
2840 pmb->vport = phba->pport;
2841
2842 /* Parse and translate status field */
2843 mb = &pmb->u.mb;
2844 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2845
2846 /* Parse and translate link attention fields */
2847 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2848 la->eventTag = acqe_link->event_tag;
2849 la->attType = att_type;
2850 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2851
 2852 /* Fake the following irrelevant fields */
2853 la->topology = TOPOLOGY_PT_PT;
2854 la->granted_AL_PA = 0;
2855 la->il = 0;
2856 la->pb = 0;
2857 la->fa = 0;
2858 la->mm = 0;
2859
2860 /* Keep the link status for extra SLI4 state machine reference */
2861 phba->sli4_hba.link_state.speed =
2862 bf_get(lpfc_acqe_link_speed, acqe_link);
2863 phba->sli4_hba.link_state.duplex =
2864 bf_get(lpfc_acqe_link_duplex, acqe_link);
2865 phba->sli4_hba.link_state.status =
2866 bf_get(lpfc_acqe_link_status, acqe_link);
2867 phba->sli4_hba.link_state.physical =
2868 bf_get(lpfc_acqe_link_physical, acqe_link);
2869 phba->sli4_hba.link_state.fault =
2870 bf_get(lpfc_acqe_link_fault, acqe_link);
2871
2872 /* Invoke the lpfc_handle_latt mailbox command callback function */
2873 lpfc_mbx_cmpl_read_la(phba, pmb);
2874
2875 return;
2876
2877out_free_dmabuf:
2878 kfree(mp);
2879out_free_pmb:
2880 mempool_free(pmb, phba->mbox_mem_pool);
2881}
2882
2883/**
2884 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2885 * @phba: pointer to lpfc hba data structure.
2886 * @acqe_link: pointer to the async fcoe completion queue entry.
2887 *
2888 * This routine is to handle the SLI4 asynchronous fcoe event.
2889 **/
2890static void
2891lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2892 struct lpfc_acqe_fcoe *acqe_fcoe)
2893{
2894 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2895 int rc;
2896
2897 switch (event_type) {
2898 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2899 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2900 "2546 New FCF found index 0x%x tag 0x%x \n",
2901 acqe_fcoe->fcf_index,
2902 acqe_fcoe->event_tag);
2903 /*
2904 * If the current FCF is in discovered state,
2905 * do nothing.
2906 */
2907 spin_lock_irq(&phba->hbalock);
2908 if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2909 spin_unlock_irq(&phba->hbalock);
2910 break;
2911 }
2912 spin_unlock_irq(&phba->hbalock);
2913
2914 /* Read the FCF table and re-discover SAN. */
2915 rc = lpfc_sli4_read_fcf_record(phba,
2916 LPFC_FCOE_FCF_GET_FIRST);
2917 if (rc)
2918 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2919 "2547 Read FCF record failed 0x%x\n",
2920 rc);
2921 break;
2922
2923 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2924 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2925 "2548 FCF Table full count 0x%x tag 0x%x \n",
2926 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2927 acqe_fcoe->event_tag);
2928 break;
2929
2930 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2931 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
 2932 "2549 FCF disconnected from network index 0x%x"
 2933 " tag 0x%x\n", acqe_fcoe->fcf_index,
2934 acqe_fcoe->event_tag);
2935 /* If the event is not for currently used fcf do nothing */
2936 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2937 break;
2938 /*
 2939 * Currently, the driver supports only one FCF, so treat this as
2940 * a link down.
2941 */
2942 lpfc_linkdown(phba);
2943 /* Unregister FCF if no devices connected to it */
2944 lpfc_unregister_unused_fcf(phba);
2945 break;
2946
2947 default:
2948 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2949 "0288 Unknown FCoE event type 0x%x event tag "
2950 "0x%x\n", event_type, acqe_fcoe->event_tag);
2951 break;
2952 }
2953}
2954
2955/**
2956 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2957 * @phba: pointer to lpfc hba data structure.
2958 * @acqe_link: pointer to the async dcbx completion queue entry.
2959 *
2960 * This routine is to handle the SLI4 asynchronous dcbx event.
2961 **/
2962static void
2963lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2964 struct lpfc_acqe_dcbx *acqe_dcbx)
2965{
2966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2967 "0290 The SLI4 DCBX asynchronous event is not "
2968 "handled yet\n");
2969}
2970
2971/**
2972 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2973 * @phba: pointer to lpfc hba data structure.
2974 *
2975 * This routine is invoked by the worker thread to process all the pending
2976 * SLI4 asynchronous events.
2977 **/
2978void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2979{
2980 struct lpfc_cq_event *cq_event;
2981
2982 /* First, declare the async event has been handled */
2983 spin_lock_irq(&phba->hbalock);
2984 phba->hba_flag &= ~ASYNC_EVENT;
2985 spin_unlock_irq(&phba->hbalock);
2986 /* Now, handle all the async events */
2987 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2988 /* Get the first event from the head of the event queue */
2989 spin_lock_irq(&phba->hbalock);
2990 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2991 cq_event, struct lpfc_cq_event, list);
2992 spin_unlock_irq(&phba->hbalock);
2993 /* Process the asynchronous event */
2994 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2995 case LPFC_TRAILER_CODE_LINK:
2996 lpfc_sli4_async_link_evt(phba,
2997 &cq_event->cqe.acqe_link);
2998 break;
2999 case LPFC_TRAILER_CODE_FCOE:
3000 lpfc_sli4_async_fcoe_evt(phba,
3001 &cq_event->cqe.acqe_fcoe);
3002 break;
3003 case LPFC_TRAILER_CODE_DCBX:
3004 lpfc_sli4_async_dcbx_evt(phba,
3005 &cq_event->cqe.acqe_dcbx);
3006 break;
3007 default:
3008 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 3009 "1804 Invalid asynchronous event code: "
3010 "x%x\n", bf_get(lpfc_trailer_code,
3011 &cq_event->cqe.mcqe_cmpl));
3012 break;
3013 }
3014 /* Free the completion event processed to the free pool */
3015 lpfc_sli4_cq_event_release(phba, cq_event);
3016 }
3017}
3018
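The ordering in the loop above matters: ASYNC_EVENT is cleared before the queue is drained, so a producer that posts while the drain is in flight still leaves the flag set for the next worker pass. A single-threaded standalone model of that handshake, with a simplified list and assumed names:

/*
 * Model of the worker handshake: the "interrupt" side queues events
 * and raises a flag; the "worker" clears the flag first, then drains,
 * mirroring the ordering in lpfc_sli4_async_event_proc() above.
 */
#include <stdio.h>
#include <stdlib.h>

struct cq_event { int code; struct cq_event *next; };

static struct cq_event *queue_head;
static int async_flag;

static void post_event(int code)	/* interrupt-side producer */
{
	struct cq_event *ev = malloc(sizeof(*ev));

	ev->code = code;
	ev->next = queue_head;
	queue_head = ev;
	async_flag = 1;			/* tell the worker there is work */
}

static void worker_drain(void)		/* worker-side consumer */
{
	async_flag = 0;			/* declare the event handled first */
	while (queue_head) {
		struct cq_event *ev = queue_head;

		queue_head = ev->next;
		printf("handled async event code %d\n", ev->code);
		free(ev);
	}
}

int main(void)
{
	post_event(1);	/* e.g. link trailer code */
	post_event(2);	/* e.g. fcoe trailer code */
	worker_drain();
	return 0;
}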
3019/**
3020 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3021 * @phba: pointer to lpfc hba data structure.
3022 * @dev_grp: The HBA PCI-Device group number.
3023 *
3024 * This routine is invoked to set up the per HBA PCI-Device group function
3025 * API jump table entries.
3026 *
3027 * Return: 0 if success, otherwise -ENODEV
3028 **/
3029int
3030lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3031{
3032 int rc;
3033
3034 /* Set up lpfc PCI-device group */
3035 phba->pci_dev_grp = dev_grp;
3036
3037 /* The LPFC_PCI_DEV_OC uses SLI4 */
3038 if (dev_grp == LPFC_PCI_DEV_OC)
3039 phba->sli_rev = LPFC_SLI_REV4;
3040
3041 /* Set up device INIT API function jump table */
3042 rc = lpfc_init_api_table_setup(phba, dev_grp);
3043 if (rc)
3044 return -ENODEV;
3045 /* Set up SCSI API function jump table */
3046 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3047 if (rc)
3048 return -ENODEV;
3049 /* Set up SLI API function jump table */
3050 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3051 if (rc)
3052 return -ENODEV;
3053 /* Set up MBOX API function jump table */
3054 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3055 if (rc)
3056 return -ENODEV;
3057
3058 return 0;
3059}
3060
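Each sub-table setup above gets a chance to reject the device group, and any refusal fails the whole probe with -ENODEV. A standalone model of that ladder, with stub routines standing in for the init/scsi/sli/mbox table-setup calls:

/*
 * Model of the api-table setup ladder; the stubs and the local
 * ENODEV definition are stand-ins for the kernel originals.
 */
#include <stdio.h>

#define ENODEV 19
#define DEV_LP 0x1
#define DEV_OC 0x2

static int table_setup(const char *name, int dev_grp)
{
	if (dev_grp != DEV_LP && dev_grp != DEV_OC) {
		fprintf(stderr, "%s: invalid device group 0x%x\n",
			name, dev_grp);
		return -ENODEV;
	}
	printf("%s: wired for group 0x%x\n", name, dev_grp);
	return 0;
}

static int api_table_setup(int dev_grp)
{
	/* every sub-table must accept the group or the probe fails */
	if (table_setup("init", dev_grp) || table_setup("scsi", dev_grp) ||
	    table_setup("sli", dev_grp) || table_setup("mbox", dev_grp))
		return -ENODEV;
	return 0;
}

int main(void)
{
	return api_table_setup(DEV_OC) ? 1 : 0;
}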
3061/**
3062 * lpfc_log_intr_mode - Log the active interrupt mode
3063 * @phba: pointer to lpfc hba data structure.
3064 * @intr_mode: active interrupt mode adopted.
3065 *
 3066 * This routine is invoked to log the interrupt mode currently in use
 3067 * for the device.
3068 **/
3069static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3070{
3071 switch (intr_mode) {
3072 case 0:
3073 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3074 "0470 Enable INTx interrupt mode.\n");
3075 break;
3076 case 1:
3077 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3078 "0481 Enabled MSI interrupt mode.\n");
3079 break;
3080 case 2:
3081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3082 "0480 Enabled MSI-X interrupt mode.\n");
3083 break;
3084 default:
3085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3086 "0482 Illegal interrupt mode.\n");
3087 break;
3088 }
3089 return;
3090}
3091
3092/**
3093 * lpfc_enable_pci_dev - Enable a generic PCI device.
3094 * @phba: pointer to lpfc hba data structure.
3095 *
3096 * This routine is invoked to enable the PCI device that is common to all
3097 * PCI devices.
3098 *
3099 * Return codes
 3100 * 0 - successful
3101 * other values - error
3102 **/
3103static int
3104lpfc_enable_pci_dev(struct lpfc_hba *phba)
3105{
3106 struct pci_dev *pdev;
3107 int bars;
3108
3109 /* Obtain PCI device reference */
3110 if (!phba->pcidev)
3111 goto out_error;
3112 else
3113 pdev = phba->pcidev;
3114 /* Select PCI BARs */
3115 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3116 /* Enable PCI device */
3117 if (pci_enable_device_mem(pdev))
3118 goto out_error;
3119 /* Request PCI resource for the device */
3120 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3121 goto out_disable_device;
3122 /* Set up device as PCI master and save state for EEH */
3123 pci_set_master(pdev);
3124 pci_try_set_mwi(pdev);
3125 pci_save_state(pdev);
3126
3127 return 0;
3128
3129out_disable_device:
3130 pci_disable_device(pdev);
3131out_error:
3132 return -ENODEV;
3133}
3134
3135/**
3136 * lpfc_disable_pci_dev - Disable a generic PCI device.
3137 * @phba: pointer to lpfc hba data structure.
3138 *
3139 * This routine is invoked to disable the PCI device that is common to all
3140 * PCI devices.
3141 **/
3142static void
3143lpfc_disable_pci_dev(struct lpfc_hba *phba)
3144{
3145 struct pci_dev *pdev;
3146 int bars;
3147
3148 /* Obtain PCI device reference */
3149 if (!phba->pcidev)
3150 return;
3151 else
3152 pdev = phba->pcidev;
3153 /* Select PCI BARs */
3154 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3155 /* Release PCI resource and disable PCI device */
3156 pci_release_selected_regions(pdev, bars);
3157 pci_disable_device(pdev);
3158 /* Null out PCI private reference to driver */
3159 pci_set_drvdata(pdev, NULL);
3160
3161 return;
3162}
3163
3164/**
3165 * lpfc_reset_hba - Reset a hba
3166 * @phba: pointer to lpfc hba data structure.
3167 *
3168 * This routine is invoked to reset a hba device. It brings the HBA
3169 * offline, performs a board restart, and then brings the board back
3170 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3171 * on outstanding mailbox commands.
3172 **/
3173void
3174lpfc_reset_hba(struct lpfc_hba *phba)
3175{
3176 /* If resets are disabled then set error state and return. */
3177 if (!phba->cfg_enable_hba_reset) {
3178 phba->link_state = LPFC_HBA_ERROR;
3179 return;
3180 }
3181 lpfc_offline_prep(phba);
3182 lpfc_offline(phba);
3183 lpfc_sli_brdrestart(phba);
3184 lpfc_online(phba);
3185 lpfc_unblock_mgmt_io(phba);
3186}
3187
3188/**
3189 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3190 * @phba: pointer to lpfc hba data structure.
3191 *
3192 * This routine is invoked to set up the driver internal resources specific to
3193 * support the SLI-3 HBA device it attached to.
3194 *
3195 * Return codes
 3196 * 0 - successful
3197 * other values - error
3198 **/
3199static int
3200lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3201{
3202 struct lpfc_sli *psli;
3203
3204 /*
3205 * Initialize timers used by driver
3206 */
3207
3208 /* Heartbeat timer */
3209 init_timer(&phba->hb_tmofunc);
3210 phba->hb_tmofunc.function = lpfc_hb_timeout;
3211 phba->hb_tmofunc.data = (unsigned long)phba;
3212
3213 psli = &phba->sli;
3214 /* MBOX heartbeat timer */
3215 init_timer(&psli->mbox_tmo);
3216 psli->mbox_tmo.function = lpfc_mbox_timeout;
3217 psli->mbox_tmo.data = (unsigned long) phba;
3218 /* FCP polling mode timer */
3219 init_timer(&phba->fcp_poll_timer);
3220 phba->fcp_poll_timer.function = lpfc_poll_timeout;
3221 phba->fcp_poll_timer.data = (unsigned long) phba;
3222 /* Fabric block timer */
3223 init_timer(&phba->fabric_block_timer);
3224 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3225 phba->fabric_block_timer.data = (unsigned long) phba;
3226 /* EA polling mode timer */
3227 init_timer(&phba->eratt_poll);
3228 phba->eratt_poll.function = lpfc_poll_eratt;
3229 phba->eratt_poll.data = (unsigned long) phba;
3230
3231 /* Host attention work mask setup */
3232 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3233 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3234
3235 /* Get all the module params for configuring this host */
3236 lpfc_get_cfgparam(phba);
3237 /*
 3238 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3239 * used to create the sg_dma_buf_pool must be dynamically calculated.
3240 * 2 segments are added since the IOCB needs a command and response bde.
3241 */
3242 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3243 sizeof(struct fcp_rsp) +
3244 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3245
3246 if (phba->cfg_enable_bg) {
3247 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3248 phba->cfg_sg_dma_buf_size +=
3249 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3250 }
3251
3252 /* Also reinitialize the host templates with new values. */
3253 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3254 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3255
3256 phba->max_vpi = LPFC_MAX_VPI;
3257 /* This will be set to correct value after config_port mbox */
3258 phba->max_vports = 0;
3259
3260 /*
3261 * Initialize the SLI Layer to run with lpfc HBAs.
3262 */
3263 lpfc_sli_setup(phba);
3264 lpfc_sli_queue_setup(phba);
3265
3266 /* Allocate device driver memory */
3267 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3268 return -ENOMEM;
3269
3270 return 0;
3271}
3272
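A worked example of the SLI-3 sg_dma_buf_size arithmetic above may help. Assuming illustrative sizes of 32 bytes for struct fcp_cmnd, 160 for struct fcp_rsp and 12 for struct ulp_bde64 (the real values depend on the driver headers), a cfg_sg_seg_cnt of 64 yields:

/*
 * Standalone arithmetic check; the struct sizes are illustrative
 * assumptions, not authoritative lpfc values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int fcp_cmnd = 32, fcp_rsp = 160, bde64 = 12;
	unsigned int sg_seg_cnt = 64;

	/* +2 because the IOCB carries a command and a response BDE */
	unsigned int buf_size = fcp_cmnd + fcp_rsp +
				(sg_seg_cnt + 2) * bde64;

	printf("sg_dma_buf_size = %u bytes\n", buf_size);	/* 984 */
	return 0;
}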
3273/**
3274 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3275 * @phba: pointer to lpfc hba data structure.
3276 *
3277 * This routine is invoked to unset the driver internal resources set up
3278 * specific for supporting the SLI-3 HBA device it attached to.
3279 **/
3280static void
3281lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3282{
3283 /* Free device driver memory allocated */
3284 lpfc_mem_free_all(phba);
3285
3286 return;
3287}
3288
3289/**
3290 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3291 * @phba: pointer to lpfc hba data structure.
3292 *
3293 * This routine is invoked to set up the driver internal resources specific to
3294 * support the SLI-4 HBA device it attached to.
3295 *
3296 * Return codes
 3297 * 0 - successful
3298 * other values - error
3299 **/
3300static int
3301lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3302{
3303 struct lpfc_sli *psli;
3304 int rc;
3305 int i, hbq_count;
3306
3307 /* Before proceed, wait for POST done and device ready */
3308 rc = lpfc_sli4_post_status_check(phba);
3309 if (rc)
3310 return -ENODEV;
3311
3312 /*
3313 * Initialize timers used by driver
3314 */
3315
3316 /* Heartbeat timer */
3317 init_timer(&phba->hb_tmofunc);
3318 phba->hb_tmofunc.function = lpfc_hb_timeout;
3319 phba->hb_tmofunc.data = (unsigned long)phba;
3320
3321 psli = &phba->sli;
3322 /* MBOX heartbeat timer */
3323 init_timer(&psli->mbox_tmo);
3324 psli->mbox_tmo.function = lpfc_mbox_timeout;
3325 psli->mbox_tmo.data = (unsigned long) phba;
3326 /* Fabric block timer */
3327 init_timer(&phba->fabric_block_timer);
3328 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3329 phba->fabric_block_timer.data = (unsigned long) phba;
3330 /* EA polling mode timer */
3331 init_timer(&phba->eratt_poll);
3332 phba->eratt_poll.function = lpfc_poll_eratt;
3333 phba->eratt_poll.data = (unsigned long) phba;
3334 /*
3335 * We need to do a READ_CONFIG mailbox command here before
3336 * calling lpfc_get_cfgparam. For VFs this will report the
3337 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3338 * All of the resources allocated
3339 * for this Port are tied to these values.
3340 */
3341 /* Get all the module params for configuring this host */
3342 lpfc_get_cfgparam(phba);
3343 phba->max_vpi = LPFC_MAX_VPI;
3344 /* This will be set to correct value after the read_config mbox */
3345 phba->max_vports = 0;
3346
3347 /* Program the default value of vlan_id and fc_map */
3348 phba->valid_vlan = 0;
3349 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3350 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3351 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3352
3353 /*
 3354 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3355 * used to create the sg_dma_buf_pool must be dynamically calculated.
3356 * 2 segments are added since the IOCB needs a command and response bde.
 3357 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3358 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3359 * Table of sgl sizes and seg_cnt:
3360 * sgl size, sg_seg_cnt total seg
3361 * 1k 50 52
3362 * 2k 114 116
3363 * 4k 242 244
3364 * 8k 498 500
3365 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3366 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3367 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3368 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3369 */
3370 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3371 phba->cfg_sg_seg_cnt = 50;
3372 else if (phba->cfg_sg_seg_cnt <= 114)
3373 phba->cfg_sg_seg_cnt = 114;
3374 else if (phba->cfg_sg_seg_cnt <= 242)
3375 phba->cfg_sg_seg_cnt = 242;
3376 else
3377 phba->cfg_sg_seg_cnt = 498;
3378
3379 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3380 + sizeof(struct fcp_rsp);
3381 phba->cfg_sg_dma_buf_size +=
3382 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3383
3384 /* Initialize buffer queue management fields */
3385 hbq_count = lpfc_sli_hbq_count();
3386 for (i = 0; i < hbq_count; ++i)
3387 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3388 INIT_LIST_HEAD(&phba->rb_pend_list);
3389 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3390 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3391
3392 /*
3393 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3394 */
3395 /* Initialize the Abort scsi buffer list used by driver */
3396 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3397 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3398 /* This abort list used by worker thread */
3399 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3400
3401 /*
 3402 * Initialize driver internal slow-path work queues
3403 */
3404
 3405 /* Driver internal slow-path CQ Event pool */
3406 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3407 /* Response IOCB work queue list */
3408 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3409 /* Asynchronous event CQ Event work queue list */
3410 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3411 /* Fast-path XRI aborted CQ Event work queue list */
3412 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3413 /* Slow-path XRI aborted CQ Event work queue list */
3414 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3415 /* Receive queue CQ Event work queue list */
3416 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3417
3418 /* Initialize the driver internal SLI layer lists. */
3419 lpfc_sli_setup(phba);
3420 lpfc_sli_queue_setup(phba);
3421
3422 /* Allocate device driver memory */
3423 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3424 if (rc)
3425 return -ENOMEM;
3426
3427 /* Create the bootstrap mailbox command */
3428 rc = lpfc_create_bootstrap_mbox(phba);
3429 if (unlikely(rc))
3430 goto out_free_mem;
3431
3432 /* Set up the host's endian order with the device. */
3433 rc = lpfc_setup_endian_order(phba);
3434 if (unlikely(rc))
3435 goto out_free_bsmbx;
3436
3437 /* Set up the hba's configuration parameters. */
3438 rc = lpfc_sli4_read_config(phba);
3439 if (unlikely(rc))
3440 goto out_free_bsmbx;
3441
3442 /* Perform a function reset */
3443 rc = lpfc_pci_function_reset(phba);
3444 if (unlikely(rc))
3445 goto out_free_bsmbx;
3446
3447 /* Create all the SLI4 queues */
3448 rc = lpfc_sli4_queue_create(phba);
3449 if (rc)
3450 goto out_free_bsmbx;
3451
3452 /* Create driver internal CQE event pool */
3453 rc = lpfc_sli4_cq_event_pool_create(phba);
3454 if (rc)
3455 goto out_destroy_queue;
3456
3457 /* Initialize and populate the iocb list per host */
3458 rc = lpfc_init_sgl_list(phba);
3459 if (rc) {
3460 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3461 "1400 Failed to initialize sgl list.\n");
3462 goto out_destroy_cq_event_pool;
3463 }
3464 rc = lpfc_init_active_sgl_array(phba);
3465 if (rc) {
3466 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3467 "1430 Failed to initialize sgl list.\n");
3468 goto out_free_sgl_list;
3469 }
3470
3471 rc = lpfc_sli4_init_rpi_hdrs(phba);
3472 if (rc) {
3473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3474 "1432 Failed to initialize rpi headers.\n");
3475 goto out_free_active_sgl;
3476 }
3477
3478 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3479 phba->cfg_fcp_eq_count), GFP_KERNEL);
3480 if (!phba->sli4_hba.fcp_eq_hdl) {
3481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3482 "2572 Failed allocate memory for fast-path "
3483 "per-EQ handle array\n");
3484 goto out_remove_rpi_hdrs;
3485 }
3486
3487 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3488 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3489 if (!phba->sli4_hba.msix_entries) {
3490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3491 "2573 Failed allocate memory for msi-x "
3492 "interrupt vector entries\n");
3493 goto out_free_fcp_eq_hdl;
3494 }
3495
3496 return rc;
3497
3498out_free_fcp_eq_hdl:
3499 kfree(phba->sli4_hba.fcp_eq_hdl);
3500out_remove_rpi_hdrs:
3501 lpfc_sli4_remove_rpi_hdrs(phba);
3502out_free_active_sgl:
3503 lpfc_free_active_sgl(phba);
3504out_free_sgl_list:
3505 lpfc_free_sgl_list(phba);
3506out_destroy_cq_event_pool:
3507 lpfc_sli4_cq_event_pool_destroy(phba);
3508out_destroy_queue:
3509 lpfc_sli4_queue_destroy(phba);
3510out_free_bsmbx:
3511 lpfc_destroy_bootstrap_mbox(phba);
3512out_free_mem:
3513 lpfc_mem_free(phba);
3514 return rc;
3515}
3516
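The sgl sizing table in the routine above can be checked directly: with a 16-byte sli4_sge, a 32-byte command and a 160-byte response (the sizes the comment itself uses), each supported seg_cnt plus 2 for the command and response sges lands exactly on a power-of-two buffer size, which is what keeps an sgl from straddling a 4k page. A standalone check of that arithmetic:

/*
 * Verifies the comment's sizing table; all constants come from the
 * table above, nothing here is measured from real structs.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int sge = 16, cmd = 32, rsp = 160;
	const unsigned int seg_cnt[] = { 50, 114, 242, 498 };

	for (unsigned int i = 0; i < 4; i++)
		printf("seg_cnt %3u -> %u bytes\n", seg_cnt[i],
		       cmd + rsp + (seg_cnt[i] + 2) * sge);
	/* prints 1024, 2048, 4096, 8192 */
	return 0;
}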
3517/**
3518 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3519 * @phba: pointer to lpfc hba data structure.
3520 *
3521 * This routine is invoked to unset the driver internal resources set up
3522 * specific for supporting the SLI-4 HBA device it attached to.
3523 **/
3524static void
3525lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3526{
3527 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3528
3529 /* unregister default FCFI from the HBA */
3530 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3531
3532 /* Free the default FCR table */
3533 lpfc_sli_remove_dflt_fcf(phba);
3534
3535 /* Free memory allocated for msi-x interrupt vector entries */
3536 kfree(phba->sli4_hba.msix_entries);
3537
3538 /* Free memory allocated for fast-path work queue handles */
3539 kfree(phba->sli4_hba.fcp_eq_hdl);
3540
3541 /* Free the allocated rpi headers. */
3542 lpfc_sli4_remove_rpi_hdrs(phba);
3543 lpfc_sli4_remove_rpis(phba);
3544
3545 /* Free the ELS sgl list */
3546 lpfc_free_active_sgl(phba);
3547 lpfc_free_sgl_list(phba);
3548
3549 /* Free the SCSI sgl management array */
3550 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3551
3552 /* Free the SLI4 queues */
3553 lpfc_sli4_queue_destroy(phba);
3554
3555 /* Free the completion queue EQ event pool */
3556 lpfc_sli4_cq_event_release_all(phba);
3557 lpfc_sli4_cq_event_pool_destroy(phba);
3558
3559 /* Reset SLI4 HBA FCoE function */
3560 lpfc_pci_function_reset(phba);
3561
3562 /* Free the bsmbx region. */
3563 lpfc_destroy_bootstrap_mbox(phba);
3564
3565 /* Free the SLI Layer memory with SLI4 HBAs */
3566 lpfc_mem_free_all(phba);
3567
3568 /* Free the current connect table */
3569 list_for_each_entry_safe(conn_entry, next_conn_entry,
3570 &phba->fcf_conn_rec_list, list)
3571 kfree(conn_entry);
3572
3573 return;
3574}
3575
3576/**
 3577 * lpfc_init_api_table_setup - Set up init api function jump table
3578 * @phba: The hba struct for which this call is being executed.
3579 * @dev_grp: The HBA PCI-Device group number.
3580 *
3581 * This routine sets up the device INIT interface API function jump table
3582 * in @phba struct.
3583 *
3584 * Returns: 0 - success, -ENODEV - failure.
3585 **/
3586int
3587lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3588{
3589 switch (dev_grp) {
3590 case LPFC_PCI_DEV_LP:
3591 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3592 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3593 phba->lpfc_stop_port = lpfc_stop_port_s3;
3594 break;
3595 case LPFC_PCI_DEV_OC:
3596 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3597 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3598 phba->lpfc_stop_port = lpfc_stop_port_s4;
3599 break;
3600 default:
3601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3602 "1431 Invalid HBA PCI-device group: 0x%x\n",
3603 dev_grp);
3604 return -ENODEV;
3605 break;
3606 }
3607 return 0;
3608}
3609
3610/**
 3611 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3612 * @phba: pointer to lpfc hba data structure.
3613 *
3614 * This routine is invoked to set up the driver internal resources before the
3615 * device specific resource setup to support the HBA device it attached to.
3616 *
3617 * Return codes
 3618 * 0 - successful
3619 * other values - error
3620 **/
3621static int
3622lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3623{
3624 /*
3625 * Driver resources common to all SLI revisions
3626 */
3627 atomic_set(&phba->fast_event_count, 0);
3628 spin_lock_init(&phba->hbalock);
3629
3630 /* Initialize ndlp management spinlock */
3631 spin_lock_init(&phba->ndlp_lock);
3632
3633 INIT_LIST_HEAD(&phba->port_list);
3634 INIT_LIST_HEAD(&phba->work_list);
3635 init_waitqueue_head(&phba->wait_4_mlo_m_q);
3636
3637 /* Initialize the wait queue head for the kernel thread */
3638 init_waitqueue_head(&phba->work_waitq);
3639
3640 /* Initialize the scsi buffer list used by driver for scsi IO */
3641 spin_lock_init(&phba->scsi_buf_list_lock);
3642 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3643
3644 /* Initialize the fabric iocb list */
3645 INIT_LIST_HEAD(&phba->fabric_iocb_list);
3646
3647 /* Initialize list to save ELS buffers */
3648 INIT_LIST_HEAD(&phba->elsbuf);
3649
3650 /* Initialize FCF connection rec list */
3651 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3652
3653 return 0;
3654}
3655
3656/**
3657 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3658 * @phba: pointer to lpfc hba data structure.
3659 *
3660 * This routine is invoked to set up the driver internal resources after the
3661 * device specific resource setup to support the HBA device it attached to.
3662 *
3663 * Return codes
 3664 * 0 - successful
3665 * other values - error
3666 **/
3667static int
3668lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3669{
3670 int error;
3671
3672 /* Startup the kernel thread for this host adapter. */
3673 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3674 "lpfc_worker_%d", phba->brd_no);
3675 if (IS_ERR(phba->worker_thread)) {
3676 error = PTR_ERR(phba->worker_thread);
3677 return error;
3678 }
3679
3680 return 0;
3681}
3682
3683/**
3684 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3685 * @phba: pointer to lpfc hba data structure.
3686 *
3687 * This routine is invoked to unset the driver internal resources set up after
3688 * the device specific resource setup for supporting the HBA device it
3689 * attached to.
3690 **/
3691static void
3692lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3693{
3694 /* Stop kernel worker thread */
3695 kthread_stop(phba->worker_thread);
3696}
3697
3698/**
3699 * lpfc_free_iocb_list - Free iocb list.
3700 * @phba: pointer to lpfc hba data structure.
3701 *
3702 * This routine is invoked to free the driver's IOCB list and memory.
3703 **/
3704static void
3705lpfc_free_iocb_list(struct lpfc_hba *phba)
3706{
3707 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3708
3709 spin_lock_irq(&phba->hbalock);
3710 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3711 &phba->lpfc_iocb_list, list) {
3712 list_del(&iocbq_entry->list);
3713 kfree(iocbq_entry);
3714 phba->total_iocbq_bufs--;
3715 }
3716 spin_unlock_irq(&phba->hbalock);
3717
3718 return;
3719}
3720
3721/**
3722 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3723 * @phba: pointer to lpfc hba data structure.
3724 *
 3725 * This routine is invoked to allocate and initialize the driver's IOCB
3726 * list and set up the IOCB tag array accordingly.
3727 *
3728 * Return codes
 3729 * 0 - successful
3730 * other values - error
3731 **/
3732static int
3733lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3734{
3735 struct lpfc_iocbq *iocbq_entry = NULL;
3736 uint16_t iotag;
3737 int i;
3738
3739 /* Initialize and populate the iocb list per host. */
3740 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3741 for (i = 0; i < iocb_count; i++) {
3742 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3743 if (iocbq_entry == NULL) {
3744 printk(KERN_ERR "%s: only allocated %d iocbs of "
3745 "expected %d count. Unloading driver.\n",
3746 __func__, i, LPFC_IOCB_LIST_CNT);
3747 goto out_free_iocbq;
3748 }
3749
3750 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3751 if (iotag == 0) {
3752 kfree(iocbq_entry);
3753 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3754 "Unloading driver.\n", __func__);
3755 goto out_free_iocbq;
3756 }
3757 iocbq_entry->sli4_xritag = NO_XRI;
3758
3759 spin_lock_irq(&phba->hbalock);
3760 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3761 phba->total_iocbq_bufs++;
3762 spin_unlock_irq(&phba->hbalock);
3763 }
3764
3765 return 0;
3766
3767out_free_iocbq:
3768 lpfc_free_iocb_list(phba);
3769
3770 return -ENOMEM;
3771}
3772
3773/**
3774 * lpfc_free_sgl_list - Free sgl list.
3775 * @phba: pointer to lpfc hba data structure.
3776 *
3777 * This routine is invoked to free the driver's sgl list and memory.
3778 **/
3779static void
3780lpfc_free_sgl_list(struct lpfc_hba *phba)
3781{
3782 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3783 LIST_HEAD(sglq_list);
3784 int rc = 0;
3785
3786 spin_lock_irq(&phba->hbalock);
3787 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3788 spin_unlock_irq(&phba->hbalock);
3789
3790 list_for_each_entry_safe(sglq_entry, sglq_next,
3791 &sglq_list, list) {
3792 list_del(&sglq_entry->list);
3793 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3794 kfree(sglq_entry);
3795 phba->sli4_hba.total_sglq_bufs--;
3796 }
3797 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3798 if (rc) {
3799 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3800 "2005 Unable to deregister pages from HBA: %x", rc);
3801 }
3802 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3803}
3804
3805/**
3806 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3807 * @phba: pointer to lpfc hba data structure.
3808 *
3809 * This routine is invoked to allocate the driver's active sgl memory.
3810 * This array will hold the sglq_entry's for active IOs.
3811 **/
3812static int
3813lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3814{
3815 int size;
3816 size = sizeof(struct lpfc_sglq *);
3817 size *= phba->sli4_hba.max_cfg_param.max_xri;
3818
3819 phba->sli4_hba.lpfc_sglq_active_list =
3820 kzalloc(size, GFP_KERNEL);
3821 if (!phba->sli4_hba.lpfc_sglq_active_list)
3822 return -ENOMEM;
3823 return 0;
3824}
3825
3826/**
3827 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3828 * @phba: pointer to lpfc hba data structure.
3829 *
3830 * This routine is invoked to walk through the array of active sglq entries
3831 * and free all of the resources.
 3832 * This is just a placeholder for now.
3833 **/
3834static void
3835lpfc_free_active_sgl(struct lpfc_hba *phba)
3836{
3837 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3838}
3839
3840/**
3841 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3842 * @phba: pointer to lpfc hba data structure.
3843 *
 3844 * This routine is invoked to allocate and initialize the driver's sgl
3845 * list and set up the sgl xritag tag array accordingly.
3846 *
3847 * Return codes
 3848 * 0 - successful
3849 * other values - error
3850 **/
3851static int
3852lpfc_init_sgl_list(struct lpfc_hba *phba)
3853{
3854 struct lpfc_sglq *sglq_entry = NULL;
3855 int i;
3856 int els_xri_cnt;
3857
3858 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3859 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3860 "2400 lpfc_init_sgl_list els %d.\n",
3861 els_xri_cnt);
3862 /* Initialize and populate the sglq list per host/VF. */
3863 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3864 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3865
3866 /* Sanity check on XRI management */
3867 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3868 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3869 "2562 No room left for SCSI XRI allocation: "
3870 "max_xri=%d, els_xri=%d\n",
3871 phba->sli4_hba.max_cfg_param.max_xri,
3872 els_xri_cnt);
3873 return -ENOMEM;
3874 }
3875
3876 /* Allocate memory for the ELS XRI management array */
3877 phba->sli4_hba.lpfc_els_sgl_array =
3878 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3879 GFP_KERNEL);
3880
3881 if (!phba->sli4_hba.lpfc_els_sgl_array) {
3882 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3883 "2401 Failed to allocate memory for ELS "
3884 "XRI management array of size %d.\n",
3885 els_xri_cnt);
3886 return -ENOMEM;
3887 }
3888
3889 /* Keep the SCSI XRI into the XRI management array */
3890 phba->sli4_hba.scsi_xri_max =
3891 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3892 phba->sli4_hba.scsi_xri_cnt = 0;
3893
3894 phba->sli4_hba.lpfc_scsi_psb_array =
3895 kzalloc((sizeof(struct lpfc_scsi_buf *) *
3896 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3897
3898 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3899 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3900 "2563 Failed to allocate memory for SCSI "
3901 "XRI management array of size %d.\n",
3902 phba->sli4_hba.scsi_xri_max);
3903 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3904 return -ENOMEM;
3905 }
3906
3907 for (i = 0; i < els_xri_cnt; i++) {
3908 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3909 if (sglq_entry == NULL) {
3910 printk(KERN_ERR "%s: only allocated %d sgls of "
3911 "expected %d count. Unloading driver.\n",
3912 __func__, i, els_xri_cnt);
3913 goto out_free_mem;
3914 }
3915
3916 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3917 if (sglq_entry->sli4_xritag == NO_XRI) {
3918 kfree(sglq_entry);
3919 printk(KERN_ERR "%s: failed to allocate XRI.\n"
3920 "Unloading driver.\n", __func__);
3921 goto out_free_mem;
3922 }
3923 sglq_entry->buff_type = GEN_BUFF_TYPE;
3924 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3925 if (sglq_entry->virt == NULL) {
3926 kfree(sglq_entry);
3927 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3928 "Unloading driver.\n", __func__);
3929 goto out_free_mem;
3930 }
3931 sglq_entry->sgl = sglq_entry->virt;
3932 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3933
 3934 /* The list order is used by later block SGL registration */
3935 spin_lock_irq(&phba->hbalock);
3936 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3937 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3938 phba->sli4_hba.total_sglq_bufs++;
3939 spin_unlock_irq(&phba->hbalock);
3940 }
3941 return 0;
3942
3943out_free_mem:
3944 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3945 lpfc_free_sgl_list(phba);
3946 return -ENOMEM;
3947}
3948
3949/**
3950 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
2318 * @phba: pointer to lpfc hba data structure. 3951 * @phba: pointer to lpfc hba data structure.
2319 * 3952 *
2320 * This routine is invoked to enable the MSI-X interrupt vectors. The kernel 3953 * This routine is invoked to post rpi header templates to the
2321 * function pci_enable_msix() is called to enable the MSI-X vectors. Note that 3954 * HBA consistent with the SLI-4 interface spec. This routine
2322 * pci_enable_msix(), once invoked, enables either all or nothing, depending 3955 * posts a PAGE_SIZE memory region to the port to hold up to
2323 * on the current availability of PCI vector resources. The device driver is 3956 * PAGE_SIZE modulo 64 rpi context headers.
2324 * responsible for calling the individual request_irq() to register each MSI-X 3957 * No locks are held here because this is an initialization routine
2325 * vector with a interrupt handler, which is done in this function. Note that 3958 * called only from probe or lpfc_online when interrupts are not
3959 * enabled and the driver is reinitializing the device.
3960 *
3961 * Return codes
 3962 * 0 - successful
 3963 * ENOMEM - No available memory
3964 * EIO - The mailbox failed to complete successfully.
3965 **/
3966int
3967lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3968{
3969 int rc = 0;
3970 int longs;
3971 uint16_t rpi_count;
3972 struct lpfc_rpi_hdr *rpi_hdr;
3973
3974 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3975
3976 /*
3977 * Provision an rpi bitmask range for discovery. The total count
3978 * is the difference between max and base + 1.
3979 */
3980 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3981 phba->sli4_hba.max_cfg_param.max_rpi - 1;
3982
3983 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3984 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3985 GFP_KERNEL);
3986 if (!phba->sli4_hba.rpi_bmask)
3987 return -ENOMEM;
3988
3989 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3990 if (!rpi_hdr) {
3991 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3992 "0391 Error during rpi post operation\n");
3993 lpfc_sli4_remove_rpis(phba);
3994 rc = -ENODEV;
3995 }
3996
3997 return rc;
3998}
3999
4000/**
4001 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4002 * @phba: pointer to lpfc hba data structure.
4003 *
4004 * This routine is invoked to allocate a single 4KB memory region to
4005 * support rpis and stores them in the phba. This single region
4006 * provides support for up to 64 rpis. The region is used globally
4007 * by the device.
4008 *
4009 * Returns:
4010 * A valid rpi hdr on success.
4011 * A NULL pointer on any failure.
4012 **/
4013struct lpfc_rpi_hdr *
4014lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4015{
4016 uint16_t rpi_limit, curr_rpi_range;
4017 struct lpfc_dmabuf *dmabuf;
4018 struct lpfc_rpi_hdr *rpi_hdr;
4019
4020 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4021 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4022
4023 spin_lock_irq(&phba->hbalock);
4024 curr_rpi_range = phba->sli4_hba.next_rpi;
4025 spin_unlock_irq(&phba->hbalock);
4026
4027 /*
4028 * The port has a limited number of rpis. The increment here
4029 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4030 * and to allow the full max_rpi range per port.
4031 */
4032 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4033 return NULL;
4034
4035 /*
4036 * First allocate the protocol header region for the port. The
4037 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4038 */
4039 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4040 if (!dmabuf)
4041 return NULL;
4042
4043 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4044 LPFC_HDR_TEMPLATE_SIZE,
4045 &dmabuf->phys,
4046 GFP_KERNEL);
4047 if (!dmabuf->virt) {
4048 rpi_hdr = NULL;
4049 goto err_free_dmabuf;
4050 }
4051
4052 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4053 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4054 rpi_hdr = NULL;
4055 goto err_free_coherent;
4056 }
4057
4058 /* Save the rpi header data for cleanup later. */
4059 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4060 if (!rpi_hdr)
4061 goto err_free_coherent;
4062
4063 rpi_hdr->dmabuf = dmabuf;
4064 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4065 rpi_hdr->page_count = 1;
4066 spin_lock_irq(&phba->hbalock);
4067 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4068 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4069
4070 /*
4071 * The next_rpi stores the next modulo-64 rpi value to post
4072 * in any subsequent rpi memory region postings.
4073 */
4074 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4075 spin_unlock_irq(&phba->hbalock);
4076 return rpi_hdr;
4077
4078 err_free_coherent:
4079 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4080 dmabuf->virt, dmabuf->phys);
4081 err_free_dmabuf:
4082 kfree(dmabuf);
4083 return NULL;
4084}
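
The alignment test above rejects any region whose physical address is not naturally aligned to the template size. A small sketch of the power-of-two alignment check, written against the usual definition of the kernel's IS_ALIGNED() (an assumption here, since the macro itself lives in kernel headers):

#include <stdint.h>
#include <stdio.h>

/* Power-of-two alignment test; assumed equivalent to the kernel's
 * IS_ALIGNED(): true when all the bits below 'align' are zero. */
#define ALIGNED(x, align) ((((uintptr_t)(x)) & ((uintptr_t)(align) - 1)) == 0)

int main(void)
{
        uintptr_t phys = 0x3000;        /* hypothetical DMA address */

        printf("4KB aligned: %d\n", ALIGNED(phys, 0x1000));  /* 1 */
        printf("8KB aligned: %d\n", ALIGNED(phys, 0x2000));  /* 0 */
        return 0;
}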
4085
4086/**
4087 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4088 * @phba: pointer to lpfc hba data structure.
4089 *
4090 * This routine is invoked to remove all memory resources allocated
4091 * to support rpis. This routine presumes the caller has released all
4092 * rpis consumed by fabric or port logins and is prepared to have
4093 * the header pages removed.
4094 **/
4095void
4096lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4097{
4098 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4099
4100 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4101 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4102 list_del(&rpi_hdr->list);
4103 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4104 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4105 kfree(rpi_hdr->dmabuf);
4106 kfree(rpi_hdr);
4107 }
4108
4109 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4110 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4111}
4112
4113/**
4114 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4115 * @pdev: pointer to pci device data structure.
4116 *
4117 * This routine is invoked to allocate the driver hba data structure for an
4118 * HBA device. If the allocation is successful, the phba reference to the
4119 * PCI device data structure is set.
4120 *
4121 * Return codes
4122 * pointer to @phba - successful
4123 * NULL - error
4124 **/
4125static struct lpfc_hba *
4126lpfc_hba_alloc(struct pci_dev *pdev)
4127{
4128 struct lpfc_hba *phba;
4129
4130 /* Allocate memory for HBA structure */
4131 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4132 if (!phba) {
4133 dev_err(&pdev->dev,
4134 "1417 Failed to allocate hba struct.\n");
4135 return NULL;
4136 }
4137
4138 /* Set reference to PCI device in HBA structure */
4139 phba->pcidev = pdev;
4140
4141 /* Assign an unused board number */
4142 phba->brd_no = lpfc_get_instance();
4143 if (phba->brd_no < 0) {
4144 kfree(phba);
4145 return NULL;
4146 }
4147
4148 return phba;
4149}
4150
4151/**
4152 * lpfc_hba_free - Free driver hba data structure with a device.
4153 * @phba: pointer to lpfc hba data structure.
4154 *
4155 * This routine is invoked to free the driver hba data structure with an
4156 * HBA device.
4157 **/
4158static void
4159lpfc_hba_free(struct lpfc_hba *phba)
4160{
4161 /* Release the driver assigned board number */
4162 idr_remove(&lpfc_hba_index, phba->brd_no);
4163
4164 kfree(phba);
4165 return;
4166}
4167
4168/**
4169 * lpfc_create_shost - Create hba physical port with associated scsi host.
4170 * @phba: pointer to lpfc hba data structure.
4171 *
4172 * This routine is invoked to create HBA physical port and associate a SCSI
4173 * host with it.
4174 *
4175 * Return codes
4176 * 0 - successful
4177 * other values - error
4178 **/
4179static int
4180lpfc_create_shost(struct lpfc_hba *phba)
4181{
4182 struct lpfc_vport *vport;
4183 struct Scsi_Host *shost;
4184
4185 /* Initialize HBA FC structure */
4186 phba->fc_edtov = FF_DEF_EDTOV;
4187 phba->fc_ratov = FF_DEF_RATOV;
4188 phba->fc_altov = FF_DEF_ALTOV;
4189 phba->fc_arbtov = FF_DEF_ARBTOV;
4190
4191 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4192 if (!vport)
4193 return -ENODEV;
4194
4195 shost = lpfc_shost_from_vport(vport);
4196 phba->pport = vport;
4197 lpfc_debugfs_initialize(vport);
4198 /* Put reference to SCSI host to driver's device private data */
4199 pci_set_drvdata(phba->pcidev, shost);
4200
4201 return 0;
4202}
4203
4204/**
4205 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4206 * @phba: pointer to lpfc hba data structure.
4207 *
4208 * This routine is invoked to destroy HBA physical port and the associated
4209 * SCSI host.
4210 **/
4211static void
4212lpfc_destroy_shost(struct lpfc_hba *phba)
4213{
4214 struct lpfc_vport *vport = phba->pport;
4215
4216 /* Destroy physical port that associated with the SCSI host */
4217 destroy_port(vport);
4218
4219 return;
4220}
4221
4222/**
4223 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4224 * @phba: pointer to lpfc hba data structure.
4225 * @shost: the shost to be used to detect Block guard settings.
4226 *
4227 * This routine sets up the local Block guard protocol settings for @shost.
4228 * This routine also allocates memory for debugging bg buffers.
4229 **/
4230static void
4231lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4232{
4233 int pagecnt = 10;
4234 if (lpfc_prot_mask && lpfc_prot_guard) {
4235 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4236 "1478 Registering BlockGuard with the "
4237 "SCSI layer\n");
4238 scsi_host_set_prot(shost, lpfc_prot_mask);
4239 scsi_host_set_guard(shost, lpfc_prot_guard);
4240 }
4241 if (!_dump_buf_data) {
4242 while (pagecnt) {
4243 spin_lock_init(&_dump_buf_lock);
4244 _dump_buf_data =
4245 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4246 if (_dump_buf_data) {
4247 printk(KERN_ERR "BLKGRD allocated %d pages for "
4248 "_dump_buf_data at 0x%p\n",
4249 (1 << pagecnt), _dump_buf_data);
4250 _dump_buf_data_order = pagecnt;
4251 memset(_dump_buf_data, 0,
4252 ((1 << PAGE_SHIFT) << pagecnt));
4253 break;
4254 } else
4255 --pagecnt;
4256 }
4257 if (!_dump_buf_data_order)
4258 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4259 "memory for hexdump\n");
4260 } else
4261 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4262 "\n", _dump_buf_data);
4263 if (!_dump_buf_dif) {
4264 while (pagecnt) {
4265 _dump_buf_dif =
4266 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4267 if (_dump_buf_dif) {
4268 printk(KERN_ERR "BLKGRD allocated %d pages for "
4269 "_dump_buf_dif at 0x%p\n",
4270 (1 << pagecnt), _dump_buf_dif);
4271 _dump_buf_dif_order = pagecnt;
4272 memset(_dump_buf_dif, 0,
4273 ((1 << PAGE_SHIFT) << pagecnt));
4274 break;
4275 } else
4276 --pagecnt;
4277 }
4278 if (!_dump_buf_dif_order)
4279 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4280 "memory for hexdump\n");
4281 } else
4282 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4283 _dump_buf_dif);
4284}
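
Both debug-buffer allocations above use the same fallback strategy: start at a large allocation order and step down until __get_free_pages() succeeds. A userspace sketch of that shape, with malloc standing in for the page allocator (the loop bound mirrors the driver's, which stops before order 0):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12   /* 4KB pages assumed */

/* Try progressively smaller power-of-two sizes until one succeeds,
 * like the while (pagecnt) { ... --pagecnt; } loops above. Returns the
 * buffer and stores the order obtained, or NULL if every order fails. */
static void *alloc_best_effort(int max_order, int *order)
{
        int o;

        for (o = max_order; o >= 1; o--) {
                void *buf = malloc((size_t)1 << (PAGE_SHIFT + o));
                if (buf) {
                        *order = o;
                        return buf;
                }
        }
        return NULL;
}

int main(void)
{
        int order = 0;
        void *buf = alloc_best_effort(10, &order);

        if (!buf)
                return 1;
        printf("got an order-%d buffer (%d pages)\n", order, 1 << order);
        free(buf);
        return 0;
}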
4285
4286/**
4287 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4288 * @phba: pointer to lpfc hba data structure.
4289 *
4290 * This routine is invoked to perform all the necessary post initialization
4291 * setup for the device.
4292 **/
4293static void
4294lpfc_post_init_setup(struct lpfc_hba *phba)
4295{
4296 struct Scsi_Host *shost;
4297 struct lpfc_adapter_event_header adapter_event;
4298
4299 /* Get the default values for Model Name and Description */
4300 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4301
4302 /*
4303 * hba setup may have changed the hba_queue_depth so we need to
4304 * adjust the value of can_queue.
4305 */
4306 shost = pci_get_drvdata(phba->pcidev);
4307 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4308 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4309 lpfc_setup_bg(phba, shost);
4310
4311 lpfc_host_attrib_init(shost);
4312
4313 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4314 spin_lock_irq(shost->host_lock);
4315 lpfc_poll_start_timer(phba);
4316 spin_unlock_irq(shost->host_lock);
4317 }
4318
4319 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4320 "0428 Perform SCSI scan\n");
4321 /* Send board arrival event to upper layer */
4322 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4323 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4324 fc_host_post_vendor_event(shost, fc_get_event_number(),
4325 sizeof(adapter_event),
4326 (char *) &adapter_event,
4327 LPFC_NL_VENDOR_ID);
4328 return;
4329}
4330
4331/**
4332 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4333 * @phba: pointer to lpfc hba data structure.
4334 *
4335 * This routine is invoked to set up the PCI device memory space for device
4336 * with SLI-3 interface spec.
4337 *
4338 * Return codes
4339 * 0 - successful
4340 * other values - error
4341 **/
4342static int
4343lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4344{
4345 struct pci_dev *pdev;
4346 unsigned long bar0map_len, bar2map_len;
4347 int i, hbq_count;
4348 void *ptr;
4349 int error = -ENODEV;
4350
4351 /* Obtain PCI device reference */
4352 if (!phba->pcidev)
4353 return error;
4354 else
4355 pdev = phba->pcidev;
4356
4357 /* Set the device DMA mask size */
4358 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4359 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4360 return error;
4361
4362 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4363 * required by each mapping.
4364 */
4365 phba->pci_bar0_map = pci_resource_start(pdev, 0);
4366 bar0map_len = pci_resource_len(pdev, 0);
4367
4368 phba->pci_bar2_map = pci_resource_start(pdev, 2);
4369 bar2map_len = pci_resource_len(pdev, 2);
4370
4371 /* Map HBA SLIM to a kernel virtual address. */
4372 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4373 if (!phba->slim_memmap_p) {
4374 dev_printk(KERN_ERR, &pdev->dev,
4375 "ioremap failed for SLIM memory.\n");
4376 goto out;
4377 }
4378
4379 /* Map HBA Control Registers to a kernel virtual address. */
4380 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4381 if (!phba->ctrl_regs_memmap_p) {
4382 dev_printk(KERN_ERR, &pdev->dev,
4383 "ioremap failed for HBA control registers.\n");
4384 goto out_iounmap_slim;
4385 }
4386
4387 /* Allocate memory for SLI-2 structures */
4388 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4389 SLI2_SLIM_SIZE,
4390 &phba->slim2p.phys,
4391 GFP_KERNEL);
4392 if (!phba->slim2p.virt)
4393 goto out_iounmap;
4394
4395 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4396 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4397 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4398 phba->IOCBs = (phba->slim2p.virt +
4399 offsetof(struct lpfc_sli2_slim, IOCBs));
4400
4401 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4402 lpfc_sli_hbq_size(),
4403 &phba->hbqslimp.phys,
4404 GFP_KERNEL);
4405 if (!phba->hbqslimp.virt)
4406 goto out_free_slim;
4407
4408 hbq_count = lpfc_sli_hbq_count();
4409 ptr = phba->hbqslimp.virt;
4410 for (i = 0; i < hbq_count; ++i) {
4411 phba->hbqs[i].hbq_virt = ptr;
4412 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4413 ptr += (lpfc_hbq_defs[i]->entry_count *
4414 sizeof(struct lpfc_hbq_entry));
4415 }
4416 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4417 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4418
4419 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4420
4421 INIT_LIST_HEAD(&phba->rb_pend_list);
4422
4423 phba->MBslimaddr = phba->slim_memmap_p;
4424 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4425 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4426 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4427 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4428
4429 return 0;
4430
4431out_free_slim:
4432 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4433 phba->slim2p.virt, phba->slim2p.phys);
4434out_iounmap:
4435 iounmap(phba->ctrl_regs_memmap_p);
4436out_iounmap_slim:
4437 iounmap(phba->slim_memmap_p);
4438out:
4439 return error;
4440}
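
The unwind labels at the bottom of the function release resources in exactly the reverse order of acquisition, so every failure point can jump to the correct depth. A compact userspace sketch of that idiom, with heap buffers standing in for the ioremap() and DMA allocations above:

#include <stdlib.h>

/* Staged acquisition with reverse-order unwind labels -- the same
 * error-handling shape as lpfc_sli_pci_mem_setup() above. */
static int setup_resources(void)
{
        void *slim, *ctrl_regs, *slim2p;

        slim = malloc(64);
        if (!slim)
                goto out;
        ctrl_regs = malloc(64);
        if (!ctrl_regs)
                goto out_free_slim;
        slim2p = malloc(256);
        if (!slim2p)
                goto out_free_ctrl;

        /* ... use the resources, then release in reverse order ... */
        free(slim2p);
        free(ctrl_regs);
        free(slim);
        return 0;

out_free_ctrl:
        free(ctrl_regs);
out_free_slim:
        free(slim);
out:
        return -1;
}

int main(void)
{
        return setup_resources() ? 1 : 0;
}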
4441
4442/**
4443 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4444 * @phba: pointer to lpfc hba data structure.
4445 *
4446 * This routine is invoked to unset the PCI device memory space for device
4447 * with SLI-3 interface spec.
4448 **/
4449static void
4450lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4451{
4452 struct pci_dev *pdev;
4453
4454 /* Obtain PCI device reference */
4455 if (!phba->pcidev)
4456 return;
4457 else
4458 pdev = phba->pcidev;
4459
4460 /* Free coherent DMA memory allocated */
4461 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4462 phba->hbqslimp.virt, phba->hbqslimp.phys);
4463 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4464 phba->slim2p.virt, phba->slim2p.phys);
4465
4466 /* I/O memory unmap */
4467 iounmap(phba->ctrl_regs_memmap_p);
4468 iounmap(phba->slim_memmap_p);
4469
4470 return;
4471}
4472
4473/**
4474 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4475 * @phba: pointer to lpfc hba data structure.
4476 *
4477 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4478 * done and check status.
4479 *
4480 * Return 0 if successful, otherwise -ENODEV.
4481 **/
4482int
4483lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4484{
4485 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4486 uint32_t onlnreg0, onlnreg1;
4487 int i, port_error = -ENODEV;
4488
4489 if (!phba->sli4_hba.STAregaddr)
4490 return -ENODEV;
4491
4492 /* On an unrecoverable error, log the error message and return error */
4493 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4494 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4495 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4496 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4497 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4498 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4500 "1422 HBA Unrecoverable error: "
4501 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4502 "online0_reg=0x%x, online1_reg=0x%x\n",
4503 uerrlo_reg.word0, uerrhi_reg.word0,
4504 onlnreg0, onlnreg1);
4505 }
4506 return -ENODEV;
4507 }
4508
4509 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4510 for (i = 0; i < 3000; i++) {
4511 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4512 /* Encounter fatal POST error, break out */
4513 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4514 port_error = -ENODEV;
4515 break;
4516 }
4517 if (LPFC_POST_STAGE_ARMFW_READY ==
4518 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4519 port_error = 0;
4520 break;
4521 }
4522 msleep(10);
4523 }
4524
4525 if (port_error)
4526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4527 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4528 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4529 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4530 bf_get(lpfc_hst_state_perr, &sta_reg),
4531 bf_get(lpfc_hst_state_sfi, &sta_reg),
4532 bf_get(lpfc_hst_state_nip, &sta_reg),
4533 bf_get(lpfc_hst_state_ipc, &sta_reg),
4534 bf_get(lpfc_hst_state_xrom, &sta_reg),
4535 bf_get(lpfc_hst_state_dl, &sta_reg),
4536 bf_get(lpfc_hst_state_port_status, &sta_reg));
4537
4538 /* Log device information */
4539 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
4540 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4541 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4542 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4543 bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4544 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4545 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4546 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4547
4548 return port_error;
4549}
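
The POST wait above is a bounded polling loop: up to 3000 iterations of a 10 ms sleep, i.e. a 30 second ceiling, bailing out early on success or a fatal status. A userspace sketch of the same shape, with a stub predicate standing in for the status-register read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for reading the port status register; pretends the
 * hypothetical device becomes ready on the 5th poll. */
static bool port_ready(int iteration)
{
        return iteration >= 5;
}

int main(void)
{
        int i;

        /* 3000 polls x 10 ms = 30 s ceiling, like the loop above. */
        for (i = 0; i < 3000; i++) {
                if (port_ready(i)) {
                        printf("ready after %d polls\n", i);
                        return 0;
                }
                usleep(10 * 1000);      /* msleep(10) equivalent */
        }
        fprintf(stderr, "POST timed out\n");
        return 1;
}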
4550
4551/**
4552 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4553 * @phba: pointer to lpfc hba data structure.
4554 *
4555 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4556 * memory map.
4557 **/
4558static void
4559lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4560{
4561 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4562 LPFC_UERR_STATUS_LO;
4563 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4564 LPFC_UERR_STATUS_HI;
4565 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4566 LPFC_ONLINE0;
4567 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4568 LPFC_ONLINE1;
4569 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4570 LPFC_SCRATCHPAD;
4571}
4572
4573/**
4574 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4575 * @phba: pointer to lpfc hba data structure.
4576 *
4577 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4578 * memory map.
4579 **/
4580static void
4581lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4582{
4583
4584 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4585 LPFC_HST_STATE;
4586 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4587 LPFC_HST_ISR0;
4588 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4589 LPFC_HST_IMR0;
4590 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4591 LPFC_HST_ISCR0;
4592 return;
4593}
4594
4595/**
4596 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4597 * @phba: pointer to lpfc hba data structure.
4598 * @vf: virtual function number
4599 *
4600 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4601 * based on the given virtual function number, @vf.
4602 *
4603 * Return 0 if successful, otherwise -ENODEV.
4604 **/
4605static int
4606lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4607{
4608 if (vf > LPFC_VIR_FUNC_MAX)
4609 return -ENODEV;
4610
4611 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4612 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4613 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4614 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4615 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4616 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4617 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4618 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4619 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4620 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4621 return 0;
4622}
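
Each doorbell register above is located by one formula: mapped BAR2 base, plus one page of doorbell space per virtual function, plus a fixed per-register offset. A sketch of that address arithmetic with illustrative constants (the real offsets are defined in the driver headers and may differ):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout constants; the real values live in lpfc_hw4.h. */
#define VFR_PAGE_SIZE  0x1000
#define RQ_DOORBELL    0x00A0
#define WQ_DOORBELL    0x0040

/* base + vf * page_size + register offset, as in the BAR2 setup above. */
static uintptr_t doorbell_addr(uintptr_t base, uint32_t vf, uint32_t reg)
{
        return base + (uintptr_t)vf * VFR_PAGE_SIZE + reg;
}

int main(void)
{
        uintptr_t bar2 = 0xF0000000;    /* hypothetical mapped BAR2 base */

        printf("vf0 WQ doorbell: 0x%lx\n",
               (unsigned long)doorbell_addr(bar2, 0, WQ_DOORBELL));
        printf("vf3 RQ doorbell: 0x%lx\n",
               (unsigned long)doorbell_addr(bar2, 3, RQ_DOORBELL));
        return 0;
}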
4623
4624/**
4625 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4626 * @phba: pointer to lpfc hba data structure.
4627 *
4628 * This routine is invoked to create the bootstrap mailbox
4629 * region consistent with the SLI-4 interface spec. This
4630 * routine allocates all memory necessary to communicate
4631 * mailbox commands to the port and sets up all alignment
4632 * needs. No locks are expected to be held when calling
4633 * this routine.
4634 *
4635 * Return codes
4636 * 0 - successful
4637 * ENOMEM - could not allocate memory.
4638 **/
4639static int
4640lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4641{
4642 uint32_t bmbx_size;
4643 struct lpfc_dmabuf *dmabuf;
4644 struct dma_address *dma_address;
4645 uint32_t pa_addr;
4646 uint64_t phys_addr;
4647
4648 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4649 if (!dmabuf)
4650 return -ENOMEM;
4651
4652 /*
4653 * The bootstrap mailbox region is comprised of 2 parts
4654 * plus an alignment restriction of 16 bytes.
4655 */
4656 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4657 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4658 bmbx_size,
4659 &dmabuf->phys,
4660 GFP_KERNEL);
4661 if (!dmabuf->virt) {
4662 kfree(dmabuf);
4663 return -ENOMEM;
4664 }
4665 memset(dmabuf->virt, 0, bmbx_size);
4666
4667 /*
4668 * Initialize the bootstrap mailbox pointers now so that the register
4669 * operations are simple later. The mailbox dma address is required
4670 * to be 16-byte aligned. Also align the virtual memory as each
4671 * mailbox is copied into the bmbx mailbox region before issuing the
4672 * command to the port.
4673 */
4674 phba->sli4_hba.bmbx.dmabuf = dmabuf;
4675 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4676
4677 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4678 LPFC_ALIGN_16_BYTE);
4679 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4680 LPFC_ALIGN_16_BYTE);
4681
4682 /*
4683 * Set the high and low physical addresses now. The SLI4 alignment
4684 * requirement is 16 bytes and the mailbox is posted to the port
4685 * as two 30-bit addresses. The other data is a bit marking whether
4686 * the 30-bit address is the high or low address.
4687 * Upcast bmbx aphys to 64bits so shift instruction compiles
4688 * clean on 32 bit machines.
4689 */
4690 dma_address = &phba->sli4_hba.bmbx.dma_address;
4691 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4692 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4693 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4694 LPFC_BMBX_BIT1_ADDR_HI);
4695
4696 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4697 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4698 LPFC_BMBX_BIT1_ADDR_LO);
4699 return 0;
4700}
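
The hi/lo word construction above packs a 64-bit, 16-byte-aligned physical address into two 30-bit fields, each shifted left two bits and tagged with a marker bit. A standalone sketch of that packing (the marker values are placeholders, not the driver's LPFC_BMBX_BIT1_* constants):

#include <stdint.h>
#include <stdio.h>

#define BMBX_BIT1_ADDR_HI 0x1   /* hypothetical marker values */
#define BMBX_BIT1_ADDR_LO 0x0

/* High word carries bits 63:34, low word bits 33:4; each 30-bit field
 * is shifted left 2 and OR'd with a hi/lo marker, as done above. */
static void bmbx_split(uint64_t aphys, uint32_t *hi, uint32_t *lo)
{
        uint32_t pa;

        pa  = (uint32_t)((aphys >> 34) & 0x3fffffff);
        *hi = (pa << 2) | BMBX_BIT1_ADDR_HI;
        pa  = (uint32_t)((aphys >> 4) & 0x3fffffff);
        *lo = (pa << 2) | BMBX_BIT1_ADDR_LO;
}

int main(void)
{
        uint32_t hi, lo;

        bmbx_split(0x123456789ab0ULL, &hi, &lo);        /* 16-byte aligned */
        printf("addr_hi=0x%08x addr_lo=0x%08x\n", hi, lo);
        return 0;
}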
4701
4702/**
4703 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4704 * @phba: pointer to lpfc hba data structure.
4705 *
4706 * This routine is invoked to teardown the bootstrap mailbox
4707 * region and release all host resources. This routine requires
4708 * the caller to ensure all mailbox commands have been recovered, no
4709 * additional mailbox commands are sent, and interrupts are disabled
4710 * before calling this routine.
4711 *
4712 **/
4713static void
4714lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4715{
4716 dma_free_coherent(&phba->pcidev->dev,
4717 phba->sli4_hba.bmbx.bmbx_size,
4718 phba->sli4_hba.bmbx.dmabuf->virt,
4719 phba->sli4_hba.bmbx.dmabuf->phys);
4720
4721 kfree(phba->sli4_hba.bmbx.dmabuf);
4722 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4723}
4724
4725/**
4726 * lpfc_sli4_read_config - Get the config parameters.
4727 * @phba: pointer to lpfc hba data structure.
4728 *
4729 * This routine is invoked to read the configuration parameters from the HBA.
4730 * The configuration parameters are used to set the base and maximum values
4731 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
4732 * allocation for the port.
4733 *
4734 * Return codes
4735 * 0 - successful
4736 * ENOMEM - No available memory
4737 * EIO - The mailbox failed to complete successfully.
4738 **/
4739static int
4740lpfc_sli4_read_config(struct lpfc_hba *phba)
4741{
4742 LPFC_MBOXQ_t *pmb;
4743 struct lpfc_mbx_read_config *rd_config;
4744 uint32_t rc = 0;
4745
4746 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4747 if (!pmb) {
4748 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4749 "2011 Unable to allocate memory for issuing "
4750 "SLI_CONFIG_SPECIAL mailbox command\n");
4751 return -ENOMEM;
4752 }
4753
4754 lpfc_read_config(phba, pmb);
4755
4756 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4757 if (rc != MBX_SUCCESS) {
4758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4759 "2012 Mailbox failed , mbxCmd x%x "
4760 "READ_CONFIG, mbxStatus x%x\n",
4761 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4762 bf_get(lpfc_mqe_status, &pmb->u.mqe));
4763 rc = -EIO;
4764 } else {
4765 rd_config = &pmb->u.mqe.un.rd_config;
4766 phba->sli4_hba.max_cfg_param.max_xri =
4767 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4768 phba->sli4_hba.max_cfg_param.xri_base =
4769 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4770 phba->sli4_hba.max_cfg_param.max_vpi =
4771 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4772 phba->sli4_hba.max_cfg_param.vpi_base =
4773 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4774 phba->sli4_hba.max_cfg_param.max_rpi =
4775 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4776 phba->sli4_hba.max_cfg_param.rpi_base =
4777 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4778 phba->sli4_hba.max_cfg_param.max_vfi =
4779 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4780 phba->sli4_hba.max_cfg_param.vfi_base =
4781 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4782 phba->sli4_hba.max_cfg_param.max_fcfi =
4783 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4784 phba->sli4_hba.max_cfg_param.fcfi_base =
4785 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4786 phba->sli4_hba.max_cfg_param.max_eq =
4787 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4788 phba->sli4_hba.max_cfg_param.max_rq =
4789 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4790 phba->sli4_hba.max_cfg_param.max_wq =
4791 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4792 phba->sli4_hba.max_cfg_param.max_cq =
4793 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4794 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4795 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4796 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4797 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4798 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4799 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4800 phba->max_vports = phba->max_vpi;
4801 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4802 "2003 cfg params XRI(B:%d M:%d), "
4803 "VPI(B:%d M:%d) "
4804 "VFI(B:%d M:%d) "
4805 "RPI(B:%d M:%d) "
4806 "FCFI(B:%d M:%d)\n",
4807 phba->sli4_hba.max_cfg_param.xri_base,
4808 phba->sli4_hba.max_cfg_param.max_xri,
4809 phba->sli4_hba.max_cfg_param.vpi_base,
4810 phba->sli4_hba.max_cfg_param.max_vpi,
4811 phba->sli4_hba.max_cfg_param.vfi_base,
4812 phba->sli4_hba.max_cfg_param.max_vfi,
4813 phba->sli4_hba.max_cfg_param.rpi_base,
4814 phba->sli4_hba.max_cfg_param.max_rpi,
4815 phba->sli4_hba.max_cfg_param.fcfi_base,
4816 phba->sli4_hba.max_cfg_param.max_fcfi);
4817 }
4818 mempool_free(pmb, phba->mbox_mem_pool);
4819
4820 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
4821 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4822 phba->cfg_hba_queue_depth =
4823 phba->sli4_hba.max_cfg_param.max_xri;
4824 return rc;
4825}
4826
4827/**
4828 * lpfc_setup_endian_order - Notify the port of the host's endian order.
4829 * @phba: pointer to lpfc hba data structure.
4830 *
4831 * This routine is invoked to setup the host-side endian order to the
4832 * HBA consistent with the SLI-4 interface spec.
4833 *
4834 * Return codes
4835 * 0 - successful
4836 * ENOMEM - No available memory
4837 * EIO - The mailbox failed to complete successfully.
4838 **/
4839static int
4840lpfc_setup_endian_order(struct lpfc_hba *phba)
4841{
4842 LPFC_MBOXQ_t *mboxq;
4843 uint32_t rc = 0;
4844 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4845 HOST_ENDIAN_HIGH_WORD1};
4846
4847 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4848 if (!mboxq) {
4849 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4850 "0492 Unable to allocate memory for issuing "
4851 "SLI_CONFIG_SPECIAL mailbox command\n");
4852 return -ENOMEM;
4853 }
4854
4855 /*
4856 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4857 * words to contain special data values and no other data.
4858 */
4859 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4860 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4861 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4862 if (rc != MBX_SUCCESS) {
4863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4864 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
4865 "status x%x\n",
4866 rc);
4867 rc = -EIO;
4868 }
4869
4870 mempool_free(mboxq, phba->mbox_mem_pool);
4871 return rc;
4872}
4873
4874/**
4875 * lpfc_sli4_queue_create - Create all the SLI4 queues
4876 * @phba: pointer to lpfc hba data structure.
4877 *
4878 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4879 * operation. For each SLI4 queue type, the parameters such as queue entry
4880 * count (queue depth) shall be taken from the module parameter. For now,
4881 * we just use some constant number as a placeholder.
4882 *
4883 * Return codes
4884 * 0 - successful
4885 * ENOMEM - No available memory
4886 * EIO - The mailbox failed to complete successfully.
4887 **/
4888static int
4889lpfc_sli4_queue_create(struct lpfc_hba *phba)
4890{
4891 struct lpfc_queue *qdesc;
4892 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4893 int cfg_fcp_wq_count;
4894 int cfg_fcp_eq_count;
4895
4896 /*
4897 * Sanity check for configured queue parameters against the run-time
4898 * device parameters
4899 */
4900
4901 /* Sanity check on FCP fast-path WQ parameters */
4902 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4903 if (cfg_fcp_wq_count >
4904 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4905 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4906 LPFC_SP_WQN_DEF;
4907 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4909 "2581 Not enough WQs (%d) from "
4910 "the pci function for supporting "
4911 "FCP WQs (%d)\n",
4912 phba->sli4_hba.max_cfg_param.max_wq,
4913 phba->cfg_fcp_wq_count);
4914 goto out_error;
4915 }
4916 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4917 "2582 Not enough WQs (%d) from the pci "
4918 "function for supporting the requested "
4919 "FCP WQs (%d), the actual FCP WQs can "
4920 "be supported: %d\n",
4921 phba->sli4_hba.max_cfg_param.max_wq,
4922 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4923 }
4924 /* The actual number of FCP work queues adopted */
4925 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4926
4927 /* Sanity check on FCP fast-path EQ parameters */
4928 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4929 if (cfg_fcp_eq_count >
4930 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4931 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4932 LPFC_SP_EQN_DEF;
4933 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4934 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4935 "2574 Not enough EQs (%d) from the "
4936 "pci function for supporting FCP "
4937 "EQs (%d)\n",
4938 phba->sli4_hba.max_cfg_param.max_eq,
4939 phba->cfg_fcp_eq_count);
4940 goto out_error;
4941 }
4942 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4943 "2575 Not enough EQs (%d) from the pci "
4944 "function for supporting the requested "
4945 "FCP EQs (%d), the actual FCP EQs can "
4946 "be supported: %d\n",
4947 phba->sli4_hba.max_cfg_param.max_eq,
4948 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4949 }
4950 /* It does not make sense to have more EQs than WQs */
4951 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4952 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4953 "2593 The number of FCP EQs (%d) is more "
4954 "than the number of FCP WQs (%d), take "
4955 "the number of FCP EQs same as than of "
4956 "WQs (%d)\n", cfg_fcp_eq_count,
4957 phba->cfg_fcp_wq_count,
4958 phba->cfg_fcp_wq_count);
4959 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4960 }
4961 /* The actual number of FCP event queues adopted */
4962 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4963 /* The overall number of event queues used */
4964 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4965
4966 /*
4967 * Create Event Queues (EQs)
4968 */
4969
4970 /* Get EQ depth from module parameter, fake the default for now */
4971 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4972 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4973
4974 /* Create slow path event queue */
4975 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4976 phba->sli4_hba.eq_ecount);
4977 if (!qdesc) {
4978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4979 "0496 Failed allocate slow-path EQ\n");
4980 goto out_error;
4981 }
4982 phba->sli4_hba.sp_eq = qdesc;
4983
4984 /* Create fast-path FCP Event Queue(s) */
4985 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4986 phba->cfg_fcp_eq_count), GFP_KERNEL);
4987 if (!phba->sli4_hba.fp_eq) {
4988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4989 "2576 Failed allocate memory for fast-path "
4990 "EQ record array\n");
4991 goto out_free_sp_eq;
4992 }
4993 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4994 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4995 phba->sli4_hba.eq_ecount);
4996 if (!qdesc) {
4997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4998 "0497 Failed allocate fast-path EQ\n");
4999 goto out_free_fp_eq;
5000 }
5001 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5002 }
5003
5004 /*
5005 * Create Complete Queues (CQs)
5006 */
5007
5008 /* Get CQ depth from module parameter, fake the default for now */
5009 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5010 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5011
5012 /* Create slow-path Mailbox Command Complete Queue */
5013 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5014 phba->sli4_hba.cq_ecount);
5015 if (!qdesc) {
5016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5017 "0500 Failed allocate slow-path mailbox CQ\n");
5018 goto out_free_fp_eq;
5019 }
5020 phba->sli4_hba.mbx_cq = qdesc;
5021
5022 /* Create slow-path ELS Complete Queue */
5023 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5024 phba->sli4_hba.cq_ecount);
5025 if (!qdesc) {
5026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5027 "0501 Failed allocate slow-path ELS CQ\n");
5028 goto out_free_mbx_cq;
5029 }
5030 phba->sli4_hba.els_cq = qdesc;
5031
5032 /* Create slow-path Unsolicited Receive Complete Queue */
5033 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5034 phba->sli4_hba.cq_ecount);
5035 if (!qdesc) {
5036 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5037 "0502 Failed allocate slow-path USOL RX CQ\n");
5038 goto out_free_els_cq;
5039 }
5040 phba->sli4_hba.rxq_cq = qdesc;
5041
5042 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5043 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5044 phba->cfg_fcp_eq_count), GFP_KERNEL);
5045 if (!phba->sli4_hba.fcp_cq) {
5046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5047 "2577 Failed allocate memory for fast-path "
5048 "CQ record array\n");
5049 goto out_free_rxq_cq;
5050 }
5051 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5052 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5053 phba->sli4_hba.cq_ecount);
5054 if (!qdesc) {
5055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5056 "0499 Failed allocate fast-path FCP "
5057 "CQ (%d)\n", fcp_cqidx);
5058 goto out_free_fcp_cq;
5059 }
5060 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5061 }
5062
5063 /* Create Mailbox Command Queue */
5064 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5065 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5066
5067 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5068 phba->sli4_hba.mq_ecount);
5069 if (!qdesc) {
5070 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5071 "0505 Failed allocate slow-path MQ\n");
5072 goto out_free_fcp_cq;
5073 }
5074 phba->sli4_hba.mbx_wq = qdesc;
5075
5076 /*
5077 * Create all the Work Queues (WQs)
5078 */
5079 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5080 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5081
5082 /* Create slow-path ELS Work Queue */
5083 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5084 phba->sli4_hba.wq_ecount);
5085 if (!qdesc) {
5086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5087 "0504 Failed allocate slow-path ELS WQ\n");
5088 goto out_free_mbx_wq;
5089 }
5090 phba->sli4_hba.els_wq = qdesc;
5091
5092 /* Create fast-path FCP Work Queue(s) */
5093 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5094 phba->cfg_fcp_wq_count), GFP_KERNEL);
5095 if (!phba->sli4_hba.fcp_wq) {
5096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5097 "2578 Failed allocate memory for fast-path "
5098 "WQ record array\n");
5099 goto out_free_els_wq;
5100 }
5101 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5102 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5103 phba->sli4_hba.wq_ecount);
5104 if (!qdesc) {
5105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5106 "0503 Failed allocate fast-path FCP "
5107 "WQ (%d)\n", fcp_wqidx);
5108 goto out_free_fcp_wq;
5109 }
5110 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5111 }
5112
5113 /*
5114 * Create Receive Queue (RQ)
5115 */
5116 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5117 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5118
5119 /* Create Receive Queue for header */
5120 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5121 phba->sli4_hba.rq_ecount);
5122 if (!qdesc) {
5123 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5124 "0506 Failed allocate receive HRQ\n");
5125 goto out_free_fcp_wq;
5126 }
5127 phba->sli4_hba.hdr_rq = qdesc;
5128
5129 /* Create Receive Queue for data */
5130 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5131 phba->sli4_hba.rq_ecount);
5132 if (!qdesc) {
5133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5134 "0507 Failed allocate receive DRQ\n");
5135 goto out_free_hdr_rq;
5136 }
5137 phba->sli4_hba.dat_rq = qdesc;
5138
5139 return 0;
5140
5141out_free_hdr_rq:
5142 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5143 phba->sli4_hba.hdr_rq = NULL;
5144out_free_fcp_wq:
5145 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5146 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5147 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5148 }
5149 kfree(phba->sli4_hba.fcp_wq);
5150out_free_els_wq:
5151 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5152 phba->sli4_hba.els_wq = NULL;
5153out_free_mbx_wq:
5154 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5155 phba->sli4_hba.mbx_wq = NULL;
5156out_free_fcp_cq:
5157 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5158 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5159 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5160 }
5161 kfree(phba->sli4_hba.fcp_cq);
5162out_free_rxq_cq:
5163 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5164 phba->sli4_hba.rxq_cq = NULL;
5165out_free_els_cq:
5166 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5167 phba->sli4_hba.els_cq = NULL;
5168out_free_mbx_cq:
5169 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5170 phba->sli4_hba.mbx_cq = NULL;
5171out_free_fp_eq:
5172 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5173 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5174 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5175 }
5176 kfree(phba->sli4_hba.fp_eq);
5177out_free_sp_eq:
5178 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5179 phba->sli4_hba.sp_eq = NULL;
5180out_error:
5181 return -ENOMEM;
5182}
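
The two sanity checks at the top of this function share one policy: reserve the slow-path queues first, then clamp the requested fast-path count to whatever the PCI function has left, failing outright only below a minimum. A small sketch of that clamping policy (the constants are illustrative, not the driver's LPFC_SP_WQN_DEF/LPFC_FP_WQN_MIN values):

#include <stdio.h>

#define SP_QUEUES_RESERVED 1    /* hypothetical slow-path reservation */
#define FP_QUEUES_MIN      1

/* Clamp a requested fast-path queue count to what the function exposes
 * after reserving slow-path queues, as the checks above do. */
static int clamp_fp_queues(int requested, int device_max)
{
        int avail = device_max - SP_QUEUES_RESERVED;

        if (requested <= avail)
                return requested;       /* request fits as-is */
        if (avail < FP_QUEUES_MIN)
                return -1;              /* cannot run at all */
        return avail;                   /* run degraded */
}

int main(void)
{
        printf("%d\n", clamp_fp_queues(4, 8));  /* fits: 4 */
        printf("%d\n", clamp_fp_queues(8, 4));  /* clamped: 3 */
        return 0;
}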
5183
5184/**
5185 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5186 * @phba: pointer to lpfc hba data structure.
5187 *
5188 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5189 * operation.
5190 *
5191 * Return codes
5192 * 0 - successful
5193 * ENOMEM - No available memory
5194 * EIO - The mailbox failed to complete successfully.
5195 **/
5196static void
5197lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5198{
5199 int fcp_qidx;
5200
5201 /* Release mailbox command work queue */
5202 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5203 phba->sli4_hba.mbx_wq = NULL;
5204
5205 /* Release ELS work queue */
5206 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5207 phba->sli4_hba.els_wq = NULL;
5208
5209 /* Release FCP work queue */
5210 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5211 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5212 kfree(phba->sli4_hba.fcp_wq);
5213 phba->sli4_hba.fcp_wq = NULL;
5214
5215 /* Release unsolicited receive queue */
5216 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5217 phba->sli4_hba.hdr_rq = NULL;
5218 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5219 phba->sli4_hba.dat_rq = NULL;
5220
5221 /* Release unsolicited receive complete queue */
5222 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5223 phba->sli4_hba.rxq_cq = NULL;
5224
5225 /* Release ELS complete queue */
5226 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5227 phba->sli4_hba.els_cq = NULL;
5228
5229 /* Release mailbox command complete queue */
5230 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5231 phba->sli4_hba.mbx_cq = NULL;
5232
5233 /* Release FCP response complete queue */
5234 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5235 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5236 kfree(phba->sli4_hba.fcp_cq);
5237 phba->sli4_hba.fcp_cq = NULL;
5238
5239 /* Release fast-path event queue */
5240 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5241 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5242 kfree(phba->sli4_hba.fp_eq);
5243 phba->sli4_hba.fp_eq = NULL;
5244
5245 /* Release slow-path event queue */
5246 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5247 phba->sli4_hba.sp_eq = NULL;
5248
5249 return;
5250}
5251
5252/**
5253 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5254 * @phba: pointer to lpfc hba data structure.
5255 *
5256 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5257 * operation.
5258 *
5259 * Return codes
5260 * 0 - successful
5261 * ENOMEM - No available memory
5262 * EIO - The mailbox failed to complete successfully.
5263 **/
5264int
5265lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5266{
5267 int rc = -ENOMEM;
5268 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5269 int fcp_cq_index = 0;
5270
5271 /*
5272 * Set up Event Queues (EQs)
5273 */
5274
5275 /* Set up slow-path event queue */
5276 if (!phba->sli4_hba.sp_eq) {
5277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5278 "0520 Slow-path EQ not allocated\n");
5279 goto out_error;
5280 }
5281 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5282 LPFC_SP_DEF_IMAX);
5283 if (rc) {
5284 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5285 "0521 Failed setup of slow-path EQ: "
5286 "rc = 0x%x\n", rc);
5287 goto out_error;
5288 }
5289 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5290 "2583 Slow-path EQ setup: queue-id=%d\n",
5291 phba->sli4_hba.sp_eq->queue_id);
5292
5293 /* Set up fast-path event queue */
5294 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5295 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5297 "0522 Fast-path EQ (%d) not "
5298 "allocated\n", fcp_eqidx);
5299 goto out_destroy_fp_eq;
5300 }
5301 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5302 phba->cfg_fcp_imax);
5303 if (rc) {
5304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5305 "0523 Failed setup of fast-path EQ "
5306 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
5307 goto out_destroy_fp_eq;
5308 }
5309 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5310 "2584 Fast-path EQ setup: "
5311 "queue[%d]-id=%d\n", fcp_eqidx,
5312 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5313 }
5314
5315 /*
5316 * Set up Complete Queues (CQs)
5317 */
5318
5319 /* Set up slow-path MBOX Complete Queue as the first CQ */
5320 if (!phba->sli4_hba.mbx_cq) {
5321 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5322 "0528 Mailbox CQ not allocated\n");
5323 goto out_destroy_fp_eq;
5324 }
5325 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5326 LPFC_MCQ, LPFC_MBOX);
5327 if (rc) {
5328 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5329 "0529 Failed setup of slow-path mailbox CQ: "
5330 "rc = 0x%x\n", rc);
5331 goto out_destroy_fp_eq;
5332 }
5333 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5334 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5335 phba->sli4_hba.mbx_cq->queue_id,
5336 phba->sli4_hba.sp_eq->queue_id);
5337
5338 /* Set up slow-path ELS Complete Queue */
5339 if (!phba->sli4_hba.els_cq) {
5340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5341 "0530 ELS CQ not allocated\n");
5342 goto out_destroy_mbx_cq;
5343 }
5344 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5345 LPFC_WCQ, LPFC_ELS);
5346 if (rc) {
5347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5348 "0531 Failed setup of slow-path ELS CQ: "
5349 "rc = 0x%x\n", rc);
5350 goto out_destroy_mbx_cq;
5351 }
5352 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5353 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5354 phba->sli4_hba.els_cq->queue_id,
5355 phba->sli4_hba.sp_eq->queue_id);
5356
5357 /* Set up slow-path Unsolicited Receive Complete Queue */
5358 if (!phba->sli4_hba.rxq_cq) {
5359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5360 "0532 USOL RX CQ not allocated\n");
5361 goto out_destroy_els_cq;
5362 }
5363 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5364 LPFC_RCQ, LPFC_USOL);
5365 if (rc) {
5366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5367 "0533 Failed setup of slow-path USOL RX CQ: "
5368 "rc = 0x%x\n", rc);
5369 goto out_destroy_els_cq;
5370 }
5371 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5372 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5373 phba->sli4_hba.rxq_cq->queue_id,
5374 phba->sli4_hba.sp_eq->queue_id);
5375
5376 /* Set up fast-path FCP Response Complete Queue */
5377 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5378 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5380 "0526 Fast-path FCP CQ (%d) not "
5381 "allocated\n", fcp_cqidx);
5382 goto out_destroy_fcp_cq;
5383 }
5384 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5385 phba->sli4_hba.fp_eq[fcp_cqidx],
5386 LPFC_WCQ, LPFC_FCP);
5387 if (rc) {
5388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5389 "0527 Failed setup of fast-path FCP "
5390 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5391 goto out_destroy_fcp_cq;
5392 }
5393 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5394 "2588 FCP CQ setup: cq[%d]-id=%d, "
5395 "parent eq[%d]-id=%d\n",
5396 fcp_cqidx,
5397 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5398 fcp_cqidx,
5399 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5400 }
5401
5402 /*
5403 * Set up all the Work Queues (WQs)
5404 */
5405
5406 /* Set up Mailbox Command Queue */
5407 if (!phba->sli4_hba.mbx_wq) {
5408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5409 "0538 Slow-path MQ not allocated\n");
5410 goto out_destroy_fcp_cq;
5411 }
5412 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5413 phba->sli4_hba.mbx_cq, LPFC_MBOX);
5414 if (rc) {
5415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5416 "0539 Failed setup of slow-path MQ: "
5417 "rc = 0x%x\n", rc);
5418 goto out_destroy_fcp_cq;
5419 }
5420 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5421 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5422 phba->sli4_hba.mbx_wq->queue_id,
5423 phba->sli4_hba.mbx_cq->queue_id);
5424
5425 /* Set up slow-path ELS Work Queue */
5426 if (!phba->sli4_hba.els_wq) {
5427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5428 "0536 Slow-path ELS WQ not allocated\n");
5429 goto out_destroy_mbx_wq;
5430 }
5431 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5432 phba->sli4_hba.els_cq, LPFC_ELS);
5433 if (rc) {
5434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5435 "0537 Failed setup of slow-path ELS WQ: "
5436 "rc = 0x%x\n", rc);
5437 goto out_destroy_mbx_wq;
5438 }
5439 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5440 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5441 phba->sli4_hba.els_wq->queue_id,
5442 phba->sli4_hba.els_cq->queue_id);
5443
5444 /* Set up fast-path FCP Work Queue */
5445 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5446 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5448 "0534 Fast-path FCP WQ (%d) not "
5449 "allocated\n", fcp_wqidx);
5450 goto out_destroy_fcp_wq;
5451 }
5452 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5453 phba->sli4_hba.fcp_cq[fcp_cq_index],
5454 LPFC_FCP);
5455 if (rc) {
5456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5457 "0535 Failed setup of fast-path FCP "
5458 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5459 goto out_destroy_fcp_wq;
5460 }
5461 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5462 "2591 FCP WQ setup: wq[%d]-id=%d, "
5463 "parent cq[%d]-id=%d\n",
5464 fcp_wqidx,
5465 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5466 fcp_cq_index,
5467 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5468 /* Round robin FCP Work Queue's Completion Queue assignment */
5469 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5470 }
5471
5472 /*
5473 * Create Receive Queue (RQ)
5474 */
5475 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5477 "0540 Receive Queue not allocated\n");
5478 goto out_destroy_fcp_wq;
5479 }
5480 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5481 phba->sli4_hba.rxq_cq, LPFC_USOL);
5482 if (rc) {
5483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5484 "0541 Failed setup of Receive Queue: "
5485 "rc = 0x%x\n", rc);
5486 goto out_destroy_fcp_wq;
5487 }
5488 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5489 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5490 "parent cq-id=%d\n",
5491 phba->sli4_hba.hdr_rq->queue_id,
5492 phba->sli4_hba.dat_rq->queue_id,
5493 phba->sli4_hba.rxq_cq->queue_id);
5494 return 0;
5495
5496out_destroy_fcp_wq:
5497 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5498 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5499 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5500out_destroy_mbx_wq:
5501 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5502out_destroy_fcp_cq:
5503 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5504 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5505 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5506out_destroy_els_cq:
5507 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5508out_destroy_mbx_cq:
5509 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5510out_destroy_fp_eq:
5511 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5512 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5513 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5514out_error:
5515 return rc;
5516}
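
The FCP WQ loop above can bind more work queues than there are completion queues, so it assigns CQs round-robin via fcp_cq_index = (fcp_cq_index + 1) % eq_count. A trivial standalone illustration of the resulting mapping:

#include <stdio.h>

int main(void)
{
        int wq_count = 8, cq_count = 3, cq_index = 0;
        int wq;

        /* Distribute WQs over a smaller set of CQs round-robin,
         * as the FCP WQ setup loop above does. */
        for (wq = 0; wq < wq_count; wq++) {
                printf("wq[%d] -> cq[%d]\n", wq, cq_index);
                cq_index = (cq_index + 1) % cq_count;
        }
        return 0;
}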
5517
5518/**
5519 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5520 * @phba: pointer to lpfc hba data structure.
5521 *
5522 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
5523 * operation.
5524 *
5525 * Return codes
5526 * 0 - successful
5527 * ENOMEM - No available memory
5528 * EIO - The mailbox failed to complete successfully.
5529 **/
5530void
5531lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5532{
5533 int fcp_qidx;
5534
5535 /* Unset mailbox command work queue */
5536 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5537 /* Unset ELS work queue */
5538 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5539 /* Unset unsolicited receive queue */
5540 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5541 /* Unset FCP work queue */
5542 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5543 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5544 /* Unset mailbox command complete queue */
5545 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5546 /* Unset ELS complete queue */
5547 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5548 /* Unset unsolicited receive complete queue */
5549 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5550 /* Unset FCP response complete queue */
5551 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5552 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5553 /* Unset fast-path event queue */
5554 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5555 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5556 /* Unset slow-path event queue */
5557 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5558}
5559
5560/**
5561 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5562 * @phba: pointer to lpfc hba data structure.
5563 *
5564 * This routine is invoked to allocate and set up a pool of completion queue
5565 * events. The body of the completion queue event is a completion queue entry
5566 * CQE. For now, this pool is used for the interrupt service routine to queue
5567 * the following HBA completion queue events for the worker thread to process:
5568 * - Mailbox asynchronous events
5569 * - Receive queue completion unsolicited events
5570 * Later, this can be used for all the slow-path events.
5571 *
5572 * Return codes
5573 * 0 - successful
5574 * -ENOMEM - No available memory
5575 **/
5576static int
5577lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5578{
5579 struct lpfc_cq_event *cq_event;
5580 int i;
5581
5582 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5583 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5584 if (!cq_event)
5585 goto out_pool_create_fail;
5586 list_add_tail(&cq_event->list,
5587 &phba->sli4_hba.sp_cqe_event_pool);
5588 }
5589 return 0;
5590
5591out_pool_create_fail:
5592 lpfc_sli4_cq_event_pool_destroy(phba);
5593 return -ENOMEM;
5594}
5595
5596/**
5597 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5598 * @phba: pointer to lpfc hba data structure.
5599 *
5600 * This routine is invoked to free the pool of completion queue events at
5601 * driver unload time. Note that it is the responsibility of the driver
5602 * cleanup routine to free all the outstanding completion-queue events
5603 * allocated from this pool back into the pool before invoking this routine
5604 * to destroy the pool.
5605 **/
5606static void
5607lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5608{
5609 struct lpfc_cq_event *cq_event, *next_cq_event;
5610
5611 list_for_each_entry_safe(cq_event, next_cq_event,
5612 &phba->sli4_hba.sp_cqe_event_pool, list) {
5613 list_del(&cq_event->list);
5614 kfree(cq_event);
5615 }
5616}
5617
5618/**
5619 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5620 * @phba: pointer to lpfc hba data structure.
5621 *
5622 * This routine is the lock-free version of the API invoked to allocate a
5623 * completion-queue event from the free pool.
5624 *
5625 * Return: Pointer to the newly allocated completion-queue event if successful
5626 * NULL otherwise.
5627 **/
5628struct lpfc_cq_event *
5629__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5630{
5631 struct lpfc_cq_event *cq_event = NULL;
5632
5633 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5634 struct lpfc_cq_event, list);
5635 return cq_event;
5636}
5637
5638/**
5639 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5640 * @phba: pointer to lpfc hba data structure.
5641 *
5642 * This routine is the locked version of the API invoked to allocate a
5643 * completion-queue event from the free pool.
5644 *
5645 * Return: Pointer to the newly allocated completion-queue event if successful
5646 * NULL otherwise.
5647 **/
5648struct lpfc_cq_event *
5649lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5650{
5651 struct lpfc_cq_event *cq_event;
5652 unsigned long iflags;
5653
5654 spin_lock_irqsave(&phba->hbalock, iflags);
5655 cq_event = __lpfc_sli4_cq_event_alloc(phba);
5656 spin_unlock_irqrestore(&phba->hbalock, iflags);
5657 return cq_event;
5658}
5659
5660/**
5661 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5662 * @phba: pointer to lpfc hba data structure.
5663 * @cq_event: pointer to the completion queue event to be freed.
5664 *
5665 * This routine is the lock-free version of the API invoked to release a
5666 * completion-queue event back into the free pool.
5667 **/
5668void
5669__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5670 struct lpfc_cq_event *cq_event)
5671{
5672 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5673}
5674
5675/**
5676 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5677 * @phba: pointer to lpfc hba data structure.
5678 * @cq_event: pointer to the completion queue event to be freed.
5679 *
5680 * This routine is the locked version of the API invoked to release a
5681 * completion-queue event back into the free pool.
5682 **/
5683void
5684lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5685 struct lpfc_cq_event *cq_event)
5686{
5687 unsigned long iflags;
5688 spin_lock_irqsave(&phba->hbalock, iflags);
5689 __lpfc_sli4_cq_event_release(phba, cq_event);
5690 spin_unlock_irqrestore(&phba->hbalock, iflags);
5691}
5692
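/*
 * Illustration (editor's sketch, not part of the diff): typical pairing of
 * the completion-queue event APIs above. A caller already holding hbalock
 * would use the __lpfc_sli4_* variants instead; every other context goes
 * through the locking wrappers shown here. The function name is
 * hypothetical.
 */
static void example_sp_event_roundtrip(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	cq_event = lpfc_sli4_cq_event_alloc(phba);	/* takes hbalock */
	if (!cq_event)
		return;		/* free pool exhausted; event is dropped */
	/* ... copy the completion queue entry into the event here ... */
	lpfc_sli4_cq_event_release(phba, cq_event);	/* back to the pool */
}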
5693/**
5694 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5695 * @phba: pointer to lpfc hba data structure.
5696 *
5697 * This routine is invoked to release all pending completion-queue events
5698 * back into the free pool for device reset.
5699 **/
5700static void
5701lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5702{
5703 LIST_HEAD(cqelist);
5704 struct lpfc_cq_event *cqe;
5705 unsigned long iflags;
5706
5707 /* Retrieve all the pending WCQEs from pending WCQE lists */
5708 spin_lock_irqsave(&phba->hbalock, iflags);
5709 /* Pending FCP XRI abort events */
5710 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5711 &cqelist);
5712 /* Pending ELS XRI abort events */
5713 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5714 &cqelist);
5715 /* Pending async events */
5716 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5717 &cqelist);
5718 spin_unlock_irqrestore(&phba->hbalock, iflags);
5719
5720 while (!list_empty(&cqelist)) {
5721 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5722 lpfc_sli4_cq_event_release(phba, cqe);
5723 }
5724}
5725
5726/**
5727 * lpfc_pci_function_reset - Reset pci function.
5728 * @phba: pointer to lpfc hba data structure.
5729 *
5730 * This routine is invoked to request a PCI function reset. It will destroy
5731 * all resources assigned to the PCI function which originates this request.
5732 *
5733 * Return codes
5734 * 0 - successful
5735 * ENOMEM - No available memory
5736 * EIO - The mailbox failed to complete successfully.
5737 **/
5738int
5739lpfc_pci_function_reset(struct lpfc_hba *phba)
5740{
5741 LPFC_MBOXQ_t *mboxq;
5742 uint32_t rc = 0;
5743 uint32_t shdr_status, shdr_add_status;
5744 union lpfc_sli4_cfg_shdr *shdr;
5745
5746 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5747 if (!mboxq) {
5748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5749 "0494 Unable to allocate memory for issuing "
5750 "SLI_FUNCTION_RESET mailbox command\n");
5751 return -ENOMEM;
5752 }
5753
5754 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5755 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5756 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5757 LPFC_SLI4_MBX_EMBED);
5758 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5759 shdr = (union lpfc_sli4_cfg_shdr *)
5760 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5761 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5762 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5763 if (rc != MBX_TIMEOUT)
5764 mempool_free(mboxq, phba->mbox_mem_pool);
5765 if (shdr_status || shdr_add_status || rc) {
5766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5767 "0495 SLI_FUNCTION_RESET mailbox failed with "
5768 "status x%x add_status x%x, mbx status x%x\n",
5769 shdr_status, shdr_add_status, rc);
5770 rc = -ENXIO;
5771 }
5772 return rc;
5773}
5774
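/*
 * Illustration (editor's sketch, not part of the diff): a minimal caller of
 * the reset routine above during SLI-4 bring-up. The wrapper name and log
 * message number are hypothetical; the error values are the ones listed in
 * the kernel-doc.
 */
static int example_reset_pci_function(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_pci_function_reset(phba);	/* 0, -ENOMEM or -ENXIO */
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"xxxx PCI function reset failed, rc=%d\n", rc);
	return rc;
}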
5775/**
5776 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5777 * @phba: pointer to lpfc hba data structure.
5778 * @cnt: number of nop mailbox commands to send.
5779 *
5780 * This routine is invoked to send @cnt NOP mailbox commands and wait for
5781 * each command to complete.
5782 *
5783 * Return: the number of NOP mailbox commands completed.
5784 **/
5785static int
5786lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5787{
5788 LPFC_MBOXQ_t *mboxq;
5789 int length, cmdsent;
5790 uint32_t mbox_tmo;
5791 uint32_t rc = 0;
5792 uint32_t shdr_status, shdr_add_status;
5793 union lpfc_sli4_cfg_shdr *shdr;
5794
5795 if (cnt == 0) {
5796 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5797 "2518 Requested to send 0 NOP mailbox cmd\n");
5798 return cnt;
5799 }
5800
5801 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5802 if (!mboxq) {
5803 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5804 "2519 Unable to allocate memory for issuing "
5805 "NOP mailbox command\n");
5806 return 0;
5807 }
5808
5809 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5810 length = (sizeof(struct lpfc_mbx_nop) -
5811 sizeof(struct lpfc_sli4_cfg_mhdr));
5812 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5813 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5814
5815 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5816 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5817 if (!phba->sli4_hba.intr_enable)
5818 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5819 else
5820 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5821 if (rc == MBX_TIMEOUT)
5822 break;
5823 /* Check return status */
5824 shdr = (union lpfc_sli4_cfg_shdr *)
5825 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5826 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5827 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5828 &shdr->response);
5829 if (shdr_status || shdr_add_status || rc) {
5830 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5831 "2520 NOP mailbox command failed "
5832 "status x%x add_status x%x mbx "
5833 "status x%x\n", shdr_status,
5834 shdr_add_status, rc);
5835 break;
5836 }
5837 }
5838
5839 if (rc != MBX_TIMEOUT)
5840 mempool_free(mboxq, phba->mbox_mem_pool);
5841
5842 return cmdsent;
5843}
5844
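/*
 * Illustration (editor's sketch, not part of the diff): using the NOP
 * routine above as a mailbox-channel sanity check. Because the return value
 * is the count of NOPs that actually completed, comparing it with the
 * request count detects a broken mailbox path. The constant and function
 * name are hypothetical; since the routine is static, this would have to
 * live in the same file.
 */
#define EXAMPLE_NOP_TEST_CNT 4
static int example_verify_mbox_channel(struct lpfc_hba *phba)
{
	if (lpfc_sli4_send_nop_mbox_cmds(phba, EXAMPLE_NOP_TEST_CNT) !=
	    EXAMPLE_NOP_TEST_CNT)
		return -EIO;	/* some NOPs failed or timed out */
	return 0;
}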
5845/**
5846 * lpfc_sli4_fcfi_unreg - Unregister FCFI from device
5847 * @phba: pointer to lpfc hba data structure.
5848 * @fcfi: fcf index.
5849 *
5850 * This routine is invoked to unregister an FCFI from the device.
5851 **/
5852void
5853lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5854{
5855 LPFC_MBOXQ_t *mbox;
5856 uint32_t mbox_tmo;
5857 int rc;
5858 unsigned long flags;
5859
5860 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5861
5862 if (!mbox)
5863 return;
5864
5865 lpfc_unreg_fcfi(mbox, fcfi);
5866
5867 if (!phba->sli4_hba.intr_enable)
5868 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5869 else {
5870 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5871 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5872 }
5873 if (rc != MBX_TIMEOUT)
5874 mempool_free(mbox, phba->mbox_mem_pool);
5875 if (rc != MBX_SUCCESS)
5876 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5877 "2517 Unregister FCFI command failed "
5878 "status %d, mbxStatus x%x\n", rc,
5879 bf_get(lpfc_mqe_status, &mbox->u.mqe));
5880 else {
5881 spin_lock_irqsave(&phba->hbalock, flags);
5882 /* Mark the FCFI as no longer registered */
5883 phba->fcf.fcf_flag &=
5884 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5885 spin_unlock_irqrestore(&phba->hbalock, flags);
5886 }
5887}
5888
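/*
 * Illustration (editor's sketch, not part of the diff): the issue-and-wait
 * idiom shared by the NOP and FCFI-unregister paths above, factored into a
 * hypothetical helper. Before interrupts are enabled the mailbox must be
 * polled; afterwards the driver can sleep on it with the SLI4_CONFIG
 * timeout.
 */
static int example_issue_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t mbox_tmo;
	int rc;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* On MBX_TIMEOUT the firmware may still own the mailbox memory,
	 * so it must not be returned to the pool here. */
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}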
5889/**
5890 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5891 * @phba: pointer to lpfc hba data structure.
5892 *
5893 * This routine is invoked to set up the PCI device memory space for device
5894 * with SLI-4 interface spec.
5895 *
5896 * Return codes
5897 * 0 - successful
5898 * other values - error
5899 **/
5900static int
5901lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5902{
5903 struct pci_dev *pdev;
5904 unsigned long bar0map_len, bar1map_len, bar2map_len;
5905 int error = -ENODEV;
5906
5907 /* Obtain PCI device reference */
5908 if (!phba->pcidev)
5909 return error;
5910 else
5911 pdev = phba->pcidev;
5912
5913 /* Set the device DMA mask size */
5914 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5915 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5916 return error;
5917
5918 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5919 * number of bytes required by each mapping. These actually map to
5920 * PCI BAR regions 1, 2, and 4 on the SLI4 device.
5921 */
5922 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5923 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5924
5925 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5926 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5927
5928 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5929 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5930
5931 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5932 phba->sli4_hba.conf_regs_memmap_p =
5933 ioremap(phba->pci_bar0_map, bar0map_len);
5934 if (!phba->sli4_hba.conf_regs_memmap_p) {
5935 dev_printk(KERN_ERR, &pdev->dev,
5936 "ioremap failed for SLI4 PCI config registers.\n");
5937 goto out;
5938 }
5939
5940 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
5941 phba->sli4_hba.ctrl_regs_memmap_p =
5942 ioremap(phba->pci_bar1_map, bar1map_len);
5943 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5944 dev_printk(KERN_ERR, &pdev->dev,
5945 "ioremap failed for SLI4 HBA control registers.\n");
5946 goto out_iounmap_conf;
5947 }
5948
5949 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5950 phba->sli4_hba.drbl_regs_memmap_p =
5951 ioremap(phba->pci_bar2_map, bar2map_len);
5952 if (!phba->sli4_hba.drbl_regs_memmap_p) {
5953 dev_printk(KERN_ERR, &pdev->dev,
5954 "ioremap failed for SLI4 HBA doorbell registers.\n");
5955 goto out_iounmap_ctrl;
5956 }
5957
5958 /* Set up BAR0 PCI config space register memory map */
5959 lpfc_sli4_bar0_register_memmap(phba);
5960
5961 /* Set up BAR1 register memory map */
5962 lpfc_sli4_bar1_register_memmap(phba);
5963
5964 /* Set up BAR2 register memory map */
5965 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5966 if (error)
5967 goto out_iounmap_all;
5968
5969 return 0;
5970
5971out_iounmap_all:
5972 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5973out_iounmap_ctrl:
5974 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5975out_iounmap_conf:
5976 iounmap(phba->sli4_hba.conf_regs_memmap_p);
5977out:
5978 return error;
5979}
5980
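/*
 * Illustration (editor's sketch, not part of the diff): the 64-to-32-bit
 * DMA mask fallback performed at the top of the routine above, as a
 * stand-alone helper (the helper name is hypothetical).
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	/* Prefer 64-bit DMA addressing; fall back to 32-bit when the
	 * platform or IOMMU configuration cannot support it. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
		return 0;
	return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}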
5981/**
5982 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5983 * @phba: pointer to lpfc hba data structure.
5984 *
5985 * This routine is invoked to unset the PCI device memory space for device
5986 * with SLI-4 interface spec.
5987 **/
5988static void
5989lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5990{
5991 struct pci_dev *pdev;
5992
5993 /* Obtain PCI device reference */
5994 if (!phba->pcidev)
5995 return;
5996 else
5997 pdev = phba->pcidev;
5998
5999 /* Free coherent DMA memory allocated */
6000
6001 /* Unmap I/O memory space */
6002 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6003 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6004 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6005
6006 return;
6007}
6008
6009/**
6010 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6011 * @phba: pointer to lpfc hba data structure.
6012 *
6013 * This routine is invoked to enable the MSI-X interrupt vectors to device
6014 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6015 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6016 * invoked, enables either all or nothing, depending on the current
6017 * availability of PCI vector resources. The device driver is responsible
6018 * for calling the individual request_irq() to register each MSI-X vector
6019 * with an interrupt handler, which is done in this function. Note that
2326 * later when device is unloading, the driver should always call free_irq() 6020 * later when device is unloading, the driver should always call free_irq()
2327 * on all MSI-X vectors it has done request_irq() on before calling 6021 * on all MSI-X vectors it has done request_irq() on before calling
2328 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6022 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
@@ -2333,7 +6027,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2333 * other values - error 6027 * other values - error
2334 **/ 6028 **/
2335static int 6029static int
2336lpfc_enable_msix(struct lpfc_hba *phba) 6030lpfc_sli_enable_msix(struct lpfc_hba *phba)
2337{ 6031{
2338 int rc, i; 6032 int rc, i;
2339 LPFC_MBOXQ_t *pmb; 6033 LPFC_MBOXQ_t *pmb;
@@ -2349,20 +6043,21 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2349 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6043 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2350 "0420 PCI enable MSI-X failed (%d)\n", rc); 6044 "0420 PCI enable MSI-X failed (%d)\n", rc);
2351 goto msi_fail_out; 6045 goto msi_fail_out;
2352 } else 6046 }
2353 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6047 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6048 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2355 "0477 MSI-X entry[%d]: vector=x%x " 6049 "0477 MSI-X entry[%d]: vector=x%x "
2356 "message=%d\n", i, 6050 "message=%d\n", i,
2357 phba->msix_entries[i].vector, 6051 phba->msix_entries[i].vector,
2358 phba->msix_entries[i].entry); 6052 phba->msix_entries[i].entry);
2359 /* 6053 /*
2360 * Assign MSI-X vectors to interrupt handlers 6054 * Assign MSI-X vectors to interrupt handlers
2361 */ 6055 */
2362 6056
2363 /* vector-0 is associated to slow-path handler */ 6057 /* vector-0 is associated to slow-path handler */
2364 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, 6058 rc = request_irq(phba->msix_entries[0].vector,
2365 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); 6059 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6060 LPFC_SP_DRIVER_HANDLER_NAME, phba);
2366 if (rc) { 6061 if (rc) {
2367 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6062 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2368 "0421 MSI-X slow-path request_irq failed " 6063 "0421 MSI-X slow-path request_irq failed "
@@ -2371,8 +6066,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2371 } 6066 }
2372 6067
2373 /* vector-1 is associated to fast-path handler */ 6068 /* vector-1 is associated to fast-path handler */
2374 rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, 6069 rc = request_irq(phba->msix_entries[1].vector,
2375 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); 6070 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6071 LPFC_FP_DRIVER_HANDLER_NAME, phba);
2376 6072
2377 if (rc) { 6073 if (rc) {
2378 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6074 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2401,7 +6097,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2401 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6097 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2402 "0351 Config MSI mailbox command failed, " 6098 "0351 Config MSI mailbox command failed, "
2403 "mbxCmd x%x, mbxStatus x%x\n", 6099 "mbxCmd x%x, mbxStatus x%x\n",
2404 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 6100 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
2405 goto mbx_fail_out; 6101 goto mbx_fail_out;
2406 } 6102 }
2407 6103
@@ -2428,14 +6124,14 @@ msi_fail_out:
2428} 6124}
2429 6125
2430/** 6126/**
2431 * lpfc_disable_msix - Disable MSI-X interrupt mode 6127 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
2432 * @phba: pointer to lpfc hba data structure. 6128 * @phba: pointer to lpfc hba data structure.
2433 * 6129 *
2434 * This routine is invoked to release the MSI-X vectors and then disable the 6130 * This routine is invoked to release the MSI-X vectors and then disable the
2435 * MSI-X interrupt mode. 6131 * MSI-X interrupt mode to device with SLI-3 interface spec.
2436 **/ 6132 **/
2437static void 6133static void
2438lpfc_disable_msix(struct lpfc_hba *phba) 6134lpfc_sli_disable_msix(struct lpfc_hba *phba)
2439{ 6135{
2440 int i; 6136 int i;
2441 6137
@@ -2444,23 +6140,26 @@ lpfc_disable_msix(struct lpfc_hba *phba)
2444 free_irq(phba->msix_entries[i].vector, phba); 6140 free_irq(phba->msix_entries[i].vector, phba);
2445 /* Disable MSI-X */ 6141 /* Disable MSI-X */
2446 pci_disable_msix(phba->pcidev); 6142 pci_disable_msix(phba->pcidev);
6143
6144 return;
2447} 6145}
2448 6146
2449/** 6147/**
2450 * lpfc_enable_msi - Enable MSI interrupt mode 6148 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
2451 * @phba: pointer to lpfc hba data structure. 6149 * @phba: pointer to lpfc hba data structure.
2452 * 6150 *
2453 * This routine is invoked to enable the MSI interrupt mode. The kernel 6151 * This routine is invoked to enable the MSI interrupt mode to device with
2454 * function pci_enable_msi() is called to enable the MSI vector. The 6152 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
2455 * device driver is responsible for calling the request_irq() to register 6153 * enable the MSI vector. The device driver is responsible for calling the
2456 * MSI vector with an interrupt handler, which is done in this function. 6154 * request_irq() to register the MSI vector with an interrupt handler, which
6155 * is done in this function.
2457 * 6156 *
2458 * Return codes 6157 * Return codes
2459 * 0 - successful 6158 * 0 - successful
2460 * other values - error 6159 * other values - error
2461 */ 6160 */
2462static int 6161static int
2463lpfc_enable_msi(struct lpfc_hba *phba) 6162lpfc_sli_enable_msi(struct lpfc_hba *phba)
2464{ 6163{
2465 int rc; 6164 int rc;
2466 6165
@@ -2474,7 +6173,7 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2474 return rc; 6173 return rc;
2475 } 6174 }
2476 6175
2477 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6176 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
2478 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6177 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2479 if (rc) { 6178 if (rc) {
2480 pci_disable_msi(phba->pcidev); 6179 pci_disable_msi(phba->pcidev);
@@ -2485,17 +6184,17 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2485} 6184}
2486 6185
2487/** 6186/**
2488 * lpfc_disable_msi - Disable MSI interrupt mode 6187 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
2489 * @phba: pointer to lpfc hba data structure. 6188 * @phba: pointer to lpfc hba data structure.
2490 * 6189 *
2491 * This routine is invoked to disable the MSI interrupt mode. The driver 6190 * This routine is invoked to disable the MSI interrupt mode to device with
2492 * calls free_irq() on the MSI vector it has done request_irq() on before 6191 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
2493 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and 6192 * done request_irq() on before calling pci_disable_msi(). Failure to do so
2494 * a device will be left with MSI enabled and leaks its vector. 6193 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6194 * its vector.
2495 */ 6195 */
2496
2497static void 6196static void
2498lpfc_disable_msi(struct lpfc_hba *phba) 6197lpfc_sli_disable_msi(struct lpfc_hba *phba)
2499{ 6198{
2500 free_irq(phba->pcidev->irq, phba); 6199 free_irq(phba->pcidev->irq, phba);
2501 pci_disable_msi(phba->pcidev); 6200 pci_disable_msi(phba->pcidev);
@@ -2503,80 +6202,298 @@ lpfc_disable_msi(struct lpfc_hba *phba)
2503} 6202}
2504 6203
2505/** 6204/**
2506 * lpfc_log_intr_mode - Log the active interrupt mode 6205 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
2507 * @phba: pointer to lpfc hba data structure. 6206 * @phba: pointer to lpfc hba data structure.
2508 * @intr_mode: active interrupt mode adopted.
2509 * 6207 *
2510 * This routine is invoked to log the currently used active interrupt mode 6208 * This routine is invoked to enable device interrupt and associate driver's
2511 * to the device. 6209 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
2512 */ 6210 * spec. Depending on the interrupt mode configured in the driver, the driver
6211 * will try to fall back from the configured interrupt mode to an interrupt
6212 * mode which is supported by the platform, kernel, and device in the order
6213 * of:
6214 * MSI-X -> MSI -> IRQ.
6215 *
6216 * Return codes
6217 * 0 - successful
6218 * other values - error
6219 **/
6220static uint32_t
6221lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6222{
6223 uint32_t intr_mode = LPFC_INTR_ERROR;
6224 int retval;
6225
6226 if (cfg_mode == 2) {
6227 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6228 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6229 if (!retval) {
6230 /* Now, try to enable MSI-X interrupt mode */
6231 retval = lpfc_sli_enable_msix(phba);
6232 if (!retval) {
6233 /* Indicate initialization to MSI-X mode */
6234 phba->intr_type = MSIX;
6235 intr_mode = 2;
6236 }
6237 }
6238 }
6239
6240 /* Fallback to MSI if MSI-X initialization failed */
6241 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6242 retval = lpfc_sli_enable_msi(phba);
6243 if (!retval) {
6244 /* Indicate initialization to MSI mode */
6245 phba->intr_type = MSI;
6246 intr_mode = 1;
6247 }
6248 }
6249
6250 /* Fallback to INTx if both MSI-X/MSI initialization failed */
6251 if (phba->intr_type == NONE) {
6252 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6253 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6254 if (!retval) {
6255 /* Indicate initialization to INTx mode */
6256 phba->intr_type = INTx;
6257 intr_mode = 0;
6258 }
6259 }
6260 return intr_mode;
6261}
6262
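/*
 * Illustration (editor's sketch, not part of the diff): the caller-side
 * contract of the routine above. cfg_mode comes from the lpfc_use_msi
 * module parameter (2 = MSI-X, 1 = MSI, 0 = INTx) and the return value
 * reports the mode actually achieved, or LPFC_INTR_ERROR when even INTx
 * could not be set up. The wrapper name is hypothetical.
 */
static int example_bring_up_sli3_intr(struct lpfc_hba *phba)
{
	uint32_t intr_mode;

	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
	if (intr_mode == LPFC_INTR_ERROR)
		return -ENODEV;		/* no usable interrupt mode */
	phba->intr_mode = intr_mode;	/* 0 = INTx, 1 = MSI, 2 = MSI-X */
	lpfc_log_intr_mode(phba, intr_mode);
	return 0;
}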
6263/**
6264 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6265 * @phba: pointer to lpfc hba data structure.
6266 *
6267 * This routine is invoked to disable device interrupt and disassociate the
6268 * driver's interrupt handler(s) from interrupt vector(s) to device with
6269 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6270 * release the interrupt vector(s) for the message signaled interrupt.
6271 **/
2513static void 6272static void
2514lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 6273lpfc_sli_disable_intr(struct lpfc_hba *phba)
2515{ 6274{
2516 switch (intr_mode) { 6275 /* Disable the currently initialized interrupt mode */
2517 case 0: 6276 if (phba->intr_type == MSIX)
2518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6277 lpfc_sli_disable_msix(phba);
2519 "0470 Enable INTx interrupt mode.\n"); 6278 else if (phba->intr_type == MSI)
2520 break; 6279 lpfc_sli_disable_msi(phba);
2521 case 1: 6280 else if (phba->intr_type == INTx)
6281 free_irq(phba->pcidev->irq, phba);
6282
6283 /* Reset interrupt management states */
6284 phba->intr_type = NONE;
6285 phba->sli.slistat.sli_intr = 0;
6286
6287 return;
6288}
6289
6290/**
6291 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6292 * @phba: pointer to lpfc hba data structure.
6293 *
6294 * This routine is invoked to enable the MSI-X interrupt vectors to device
6295 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6296 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6297 * enables either all or nothing, depending on the current availability of
6298 * PCI vector resources. The device driver is responsible for calling the
6299 * individual request_irq() to register each MSI-X vector with an interrupt
6300 * handler, which is done in this function. Note that later when device is
6301 * unloading, the driver should always call free_irq() on all MSI-X vectors
6302 * it has done request_irq() on before calling pci_disable_msix(). Failure
6303 * to do so results in a BUG_ON() and a device will be left with MSI-X
6304 * enabled and leaks its vectors.
6305 *
6306 * Return codes
6307 * 0 - successful
6308 * other values - error
6309 **/
6310static int
6311lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6312{
6313 int rc, index;
6314
6315 /* Set up MSI-X multi-message vectors */
6316 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6317 phba->sli4_hba.msix_entries[index].entry = index;
6318
6319 /* Configure MSI-X capability structure */
6320 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6321 phba->sli4_hba.cfg_eqn);
6322 if (rc) {
2522 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6323 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2523 "0481 Enabled MSI interrupt mode.\n"); 6324 "0484 PCI enable MSI-X failed (%d)\n", rc);
2524 break; 6325 goto msi_fail_out;
2525 case 2: 6326 }
6327 /* Log MSI-X vector assignment */
6328 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
2526 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6329 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2527 "0480 Enabled MSI-X interrupt mode.\n"); 6330 "0489 MSI-X entry[%d]: vector=x%x "
2528 break; 6331 "message=%d\n", index,
2529 default: 6332 phba->sli4_hba.msix_entries[index].vector,
2530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6333 phba->sli4_hba.msix_entries[index].entry);
2531 "0482 Illegal interrupt mode.\n"); 6334 /*
2532 break; 6335 * Assign MSI-X vectors to interrupt handlers
6336 */
6337
6338 /* The first vector must be associated with the slow-path handler for MQ */
6339 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6340 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6341 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6342 if (rc) {
6343 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6344 "0485 MSI-X slow-path request_irq failed "
6345 "(%d)\n", rc);
6346 goto msi_fail_out;
2533 } 6347 }
2534 return; 6348
6349 /* The rest of the vector(s) are associated to fast-path handler(s) */
6350 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6351 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6352 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6353 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6354 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6355 LPFC_FP_DRIVER_HANDLER_NAME,
6356 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6357 if (rc) {
6358 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6359 "0486 MSI-X fast-path (%d) "
6360 "request_irq failed (%d)\n", index, rc);
6361 goto cfg_fail_out;
6362 }
6363 }
6364
6365 return rc;
6366
6367cfg_fail_out:
6368 /* free the irq already requested */
6369 for (--index; index >= 1; index--)
6370 free_irq(phba->sli4_hba.msix_entries[index].vector,
6371 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6372
6373 /* free the irq already requested */
6374 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6375
6376msi_fail_out:
6377 /* Unconfigure MSI-X capability structure */
6378 pci_disable_msix(phba->pcidev);
6379 return rc;
2535} 6380}
2536 6381
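/*
 * Illustration (editor's sketch, not part of the diff): the unwind rule the
 * routine above follows when request_irq() fails mid-loop - every fast-path
 * vector already requested is freed in reverse order with the same dev_id
 * it was registered with, then the slow-path vector, then MSI-X itself.
 * The helper name and the 'requested' convention are hypothetical.
 */
static void example_unwind_msix(struct lpfc_hba *phba, int requested)
{
	int index;

	/* Vectors 1..requested-1 were bound to fcp_eq_hdl[index - 1] */
	for (index = requested - 1; index >= 1; index--)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
	/* Vector 0 was bound to the hba itself */
	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
	pci_disable_msix(phba->pcidev);
}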
6382/**
6383 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6384 * @phba: pointer to lpfc hba data structure.
6385 *
6386 * This routine is invoked to release the MSI-X vectors and then disable the
6387 * MSI-X interrupt mode to device with SLI-4 interface spec.
6388 **/
2537static void 6389static void
2538lpfc_stop_port(struct lpfc_hba *phba) 6390lpfc_sli4_disable_msix(struct lpfc_hba *phba)
2539{ 6391{
2540 /* Clear all interrupt enable conditions */ 6392 int index;
2541 writel(0, phba->HCregaddr);
2542 readl(phba->HCregaddr); /* flush */
2543 /* Clear all pending interrupts */
2544 writel(0xffffffff, phba->HAregaddr);
2545 readl(phba->HAregaddr); /* flush */
2546 6393
2547 /* Reset some HBA SLI setup states */ 6394 /* Free up MSI-X multi-message vectors */
2548 lpfc_stop_phba_timers(phba); 6395 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
2549 phba->pport->work_port_events = 0; 6396
6397 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6398 free_irq(phba->sli4_hba.msix_entries[index].vector,
6399 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6400 /* Disable MSI-X */
6401 pci_disable_msix(phba->pcidev);
6402
6403 return;
6404}
6405
6406/**
6407 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6408 * @phba: pointer to lpfc hba data structure.
6409 *
6410 * This routine is invoked to enable the MSI interrupt mode to device with
6411 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6412 * to enable the MSI vector. The device driver is responsible for calling
6413 * request_irq() to register the MSI vector with an interrupt handler,
6414 * which is done in this function.
6415 *
6416 * Return codes
6417 * 0 - successful
6418 * other values - error
6419 **/
6420static int
6421lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6422{
6423 int rc, index;
2550 6424
6425 rc = pci_enable_msi(phba->pcidev);
6426 if (!rc)
6427 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6428 "0487 PCI enable MSI mode success.\n");
6429 else {
6430 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6431 "0488 PCI enable MSI mode failed (%d)\n", rc);
6432 return rc;
6433 }
6434
6435 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6436 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6437 if (rc) {
6438 pci_disable_msi(phba->pcidev);
6439 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6440 "0490 MSI request_irq failed (%d)\n", rc);
6441 }
6442
6443 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6444 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6445 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6446 }
6447
6448 return rc;
6449}
6450
6451/**
6452 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6453 * @phba: pointer to lpfc hba data structure.
6454 *
6455 * This routine is invoked to disable the MSI interrupt mode to device with
6456 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
6457 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6458 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6459 * its vector.
6460 **/
6461static void
6462lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6463{
6464 free_irq(phba->pcidev->irq, phba);
6465 pci_disable_msi(phba->pcidev);
2551 return; 6466 return;
2552} 6467}
2553 6468
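/*
 * Illustration (editor's sketch, not part of the diff): the mandatory MSI
 * teardown order called out in the kernel-doc above - free_irq() must run
 * before pci_disable_msi(), otherwise the vector leaks and a BUG_ON()
 * fires. The helper name is hypothetical.
 */
static void example_msi_teardown(struct pci_dev *pdev, void *dev_id)
{
	free_irq(pdev->irq, dev_id);	/* release the handler first */
	pci_disable_msi(pdev);		/* only then disable MSI */
}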
2554/** 6469/**
2555 * lpfc_enable_intr - Enable device interrupt 6470 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
2556 * @phba: pointer to lpfc hba data structure. 6471 * @phba: pointer to lpfc hba data structure.
2557 * 6472 *
2558 * This routine is invoked to enable device interrupt and associate driver's 6473 * This routine is invoked to enable device interrupt and associate driver's
2559 * interrupt handler(s) to interrupt vector(s). Depending on the interrupt 6474 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
2560 * mode configured in the driver, the driver will try to fall back from the 6475 * interface spec. Depending on the interrupt mode configured in the driver,
2561 * configured interrupt mode to an interrupt mode which is supported by the 6476 * the driver will try to fall back from the configured interrupt mode to an
2562 * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. 6477 * interrupt mode which is supported by the platform, kernel, and device in
6478 * the order of:
6479 * MSI-X -> MSI -> IRQ.
2563 * 6480 *
2564 * Return codes 6481 * Return codes
2565 * 0 - successful 6482 * 0 - successful
2566 * other values - error 6483 * other values - error
2567 **/ 6484 **/
2568static uint32_t 6485static uint32_t
2569lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6486lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2570{ 6487{
2571 uint32_t intr_mode = LPFC_INTR_ERROR; 6488 uint32_t intr_mode = LPFC_INTR_ERROR;
2572 int retval; 6489 int retval, index;
2573 6490
2574 if (cfg_mode == 2) { 6491 if (cfg_mode == 2) {
2575 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6492 /* Preparation before conf_msi mbox cmd */
2576 retval = lpfc_sli_config_port(phba, 3); 6493 retval = 0;
2577 if (!retval) { 6494 if (!retval) {
2578 /* Now, try to enable MSI-X interrupt mode */ 6495 /* Now, try to enable MSI-X interrupt mode */
2579 retval = lpfc_enable_msix(phba); 6496 retval = lpfc_sli4_enable_msix(phba);
2580 if (!retval) { 6497 if (!retval) {
2581 /* Indicate initialization to MSI-X mode */ 6498 /* Indicate initialization to MSI-X mode */
2582 phba->intr_type = MSIX; 6499 phba->intr_type = MSIX;
@@ -2587,7 +6504,7 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2587 6504
2588 /* Fallback to MSI if MSI-X initialization failed */ 6505 /* Fallback to MSI if MSI-X initialization failed */
2589 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6506 if (cfg_mode >= 1 && phba->intr_type == NONE) {
2590 retval = lpfc_enable_msi(phba); 6507 retval = lpfc_sli4_enable_msi(phba);
2591 if (!retval) { 6508 if (!retval) {
2592 /* Indicate initialization to MSI mode */ 6509 /* Indicate initialization to MSI mode */
2593 phba->intr_type = MSI; 6510 phba->intr_type = MSI;
@@ -2597,34 +6514,39 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2597 6514
2598 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 6515 /* Fallback to INTx if both MSI-X/MSI initialization failed */
2599 if (phba->intr_type == NONE) { 6516 if (phba->intr_type == NONE) {
2600 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6517 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
2601 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6518 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2602 if (!retval) { 6519 if (!retval) {
2603 /* Indicate initialization to INTx mode */ 6520 /* Indicate initialization to INTx mode */
2604 phba->intr_type = INTx; 6521 phba->intr_type = INTx;
2605 intr_mode = 0; 6522 intr_mode = 0;
6523 for (index = 0; index < phba->cfg_fcp_eq_count;
6524 index++) {
6525 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6526 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6527 }
2606 } 6528 }
2607 } 6529 }
2608 return intr_mode; 6530 return intr_mode;
2609} 6531}
2610 6532
2611/** 6533/**
2612 * lpfc_disable_intr - Disable device interrupt 6534 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
2613 * @phba: pointer to lpfc hba data structure. 6535 * @phba: pointer to lpfc hba data structure.
2614 * 6536 *
2615 * This routine is invoked to disable device interrupt and disassociate the 6537 * This routine is invoked to disable device interrupt and disassociate
2616 * driver's interrupt handler(s) from interrupt vector(s). Depending on the 6538 * the driver's interrupt handler(s) from interrupt vector(s) to device
2617 * interrupt mode, the driver will release the interrupt vector(s) for the 6539 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
2618 * message signaled interrupt. 6540 * will release the interrupt vector(s) for the message signaled interrupt.
2619 **/ 6541 **/
2620static void 6542static void
2621lpfc_disable_intr(struct lpfc_hba *phba) 6543lpfc_sli4_disable_intr(struct lpfc_hba *phba)
2622{ 6544{
2623 /* Disable the currently initialized interrupt mode */ 6545 /* Disable the currently initialized interrupt mode */
2624 if (phba->intr_type == MSIX) 6546 if (phba->intr_type == MSIX)
2625 lpfc_disable_msix(phba); 6547 lpfc_sli4_disable_msix(phba);
2626 else if (phba->intr_type == MSI) 6548 else if (phba->intr_type == MSI)
2627 lpfc_disable_msi(phba); 6549 lpfc_sli4_disable_msi(phba);
2628 else if (phba->intr_type == INTx) 6550 else if (phba->intr_type == INTx)
2629 free_irq(phba->pcidev->irq, phba); 6551 free_irq(phba->pcidev->irq, phba);
2630 6552
@@ -2636,263 +6558,233 @@ lpfc_disable_intr(struct lpfc_hba *phba)
2636} 6558}
2637 6559
2638/** 6560/**
2639 * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem 6561 * lpfc_unset_hba - Unset SLI3 hba device initialization
2640 * @pdev: pointer to PCI device 6562 * @phba: pointer to lpfc hba data structure.
2641 * @pid: pointer to PCI device identifier
2642 *
2643 * This routine is to be registered to the kernel's PCI subsystem. When an
2644 * Emulex HBA is presented on the PCI bus, the kernel PCI subsystem looks at
2645 * PCI device-specific information of the device and driver to see whether
2646 * the driver can support this kind of device. If the match is
2647 * successful, the driver core invokes this routine. If this routine
2648 * determines it can claim the HBA, it does all the initialization that it
2649 * needs to do to handle the HBA properly.
2650 * 6563 *
2651 * Return code 6564 * This routine is invoked to unset the HBA device initialization steps to
2652 * 0 - driver can claim the device 6565 * a device with SLI-3 interface spec.
2653 * negative value - driver cannot claim the device
2654 **/ 6566 **/
2655static int __devinit 6567static void
2656lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 6568lpfc_unset_hba(struct lpfc_hba *phba)
2657{ 6569{
2658 struct lpfc_vport *vport = NULL; 6570 struct lpfc_vport *vport = phba->pport;
2659 struct lpfc_hba *phba; 6571 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2660 struct lpfc_sli *psli;
2661 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
2662 struct Scsi_Host *shost = NULL;
2663 void *ptr;
2664 unsigned long bar0map_len, bar2map_len;
2665 int error = -ENODEV, retval;
2666 int i, hbq_count;
2667 uint16_t iotag;
2668 uint32_t cfg_mode, intr_mode;
2669 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2670 struct lpfc_adapter_event_header adapter_event;
2671
2672 if (pci_enable_device_mem(pdev))
2673 goto out;
2674 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
2675 goto out_disable_device;
2676
2677 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
2678 if (!phba)
2679 goto out_release_regions;
2680 6572
2681 atomic_set(&phba->fast_event_count, 0); 6573 spin_lock_irq(shost->host_lock);
2682 spin_lock_init(&phba->hbalock); 6574 vport->load_flag |= FC_UNLOADING;
6575 spin_unlock_irq(shost->host_lock);
2683 6576
2684 /* Initialize ndlp management spinlock */ 6577 lpfc_stop_hba_timers(phba);
2685 spin_lock_init(&phba->ndlp_lock);
2686 6578
2687 phba->pcidev = pdev; 6579 phba->pport->work_port_events = 0;
2688 6580
2689 /* Assign an unused board number */ 6581 lpfc_sli_hba_down(phba);
2690 if ((phba->brd_no = lpfc_get_instance()) < 0)
2691 goto out_free_phba;
2692 6582
2693 INIT_LIST_HEAD(&phba->port_list); 6583 lpfc_sli_brdrestart(phba);
2694 init_waitqueue_head(&phba->wait_4_mlo_m_q);
2695 /*
2696 * Get all the module params for configuring this host and then
2697 * establish the host.
2698 */
2699 lpfc_get_cfgparam(phba);
2700 phba->max_vpi = LPFC_MAX_VPI;
2701 6584
2702 /* Initialize timers used by driver */ 6585 lpfc_sli_disable_intr(phba);
2703 init_timer(&phba->hb_tmofunc);
2704 phba->hb_tmofunc.function = lpfc_hb_timeout;
2705 phba->hb_tmofunc.data = (unsigned long)phba;
2706 6586
2707 psli = &phba->sli; 6587 return;
2708 init_timer(&psli->mbox_tmo); 6588}
2709 psli->mbox_tmo.function = lpfc_mbox_timeout;
2710 psli->mbox_tmo.data = (unsigned long) phba;
2711 init_timer(&phba->fcp_poll_timer);
2712 phba->fcp_poll_timer.function = lpfc_poll_timeout;
2713 phba->fcp_poll_timer.data = (unsigned long) phba;
2714 init_timer(&phba->fabric_block_timer);
2715 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
2716 phba->fabric_block_timer.data = (unsigned long) phba;
2717 init_timer(&phba->eratt_poll);
2718 phba->eratt_poll.function = lpfc_poll_eratt;
2719 phba->eratt_poll.data = (unsigned long) phba;
2720 6589
2721 pci_set_master(pdev); 6590/**
2722 pci_save_state(pdev); 6591 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
2723 pci_try_set_mwi(pdev); 6592 * @phba: pointer to lpfc hba data structure.
6593 *
6594 * This routine is invoked to unset the HBA device initialization steps to
6595 * a device with SLI-4 interface spec.
6596 **/
6597static void
6598lpfc_sli4_unset_hba(struct lpfc_hba *phba)
6599{
6600 struct lpfc_vport *vport = phba->pport;
6601 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2724 6602
2725 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) 6603 spin_lock_irq(shost->host_lock);
2726 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0) 6604 vport->load_flag |= FC_UNLOADING;
2727 goto out_idr_remove; 6605 spin_unlock_irq(shost->host_lock);
2728 6606
2729 /* 6607 phba->pport->work_port_events = 0;
2730 * Get the bus address of Bar0 and Bar2 and the number of bytes
2731 * required by each mapping.
2732 */
2733 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
2734 bar0map_len = pci_resource_len(phba->pcidev, 0);
2735 6608
2736 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 6609 lpfc_sli4_hba_down(phba);
2737 bar2map_len = pci_resource_len(phba->pcidev, 2);
2738 6610
2739 /* Map HBA SLIM to a kernel virtual address. */ 6611 lpfc_sli4_disable_intr(phba);
2740 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
2741 if (!phba->slim_memmap_p) {
2742 error = -ENODEV;
2743 dev_printk(KERN_ERR, &pdev->dev,
2744 "ioremap failed for SLIM memory.\n");
2745 goto out_idr_remove;
2746 }
2747 6612
2748 /* Map HBA Control Registers to a kernel virtual address. */ 6613 return;
2749 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 6614}
2750 if (!phba->ctrl_regs_memmap_p) {
2751 error = -ENODEV;
2752 dev_printk(KERN_ERR, &pdev->dev,
2753 "ioremap failed for HBA control registers.\n");
2754 goto out_iounmap_slim;
2755 }
2756 6615
2757 /* Allocate memory for SLI-2 structures */ 6616/**
2758 phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, 6617 * lpfc_sli4_hba_unset - Unset the fcoe hba
2759 SLI2_SLIM_SIZE, 6618 * @phba: Pointer to HBA context object.
2760 &phba->slim2p.phys, 6619 *
2761 GFP_KERNEL); 6620 * This function is called in the SLI4 code path to reset the HBA's FCoE
2762 if (!phba->slim2p.virt) 6621 * function. The caller is not required to hold any lock. This routine
2763 goto out_iounmap; 6622 * issues PCI function reset mailbox command to reset the FCoE function.
6623 * At the end of the function, it calls lpfc_hba_down_post function to
6624 * free any pending commands.
6625 **/
6626static void
6627lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6628{
6629 int wait_cnt = 0;
6630 LPFC_MBOXQ_t *mboxq;
2764 6631
2765 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 6632 lpfc_stop_hba_timers(phba);
2766 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6633 phba->sli4_hba.intr_enable = 0;
2767 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2768 phba->IOCBs = (phba->slim2p.virt +
2769 offsetof(struct lpfc_sli2_slim, IOCBs));
2770 6634
2771 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 6635 /*
2772 lpfc_sli_hbq_size(), 6636 * Gracefully wait out the potential current outstanding asynchronous
2773 &phba->hbqslimp.phys, 6637 * mailbox command.
2774 GFP_KERNEL); 6638 */
2775 if (!phba->hbqslimp.virt)
2776 goto out_free_slim;
2777 6639
2778 hbq_count = lpfc_sli_hbq_count(); 6640 /* First, block any pending async mailbox command from posted */
2779 ptr = phba->hbqslimp.virt; 6641 spin_lock_irq(&phba->hbalock);
2780 for (i = 0; i < hbq_count; ++i) { 6642 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
2781 phba->hbqs[i].hbq_virt = ptr; 6643 spin_unlock_irq(&phba->hbalock);
2782 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6644 /* Now, trying to wait it out if we can */
2783 ptr += (lpfc_hbq_defs[i]->entry_count * 6645 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2784 sizeof(struct lpfc_hbq_entry)); 6646 msleep(10);
6647 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6648 break;
2785 } 6649 }
2786 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6650 /* Forcefully release the outstanding mailbox command if timed out */
2787 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6651 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2788 6652 spin_lock_irq(&phba->hbalock);
2789 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6653 mboxq = phba->sli.mbox_active;
2790 6654 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
2791 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 6655 __lpfc_mbox_cmpl_put(phba, mboxq);
2792 6656 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2793 /* Initialize the SLI Layer to run with lpfc HBAs. */ 6657 phba->sli.mbox_active = NULL;
2794 lpfc_sli_setup(phba); 6658 spin_unlock_irq(&phba->hbalock);
2795 lpfc_sli_queue_setup(phba);
2796
2797 retval = lpfc_mem_alloc(phba);
2798 if (retval) {
2799 error = retval;
2800 goto out_free_hbqslimp;
2801 } 6659 }
2802 6660
2803 /* Initialize and populate the iocb list per host. */ 6661 /* Tear down the queues in the HBA */
2804 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6662 lpfc_sli4_queue_unset(phba);
2805 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
2806 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
2807 if (iocbq_entry == NULL) {
2808 printk(KERN_ERR "%s: only allocated %d iocbs of "
2809 "expected %d count. Unloading driver.\n",
2810 __func__, i, LPFC_IOCB_LIST_CNT);
2811 error = -ENOMEM;
2812 goto out_free_iocbq;
2813 }
2814 6663
2815 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6664 /* Disable PCI subsystem interrupt */
2816 if (iotag == 0) { 6665 lpfc_sli4_disable_intr(phba);
2817 kfree (iocbq_entry);
2818 printk(KERN_ERR "%s: failed to allocate IOTAG. "
2819 "Unloading driver.\n",
2820 __func__);
2821 error = -ENOMEM;
2822 goto out_free_iocbq;
2823 }
2824 6666
2825 spin_lock_irq(&phba->hbalock); 6667 /* Stop kthread signal shall trigger work_done one more time */
2826 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6668 kthread_stop(phba->worker_thread);
2827 phba->total_iocbq_bufs++;
2828 spin_unlock_irq(&phba->hbalock);
2829 }
2830 6669
2831 /* Initialize HBA structure */ 6670 /* Stop the SLI4 device port */
2832 phba->fc_edtov = FF_DEF_EDTOV; 6671 phba->pport->work_port_events = 0;
2833 phba->fc_ratov = FF_DEF_RATOV; 6672}
2834 phba->fc_altov = FF_DEF_ALTOV;
2835 phba->fc_arbtov = FF_DEF_ARBTOV;
2836 6673
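/*
 * Illustration (editor's sketch, not part of the diff): the bounded wait-out
 * used by lpfc_sli4_hba_unset() above - after blocking new asynchronous
 * mailbox commands, the active one is polled until it drains or the budget
 * expires, and only then is it force-completed. The helper name is
 * hypothetical.
 */
static bool example_wait_mbox_idle(struct lpfc_hba *phba)
{
	int wait_cnt = 0;

	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);	/* 10 ms per poll */
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			return false;	/* caller must force completion */
	}
	return true;
}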
2837 INIT_LIST_HEAD(&phba->work_list); 6674/**
2838 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6675 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
2839 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6676 * @pdev: pointer to PCI device
6677 * @pid: pointer to PCI device identifier
6678 *
6679 * This routine is to be called to attach a device with SLI-3 interface spec
6680 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6681 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-
6682 * specific information of the device and driver to see whether the driver can
6683 * support this kind of device. If the match is successful, the driver core
6684 * invokes this routine. If this routine determines it can claim the HBA, it
6685 * does all the initialization that it needs to do to handle the HBA properly.
6686 *
6687 * Return code
6688 * 0 - driver can claim the device
6689 * negative value - driver cannot claim the device
6690 **/
6691static int __devinit
6692lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6693{
6694 struct lpfc_hba *phba;
6695 struct lpfc_vport *vport = NULL;
6696 int error;
6697 uint32_t cfg_mode, intr_mode;
2840 6698
2841 /* Initialize the wait queue head for the kernel thread */ 6699 /* Allocate memory for HBA structure */
2842 init_waitqueue_head(&phba->work_waitq); 6700 phba = lpfc_hba_alloc(pdev);
6701 if (!phba)
6702 return -ENOMEM;
2843 6703
2844 /* Startup the kernel thread for this host adapter. */ 6704 /* Perform generic PCI device enabling operation */
2845 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6705 error = lpfc_enable_pci_dev(phba);
2846 "lpfc_worker_%d", phba->brd_no); 6706 if (error) {
2847 if (IS_ERR(phba->worker_thread)) { 6707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2848 error = PTR_ERR(phba->worker_thread); 6708 "1401 Failed to enable pci device.\n");
2849 goto out_free_iocbq; 6709 goto out_free_phba;
2850 } 6710 }
2851 6711
2852 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 6712 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
2853 spin_lock_init(&phba->scsi_buf_list_lock); 6713 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
2854 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 6714 if (error)
6715 goto out_disable_pci_dev;
2855 6716
2856 /* Initialize list of fabric iocbs */ 6717 /* Set up SLI-3 specific device PCI memory space */
2857 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6718 error = lpfc_sli_pci_mem_setup(phba);
6719 if (error) {
6720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6721 "1402 Failed to set up pci memory space.\n");
6722 goto out_disable_pci_dev;
6723 }
2858 6724
2859 /* Initialize list to save ELS buffers */ 6725 /* Set up phase-1 common device driver resources */
2860 INIT_LIST_HEAD(&phba->elsbuf); 6726 error = lpfc_setup_driver_resource_phase1(phba);
6727 if (error) {
6728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6729 "1403 Failed to set up driver resource.\n");
6730 goto out_unset_pci_mem_s3;
6731 }
2861 6732
2862 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6733 /* Set up SLI-3 specific device driver resources */
2863 if (!vport) 6734 error = lpfc_sli_driver_resource_setup(phba);
2864 goto out_kthread_stop; 6735 if (error) {
6736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6737 "1404 Failed to set up driver resource.\n");
6738 goto out_unset_pci_mem_s3;
6739 }
2865 6740
2866 shost = lpfc_shost_from_vport(vport); 6741 /* Initialize and populate the iocb list per host */
2867 phba->pport = vport; 6742 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
2868 lpfc_debugfs_initialize(vport); 6743 if (error) {
6744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6745 "1405 Failed to initialize iocb list.\n");
6746 goto out_unset_driver_resource_s3;
6747 }
2869 6748
2870 pci_set_drvdata(pdev, shost); 6749 /* Set up common device driver resources */
6750 error = lpfc_setup_driver_resource_phase2(phba);
6751 if (error) {
6752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6753 "1406 Failed to set up driver resource.\n");
6754 goto out_free_iocb_list;
6755 }
2871 6756
2872 phba->MBslimaddr = phba->slim_memmap_p; 6757 /* Create SCSI host to the physical port */
2873 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6758 error = lpfc_create_shost(phba);
2874 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6759 if (error) {
2875 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2876 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6761 "1407 Failed to create scsi host.\n");
6762 goto out_unset_driver_resource;
6763 }
2877 6764
2878 /* Configure sysfs attributes */ 6765 /* Configure sysfs attributes */
2879 if (lpfc_alloc_sysfs_attr(vport)) { 6766 vport = phba->pport;
6767 error = lpfc_alloc_sysfs_attr(vport);
6768 if (error) {
2880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6769 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2881 "1476 Failed to allocate sysfs attr\n"); 6770 "1476 Failed to allocate sysfs attr\n");
2882 error = -ENOMEM; 6771 goto out_destroy_shost;
2883 goto out_destroy_port;
2884 } 6772 }
2885 6773
6774 /* Now, trying to enable interrupt and bring up the device */
2886 cfg_mode = phba->cfg_use_msi; 6775 cfg_mode = phba->cfg_use_msi;
2887 while (true) { 6776 while (true) {
6777 /* Put device to a known state before enabling interrupt */
6778 lpfc_stop_port(phba);
2888 /* Configure and enable interrupt */ 6779 /* Configure and enable interrupt */
2889 intr_mode = lpfc_enable_intr(phba, cfg_mode); 6780 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
2890 if (intr_mode == LPFC_INTR_ERROR) { 6781 if (intr_mode == LPFC_INTR_ERROR) {
2891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2892 "0426 Failed to enable interrupt.\n"); 6783 "0431 Failed to enable interrupt.\n");
6784 error = -ENODEV;
2893 goto out_free_sysfs_attr; 6785 goto out_free_sysfs_attr;
2894 } 6786 }
2895 /* HBA SLI setup */ 6787 /* SLI-3 HBA setup */
2896 if (lpfc_sli_hba_setup(phba)) { 6788 if (lpfc_sli_hba_setup(phba)) {
2897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2898 "1477 Failed to set up hba\n"); 6790 "1477 Failed to set up hba\n");
@@ -2902,185 +6794,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2902 6794
2903 /* Wait 50ms for the interrupts of previous mailbox commands */ 6795 /* Wait 50ms for the interrupts of previous mailbox commands */
2904 msleep(50); 6796 msleep(50);
2905 /* Check active interrupts received */ 6797 /* Check active interrupts on message signaled interrupts */
2906 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 6798 if (intr_mode == 0 ||
6799 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2907 /* Log the current active interrupt mode */ 6800 /* Log the current active interrupt mode */
2908 phba->intr_mode = intr_mode; 6801 phba->intr_mode = intr_mode;
2909 lpfc_log_intr_mode(phba, intr_mode); 6802 lpfc_log_intr_mode(phba, intr_mode);
2910 break; 6803 break;
2911 } else { 6804 } else {
2912 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6805 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2913 "0451 Configure interrupt mode (%d) " 6806 "0447 Configure interrupt mode (%d) "
2914 "failed active interrupt test.\n", 6807 "failed active interrupt test.\n",
2915 intr_mode); 6808 intr_mode);
2916 if (intr_mode == 0) {
2917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2918 "0479 Failed to enable "
2919 "interrupt.\n");
2920 error = -ENODEV;
2921 goto out_remove_device;
2922 }
2923 /* Stop HBA SLI setups */
2924 lpfc_stop_port(phba);
2925 /* Disable the current interrupt mode */ 6809 /* Disable the current interrupt mode */
2926 lpfc_disable_intr(phba); 6810 lpfc_sli_disable_intr(phba);
2927 /* Try next level of interrupt mode */ 6811 /* Try next level of interrupt mode */
2928 cfg_mode = --intr_mode; 6812 cfg_mode = --intr_mode;
2929 } 6813 }
2930 } 6814 }
2931 6815
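/*
 * Illustration (editor's sketch, not part of the diff): the probe-time
 * verification loop above in isolation. After enabling a message-signaled
 * mode the driver waits 50 ms and checks the interrupt counter; if nothing
 * arrived it downgrades MSI-X -> MSI -> INTx and retries. The wrapper name
 * is hypothetical and error handling is abbreviated.
 */
static int example_verify_intr_mode(struct lpfc_hba *phba)
{
	uint32_t cfg_mode = phba->cfg_use_msi, intr_mode;

	while (true) {
		lpfc_stop_port(phba);	/* known state before enabling */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR)
			return -ENODEV;
		if (lpfc_sli_hba_setup(phba))
			return -ENODEV;
		msleep(50);	/* let prior mailbox interrupts land */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS)
			break;	/* INTx, or interrupts observed: accept */
		lpfc_sli_disable_intr(phba);
		cfg_mode = --intr_mode;	/* try the next mode down */
	}
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);
	return 0;
}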
2932 /* 6816 /* Perform post initialization setup */
2933 * hba setup may have changed the hba_queue_depth so we need to adjust 6817 lpfc_post_init_setup(phba);
2934 * the value of can_queue.
2935 */
2936 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2937 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2938
2939 if (lpfc_prot_mask && lpfc_prot_guard) {
2940 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2941 "1478 Registering BlockGuard with the "
2942 "SCSI layer\n");
2943 6818
2944 scsi_host_set_prot(shost, lpfc_prot_mask); 6819 /* Check if there are static vports to be created. */
2945 scsi_host_set_guard(shost, lpfc_prot_guard); 6820 lpfc_create_static_vport(phba);
2946 }
2947 }
2948
2949 if (!_dump_buf_data) {
2950 int pagecnt = 10;
2951 while (pagecnt) {
2952 spin_lock_init(&_dump_buf_lock);
2953 _dump_buf_data =
2954 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2955 if (_dump_buf_data) {
2956 printk(KERN_ERR "BLKGRD allocated %d pages for "
2957 "_dump_buf_data at 0x%p\n",
2958 (1 << pagecnt), _dump_buf_data);
2959 _dump_buf_data_order = pagecnt;
2960 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2961 << pagecnt));
2962 break;
2963 } else {
2964 --pagecnt;
2965 }
2966
2967 }
2968
2969 if (!_dump_buf_data_order)
2970 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2971 "memory for hexdump\n");
2972
2973 } else {
2974 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2975 "\n", _dump_buf_data);
2976 }
2977
2978
2979 if (!_dump_buf_dif) {
2980 int pagecnt = 10;
2981 while (pagecnt) {
2982 _dump_buf_dif =
2983 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2984 if (_dump_buf_dif) {
2985 printk(KERN_ERR "BLKGRD allocated %d pages for "
2986 "_dump_buf_dif at 0x%p\n",
2987 (1 << pagecnt), _dump_buf_dif);
2988 _dump_buf_dif_order = pagecnt;
2989 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2990 << pagecnt));
2991 break;
2992 } else {
2993 --pagecnt;
2994 }
2995
2996 }
2997
2998 if (!_dump_buf_dif_order)
2999 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
3000 "memory for hexdump\n");
3001
3002 } else {
3003 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
3004 _dump_buf_dif);
3005 }
3006
3007 lpfc_host_attrib_init(shost);
3008
3009 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3010 spin_lock_irq(shost->host_lock);
3011 lpfc_poll_start_timer(phba);
3012 spin_unlock_irq(shost->host_lock);
3013 }
3014
3015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3016 "0428 Perform SCSI scan\n");
3017 /* Send board arrival event to upper layer */
3018 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
3019 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
3020 fc_host_post_vendor_event(shost, fc_get_event_number(),
3021 sizeof(adapter_event),
3022 (char *) &adapter_event,
3023 LPFC_NL_VENDOR_ID);
3024 6821
3025 return 0; 6822 return 0;
3026 6823
3027out_remove_device: 6824out_remove_device:
3028 spin_lock_irq(shost->host_lock); 6825 lpfc_unset_hba(phba);
3029 vport->load_flag |= FC_UNLOADING;
3030 spin_unlock_irq(shost->host_lock);
3031 lpfc_stop_phba_timers(phba);
3032 phba->pport->work_port_events = 0;
3033 lpfc_disable_intr(phba);
3034 lpfc_sli_hba_down(phba);
3035 lpfc_sli_brdrestart(phba);
3036out_free_sysfs_attr: 6826out_free_sysfs_attr:
3037 lpfc_free_sysfs_attr(vport); 6827 lpfc_free_sysfs_attr(vport);
3038out_destroy_port: 6828out_destroy_shost:
3039 destroy_port(vport); 6829 lpfc_destroy_shost(phba);
3040out_kthread_stop: 6830out_unset_driver_resource:
3041 kthread_stop(phba->worker_thread); 6831 lpfc_unset_driver_resource_phase2(phba);
3042out_free_iocbq: 6832out_free_iocb_list:
3043 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6833 lpfc_free_iocb_list(phba);
3044 &phba->lpfc_iocb_list, list) { 6834out_unset_driver_resource_s3:
3045 kfree(iocbq_entry); 6835 lpfc_sli_driver_resource_unset(phba);
3046 phba->total_iocbq_bufs--; 6836out_unset_pci_mem_s3:
3047 } 6837 lpfc_sli_pci_mem_unset(phba);
3048 lpfc_mem_free(phba); 6838out_disable_pci_dev:
3049out_free_hbqslimp: 6839 lpfc_disable_pci_dev(phba);
3050 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3051 phba->hbqslimp.virt, phba->hbqslimp.phys);
3052out_free_slim:
3053 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3054 phba->slim2p.virt, phba->slim2p.phys);
3055out_iounmap:
3056 iounmap(phba->ctrl_regs_memmap_p);
3057out_iounmap_slim:
3058 iounmap(phba->slim_memmap_p);
3059out_idr_remove:
3060 idr_remove(&lpfc_hba_index, phba->brd_no);
3061out_free_phba: 6840out_free_phba:
3062 kfree(phba); 6841 lpfc_hba_free(phba);
3063out_release_regions:
3064 pci_release_selected_regions(pdev, bars);
3065out_disable_device:
3066 pci_disable_device(pdev);
3067out:
3068 pci_set_drvdata(pdev, NULL);
3069 if (shost)
3070 scsi_host_put(shost);
3071 return error; 6842 return error;
3072} 6843}
3073 6844
3074/** 6845/**
3075 * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem 6846 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
3076 * @pdev: pointer to PCI device 6847 * @pdev: pointer to PCI device
3077 * 6848 *
3078 * This routine is to be registered to the kernel's PCI subsystem. When an 6849 * This routine is to be called to detach a device with SLI-3 interface
3079 * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup 6850 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
3080 * for the HBA device to be removed from the PCI subsystem properly. 6851 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6852 * device to be removed from the PCI subsystem properly.
3081 **/ 6853 **/
3082static void __devexit 6854static void __devexit
3083lpfc_pci_remove_one(struct pci_dev *pdev) 6855lpfc_pci_remove_one_s3(struct pci_dev *pdev)
3084{ 6856{
3085 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6857 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6858 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3098,7 +6870,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3098 /* Release all the vports against this physical port */ 6870 /* Release all the vports against this physical port */
3099 vports = lpfc_create_vport_work_array(phba); 6871 vports = lpfc_create_vport_work_array(phba);
3100 if (vports != NULL) 6872 if (vports != NULL)
3101 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) 6873 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
3102 fc_vport_terminate(vports[i]->fc_vport); 6874 fc_vport_terminate(vports[i]->fc_vport);
3103 lpfc_destroy_vport_work_array(phba, vports); 6875 lpfc_destroy_vport_work_array(phba, vports);
3104 6876
@@ -3120,7 +6892,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3120 /* Final cleanup of txcmplq and reset the HBA */ 6892 /* Final cleanup of txcmplq and reset the HBA */
3121 lpfc_sli_brdrestart(phba); 6893 lpfc_sli_brdrestart(phba);
3122 6894
3123 lpfc_stop_phba_timers(phba); 6895 lpfc_stop_hba_timers(phba);
3124 spin_lock_irq(&phba->hbalock); 6896 spin_lock_irq(&phba->hbalock);
3125 list_del_init(&vport->listentry); 6897 list_del_init(&vport->listentry);
3126 spin_unlock_irq(&phba->hbalock); 6898 spin_unlock_irq(&phba->hbalock);
@@ -3128,7 +6900,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3128 lpfc_debugfs_terminate(vport); 6900 lpfc_debugfs_terminate(vport);
3129 6901
3130 /* Disable interrupt */ 6902 /* Disable interrupt */
3131 lpfc_disable_intr(phba); 6903 lpfc_sli_disable_intr(phba);
3132 6904
3133 pci_set_drvdata(pdev, NULL); 6905 pci_set_drvdata(pdev, NULL);
3134 scsi_host_put(shost); 6906 scsi_host_put(shost);
@@ -3138,7 +6910,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3138 * corresponding pools here. 6910 * corresponding pools here.
3139 */ 6911 */
3140 lpfc_scsi_free(phba); 6912 lpfc_scsi_free(phba);
3141 lpfc_mem_free(phba); 6913 lpfc_mem_free_all(phba);
3142 6914
3143 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6915 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3144 phba->hbqslimp.virt, phba->hbqslimp.phys); 6916 phba->hbqslimp.virt, phba->hbqslimp.phys);
@@ -3151,36 +6923,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3151 iounmap(phba->ctrl_regs_memmap_p); 6923 iounmap(phba->ctrl_regs_memmap_p);
3152 iounmap(phba->slim_memmap_p); 6924 iounmap(phba->slim_memmap_p);
3153 6925
3154 idr_remove(&lpfc_hba_index, phba->brd_no); 6926 lpfc_hba_free(phba);
3155
3156 kfree(phba);
3157 6927
3158 pci_release_selected_regions(pdev, bars); 6928 pci_release_selected_regions(pdev, bars);
3159 pci_disable_device(pdev); 6929 pci_disable_device(pdev);
3160} 6930}
3161 6931
3162/** 6932/**
3163 * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management 6933 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
3164 * @pdev: pointer to PCI device 6934 * @pdev: pointer to PCI device
3165 * @msg: power management message 6935 * @msg: power management message
3166 * 6936 *
3167 * This routine is to be registered to the kernel's PCI subsystem to support 6937 * This routine is to be called from the kernel's PCI subsystem to support
3168 * system Power Management (PM). When PM invokes this method, it quiesces the 6938 * system Power Management (PM) to device with SLI-3 interface spec. When
3169 * device by stopping the driver's worker thread for the device, turning off 6939 * PM invokes this method, it quiesces the device by stopping the driver's
3170 * device's interrupt and DMA, and bring the device offline. Note that as the 6940 * worker thread for the device, turning off device's interrupt and DMA,
3171 * driver implements the minimum PM requirements to a power-aware driver's PM 6941 * and bringing the device offline. Note that as the driver implements the
3172 * support for suspend/resume -- all the possible PM messages (SUSPEND, 6942 * minimum PM requirements to a power-aware driver's PM support for the
3173 * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND 6943 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
3174 * and the driver will fully reinitialize its device during resume() method 6944 * to the suspend() method call will be treated as SUSPEND and the driver will
3175 * call, the driver will set device to PCI_D3hot state in PCI config space 6945 * fully reinitialize its device during resume() method call, the driver will
3176 * instead of setting it according to the @msg provided by the PM. 6946 * set device to PCI_D3hot state in PCI config space instead of setting it
6947 * according to the @msg provided by the PM.
3177 * 6948 *
3178 * Return code 6949 * Return code
3179 * 0 - driver suspended the device 6950 * 0 - driver suspended the device
3180 * Error otherwise 6951 * Error otherwise
3181 **/ 6952 **/
3182static int 6953static int
3183lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 6954lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
3184{ 6955{
3185 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6956 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3186 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6957 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3194,7 +6965,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3194 kthread_stop(phba->worker_thread); 6965 kthread_stop(phba->worker_thread);
3195 6966
3196 /* Disable interrupt from device */ 6967 /* Disable interrupt from device */
3197 lpfc_disable_intr(phba); 6968 lpfc_sli_disable_intr(phba);
3198 6969
3199 /* Save device state to PCI config space */ 6970 /* Save device state to PCI config space */
3200 pci_save_state(pdev); 6971 pci_save_state(pdev);
@@ -3204,25 +6975,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3204} 6975}
3205 6976
3206/** 6977/**
3207 * lpfc_pci_resume_one - lpfc PCI func to resume device for power management 6978 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
3208 * @pdev: pointer to PCI device 6979 * @pdev: pointer to PCI device
3209 * 6980 *
3210 * This routine is to be registered to the kernel's PCI subsystem to support 6981 * This routine is to be called from the kernel's PCI subsystem to support
3211 * system Power Management (PM). When PM invokes this method, it restores 6982 * system Power Management (PM) to device with SLI-3 interface spec. When PM
3212 * the device's PCI config space state and fully reinitializes the device 6983 * invokes this method, it restores the device's PCI config space state and
3213 * and brings it online. Note that as the driver implements the minimum PM 6984 * fully reinitializes the device and brings it online. Note that as the
3214 * requirements to a power-aware driver's PM for suspend/resume -- all 6985 * driver implements the minimum PM requirements to a power-aware driver's
3215 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 6986 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
3216 * method call will be treated as SUSPEND and the driver will fully 6987 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
3217 * reinitialize its device during resume() method call, the device will be 6988 * driver will fully reinitialize its device during resume() method call,
3218 * set to PCI_D0 directly in PCI config space before restoring the state. 6989 * the device will be set to PCI_D0 directly in PCI config space before
6990 * restoring the state.
3219 * 6991 *
3220 * Return code 6992 * Return code
3221 * 0 - driver resumed the device 6993 * 0 - driver resumed the device
3222 * Error otherwise 6994 * Error otherwise
3223 **/ 6995 **/
3224static int 6996static int
3225lpfc_pci_resume_one(struct pci_dev *pdev) 6997lpfc_pci_resume_one_s3(struct pci_dev *pdev)
3226{ 6998{
3227 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6999 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3228 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7000 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3250,7 +7022,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3250 } 7022 }
3251 7023
3252 /* Configure and enable interrupt */ 7024 /* Configure and enable interrupt */
3253 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7025 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3254 if (intr_mode == LPFC_INTR_ERROR) { 7026 if (intr_mode == LPFC_INTR_ERROR) {
3255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3256 "0430 PM resume Failed to enable interrupt\n"); 7028 "0430 PM resume Failed to enable interrupt\n");
@@ -3269,23 +7041,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3269} 7041}
3270 7042
3271/** 7043/**
3272 * lpfc_io_error_detected - Driver method for handling PCI I/O error detected 7044 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
3273 * @pdev: pointer to PCI device. 7045 * @pdev: pointer to PCI device.
3274 * @state: the current PCI connection state. 7046 * @state: the current PCI connection state.
3275 * 7047 *
3276 * This routine is registered to the PCI subsystem for error handling. This 7048 * This routine is called from the PCI subsystem for I/O error handling to
3277 * function is called by the PCI subsystem after a PCI bus error affecting 7049 * device with SLI-3 interface spec. This function is called by the PCI
3278 * this device has been detected. When this function is invoked, it will 7050 * subsystem after a PCI bus error affecting this device has been detected.
3279 * need to stop all the I/Os and interrupt(s) to the device. Once that is 7051 * When this function is invoked, it will need to stop all the I/Os and
3280 * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to 7052 * interrupt(s) to the device. Once that is done, it will return
3281 * perform proper recovery as desired. 7053 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7054 * as desired.
3282 * 7055 *
3283 * Return codes 7056 * Return codes
3284 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7057 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
3285 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7058 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3286 **/ 7059 **/
3287static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 7060static pci_ers_result_t
3288 pci_channel_state_t state) 7061lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
3289{ 7062{
3290 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7063 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3291 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7064 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3312,30 +7085,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
3312 lpfc_sli_abort_iocb_ring(phba, pring); 7085 lpfc_sli_abort_iocb_ring(phba, pring);
3313 7086
3314 /* Disable interrupt */ 7087 /* Disable interrupt */
3315 lpfc_disable_intr(phba); 7088 lpfc_sli_disable_intr(phba);
3316 7089
3317 /* Request a slot reset. */ 7090 /* Request a slot reset. */
3318 return PCI_ERS_RESULT_NEED_RESET; 7091 return PCI_ERS_RESULT_NEED_RESET;
3319} 7092}
3320 7093
3321/** 7094/**
3322 * lpfc_io_slot_reset - Restart a PCI device from scratch 7095 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
3323 * @pdev: pointer to PCI device. 7096 * @pdev: pointer to PCI device.
3324 * 7097 *
3325 * This routine is registered to the PCI subsystem for error handling. This is 7098 * This routine is called from the PCI subsystem for error handling to
3326 * called after PCI bus has been reset to restart the PCI card from scratch, 7099 * device with SLI-3 interface spec. This is called after PCI bus has been
3327 * as if from a cold-boot. During the PCI subsystem error recovery, after the 7100 * reset to restart the PCI card from scratch, as if from a cold-boot.
3328 * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform 7101 * During the PCI subsystem error recovery, after driver returns
3329 * proper error recovery and then call this routine before calling the .resume 7102 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
3330 * method to recover the device. This function will initialize the HBA device, 7103 * recovery and then call this routine before calling the .resume method
3331 * enable the interrupt, but it will just put the HBA to offline state without 7104 * to recover the device. This function will initialize the HBA device,
3332 * passing any I/O traffic. 7105 * enable the interrupt, but it will just put the HBA to offline state
7106 * without passing any I/O traffic.
3333 * 7107 *
3334 * Return codes 7108 * Return codes
3335 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7109 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
3336 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7110 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3337 */ 7111 */
3338static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 7112static pci_ers_result_t
7113lpfc_io_slot_reset_s3(struct pci_dev *pdev)
3339{ 7114{
3340 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7115 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3341 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7116 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3354,11 +7129,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3354 pci_set_master(pdev); 7129 pci_set_master(pdev);
3355 7130
3356 spin_lock_irq(&phba->hbalock); 7131 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 7132 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock); 7133 spin_unlock_irq(&phba->hbalock);
3359 7134
3360 /* Configure and enable interrupt */ 7135 /* Configure and enable interrupt */
3361 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7136 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3362 if (intr_mode == LPFC_INTR_ERROR) { 7137 if (intr_mode == LPFC_INTR_ERROR) {
3363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3364 "0427 Cannot re-enable interrupt after " 7139 "0427 Cannot re-enable interrupt after "
@@ -3378,20 +7153,715 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3378} 7153}
3379 7154
3380/** 7155/**
3381 * lpfc_io_resume - Resume PCI I/O operation 7156 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
3382 * @pdev: pointer to PCI device 7157 * @pdev: pointer to PCI device
3383 * 7158 *
3384 * This routine is registered to the PCI subsystem for error handling. It is 7159 * This routine is called from the PCI subsystem for error handling to device
3385 * called when kernel error recovery tells the lpfc driver that it is ok to 7160 * with SLI-3 interface spec. It is called when kernel error recovery tells
3386 * resume normal PCI operation after PCI bus error recovery. After this call, 7161 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
3387 * traffic can start to flow from this device again. 7162 * error recovery. After this call, traffic can start to flow from this device
7163 * again.
3388 */ 7164 */
3389static void lpfc_io_resume(struct pci_dev *pdev) 7165static void
7166lpfc_io_resume_s3(struct pci_dev *pdev)
7167{
7168 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7169 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7170
7171 lpfc_online(phba);
7172}
7173
7174/**
7175 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7176 * @phba: pointer to lpfc hba data structure.
7177 *
7178 * returns the number of ELS/CT IOCBs to reserve
7179 **/
7180int
7181lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7182{
7183 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7184
7185 if (phba->sli_rev == LPFC_SLI_REV4) {
7186 if (max_xri <= 100)
7187 return 4;
7188 else if (max_xri <= 256)
7189 return 8;
7190 else if (max_xri <= 512)
7191 return 16;
7192 else if (max_xri <= 1024)
7193 return 32;
7194 else
7195 return 48;
7196 } else
7197 return 0;
7198}
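The tier boundaries are easiest to see exercised. A small standalone sketch of the same thresholds (the sample max_xri values are hypothetical, not from real hardware):

#include <stdio.h>

/* Mirror of the ELS/CT IOCB reservation tiers used above. */
static int els_iocb_cnt_for(int max_xri)
{
	if (max_xri <= 100)
		return 4;
	else if (max_xri <= 256)
		return 8;
	else if (max_xri <= 512)
		return 16;
	else if (max_xri <= 1024)
		return 32;
	else
		return 48;
}

int main(void)
{
	int samples[] = { 64, 256, 300, 1024, 4096 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("max_xri=%4d -> %2d ELS/CT IOCBs reserved\n",
		       samples[i], els_iocb_cnt_for(samples[i]));
	return 0;
}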
7199
7200/**
7201 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7202 * @pdev: pointer to PCI device
7203 * @pid: pointer to PCI device identifier
7204 *
7205 * This routine is called from the kernel's PCI subsystem to device with
7206 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7207 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 7208 * information of the device and driver to see if the driver states that it
7209 * can support this kind of device. If the match is successful, the driver
7210 * core invokes this routine. If this routine determines it can claim the HBA,
7211 * it does all the initialization that it needs to do to handle the HBA
7212 * properly.
7213 *
7214 * Return code
7215 * 0 - driver can claim the device
7216 * negative value - driver can not claim the device
7217 **/
7218static int __devinit
7219lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7220{
7221 struct lpfc_hba *phba;
7222 struct lpfc_vport *vport = NULL;
7223 int error;
7224 uint32_t cfg_mode, intr_mode;
7225 int mcnt;
7226
7227 /* Allocate memory for HBA structure */
7228 phba = lpfc_hba_alloc(pdev);
7229 if (!phba)
7230 return -ENOMEM;
7231
7232 /* Perform generic PCI device enabling operation */
7233 error = lpfc_enable_pci_dev(phba);
7234 if (error) {
7235 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7236 "1409 Failed to enable pci device.\n");
7237 goto out_free_phba;
7238 }
7239
7240 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
7241 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7242 if (error)
7243 goto out_disable_pci_dev;
7244
7245 /* Set up SLI-4 specific device PCI memory space */
7246 error = lpfc_sli4_pci_mem_setup(phba);
7247 if (error) {
7248 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7249 "1410 Failed to set up pci memory space.\n");
7250 goto out_disable_pci_dev;
7251 }
7252
7253 /* Set up phase-1 common device driver resources */
7254 error = lpfc_setup_driver_resource_phase1(phba);
7255 if (error) {
7256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7257 "1411 Failed to set up driver resource.\n");
7258 goto out_unset_pci_mem_s4;
7259 }
7260
7261 /* Set up SLI-4 Specific device driver resources */
7262 error = lpfc_sli4_driver_resource_setup(phba);
7263 if (error) {
7264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7265 "1412 Failed to set up driver resource.\n");
7266 goto out_unset_pci_mem_s4;
7267 }
7268
7269 /* Initialize and populate the iocb list per host */
7270 error = lpfc_init_iocb_list(phba,
7271 phba->sli4_hba.max_cfg_param.max_xri);
7272 if (error) {
7273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7274 "1413 Failed to initialize iocb list.\n");
7275 goto out_unset_driver_resource_s4;
7276 }
7277
7278 /* Set up common device driver resources */
7279 error = lpfc_setup_driver_resource_phase2(phba);
7280 if (error) {
7281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7282 "1414 Failed to set up driver resource.\n");
7283 goto out_free_iocb_list;
7284 }
7285
7286 /* Create SCSI host to the physical port */
7287 error = lpfc_create_shost(phba);
7288 if (error) {
7289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7290 "1415 Failed to create scsi host.\n");
7291 goto out_unset_driver_resource;
7292 }
7293
7294 /* Configure sysfs attributes */
7295 vport = phba->pport;
7296 error = lpfc_alloc_sysfs_attr(vport);
7297 if (error) {
7298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7299 "1416 Failed to allocate sysfs attr\n");
7300 goto out_destroy_shost;
7301 }
7302
7303 /* Now, trying to enable interrupt and bring up the device */
7304 cfg_mode = phba->cfg_use_msi;
7305 while (true) {
7306 /* Put device to a known state before enabling interrupt */
7307 lpfc_stop_port(phba);
7308 /* Configure and enable interrupt */
7309 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7310 if (intr_mode == LPFC_INTR_ERROR) {
7311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7312 "0426 Failed to enable interrupt.\n");
7313 error = -ENODEV;
7314 goto out_free_sysfs_attr;
7315 }
7316 /* Set up SLI-4 HBA */
7317 if (lpfc_sli4_hba_setup(phba)) {
7318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7319 "1421 Failed to set up hba\n");
7320 error = -ENODEV;
7321 goto out_disable_intr;
7322 }
7323
7324 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
7325 if (intr_mode != 0)
7326 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7327 LPFC_ACT_INTR_CNT);
7328
7329 /* Check active interrupts received only for MSI/MSI-X */
7330 if (intr_mode == 0 ||
7331 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7332 /* Log the current active interrupt mode */
7333 phba->intr_mode = intr_mode;
7334 lpfc_log_intr_mode(phba, intr_mode);
7335 break;
7336 }
7337 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7338 "0451 Configure interrupt mode (%d) "
7339 "failed active interrupt test.\n",
7340 intr_mode);
 7341 /* Unset the previous SLI-4 HBA setup */
7342 lpfc_sli4_unset_hba(phba);
7343 /* Try next level of interrupt mode */
7344 cfg_mode = --intr_mode;
7345 }
7346
7347 /* Perform post initialization setup */
7348 lpfc_post_init_setup(phba);
7349
7350 return 0;
7351
7352out_disable_intr:
7353 lpfc_sli4_disable_intr(phba);
7354out_free_sysfs_attr:
7355 lpfc_free_sysfs_attr(vport);
7356out_destroy_shost:
7357 lpfc_destroy_shost(phba);
7358out_unset_driver_resource:
7359 lpfc_unset_driver_resource_phase2(phba);
7360out_free_iocb_list:
7361 lpfc_free_iocb_list(phba);
7362out_unset_driver_resource_s4:
7363 lpfc_sli4_driver_resource_unset(phba);
7364out_unset_pci_mem_s4:
7365 lpfc_sli4_pci_mem_unset(phba);
7366out_disable_pci_dev:
7367 lpfc_disable_pci_dev(phba);
7368out_free_phba:
7369 lpfc_hba_free(phba);
7370 return error;
7371}
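The probe loop above degrades the interrupt mode one level at a time (MSI-X, then MSI, then INTx) until the active-interrupt test passes; INTx skips the test entirely. A toy model of that fallback (the enum values and the fake test are assumptions for illustration):

#include <stdio.h>

/* Hypothetical interrupt modes, highest capability first; in the
 * driver the equivalent values come from lpfc_sli4_enable_intr(). */
enum intr_mode { INTR_INTX = 0, INTR_MSI = 1, INTR_MSIX = 2 };

/* Stand-in for the hardware check: pretend MSI-X fails the active
 * interrupt test and everything else passes. INTx always "passes"
 * because the driver skips the test in that mode. */
static int active_intr_test_passes(int mode)
{
	return mode != INTR_MSIX;
}

int main(void)
{
	int mode = INTR_MSIX;	/* start from the user-requested mode */

	for (;;) {
		if (active_intr_test_passes(mode)) {
			printf("settled on interrupt mode %d\n", mode);
			break;
		}
		printf("mode %d failed active interrupt test, downgrading\n",
		       mode);
		mode--;		/* MSI-X -> MSI -> INTx, as in cfg_mode = --intr_mode */
	}
	return 0;
}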
7372
7373/**
7374 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7375 * @pdev: pointer to PCI device
7376 *
7377 * This routine is called from the kernel's PCI subsystem to device with
7378 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7379 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7380 * device to be removed from the PCI subsystem properly.
7381 **/
7382static void __devexit
7383lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7384{
7385 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7386 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7387 struct lpfc_vport **vports;
7388 struct lpfc_hba *phba = vport->phba;
7389 int i;
7390
7391 /* Mark the device unloading flag */
7392 spin_lock_irq(&phba->hbalock);
7393 vport->load_flag |= FC_UNLOADING;
7394 spin_unlock_irq(&phba->hbalock);
7395
7396 /* Free the HBA sysfs attributes */
7397 lpfc_free_sysfs_attr(vport);
7398
7399 /* Release all the vports against this physical port */
7400 vports = lpfc_create_vport_work_array(phba);
7401 if (vports != NULL)
7402 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7403 fc_vport_terminate(vports[i]->fc_vport);
7404 lpfc_destroy_vport_work_array(phba, vports);
7405
7406 /* Remove FC host and then SCSI host with the physical port */
7407 fc_remove_host(shost);
7408 scsi_remove_host(shost);
7409
7410 /* Perform cleanup on the physical port */
7411 lpfc_cleanup(vport);
7412
7413 /*
7414 * Bring down the SLI Layer. This step disables all interrupts,
7415 * clears the rings, discards all mailbox commands, and resets
7416 * the HBA FCoE function.
7417 */
7418 lpfc_debugfs_terminate(vport);
7419 lpfc_sli4_hba_unset(phba);
7420
7421 spin_lock_irq(&phba->hbalock);
7422 list_del_init(&vport->listentry);
7423 spin_unlock_irq(&phba->hbalock);
7424
7425 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7426 * buffers are released to their corresponding pools here.
7427 */
7428 lpfc_scsi_free(phba);
7429 lpfc_sli4_driver_resource_unset(phba);
7430
7431 /* Unmap adapter Control and Doorbell registers */
7432 lpfc_sli4_pci_mem_unset(phba);
7433
7434 /* Release PCI resources and disable device's PCI function */
7435 scsi_host_put(shost);
7436 lpfc_disable_pci_dev(phba);
7437
7438 /* Finally, free the driver's device data structure */
7439 lpfc_hba_free(phba);
7440
7441 return;
7442}
7443
7444/**
7445 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7446 * @pdev: pointer to PCI device
7447 * @msg: power management message
7448 *
7449 * This routine is called from the kernel's PCI subsystem to support system
7450 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7451 * this method, it quiesces the device by stopping the driver's worker
 7452 * thread for the device, turning off device's interrupt and DMA, and bringing
7453 * the device offline. Note that as the driver implements the minimum PM
7454 * requirements to a power-aware driver's PM support for suspend/resume -- all
7455 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7456 * method call will be treated as SUSPEND and the driver will fully
7457 * reinitialize its device during resume() method call, the driver will set
7458 * device to PCI_D3hot state in PCI config space instead of setting it
7459 * according to the @msg provided by the PM.
7460 *
7461 * Return code
7462 * 0 - driver suspended the device
7463 * Error otherwise
7464 **/
7465static int
7466lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3390{ 7467{
3391 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7468 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3392 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7469 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3393 7470
7471 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7472 "0298 PCI device Power Management suspend.\n");
7473
7474 /* Bring down the device */
7475 lpfc_offline_prep(phba);
7476 lpfc_offline(phba);
7477 kthread_stop(phba->worker_thread);
7478
7479 /* Disable interrupt from device */
7480 lpfc_sli4_disable_intr(phba);
7481
7482 /* Save device state to PCI config space */
7483 pci_save_state(pdev);
7484 pci_set_power_state(pdev, PCI_D3hot);
7485
7486 return 0;
7487}
7488
7489/**
7490 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7491 * @pdev: pointer to PCI device
7492 *
7493 * This routine is called from the kernel's PCI subsystem to support system
 7494 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7495 * this method, it restores the device's PCI config space state and fully
7496 * reinitializes the device and brings it online. Note that as the driver
7497 * implements the minimum PM requirements to a power-aware driver's PM for
7498 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7499 * to the suspend() method call will be treated as SUSPEND and the driver
7500 * will fully reinitialize its device during resume() method call, the device
7501 * will be set to PCI_D0 directly in PCI config space before restoring the
7502 * state.
7503 *
7504 * Return code
 7505 * 0 - driver resumed the device
7506 * Error otherwise
7507 **/
7508static int
7509lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7510{
7511 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7512 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7513 uint32_t intr_mode;
7514 int error;
7515
7516 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7517 "0292 PCI device Power Management resume.\n");
7518
7519 /* Restore device state from PCI config space */
7520 pci_set_power_state(pdev, PCI_D0);
7521 pci_restore_state(pdev);
7522 if (pdev->is_busmaster)
7523 pci_set_master(pdev);
7524
7525 /* Startup the kernel thread for this host adapter. */
7526 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7527 "lpfc_worker_%d", phba->brd_no);
7528 if (IS_ERR(phba->worker_thread)) {
7529 error = PTR_ERR(phba->worker_thread);
7530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7531 "0293 PM resume failed to start worker "
7532 "thread: error=x%x.\n", error);
7533 return error;
7534 }
7535
7536 /* Configure and enable interrupt */
7537 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7538 if (intr_mode == LPFC_INTR_ERROR) {
7539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7540 "0294 PM resume Failed to enable interrupt\n");
7541 return -EIO;
7542 } else
7543 phba->intr_mode = intr_mode;
7544
7545 /* Restart HBA and bring it online */
7546 lpfc_sli_brdrestart(phba);
3394 lpfc_online(phba); 7547 lpfc_online(phba);
7548
7549 /* Log the current active interrupt mode */
7550 lpfc_log_intr_mode(phba, phba->intr_mode);
7551
7552 return 0;
7553}
7554
7555/**
7556 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7557 * @pdev: pointer to PCI device.
7558 * @state: the current PCI connection state.
7559 *
7560 * This routine is called from the PCI subsystem for error handling to device
7561 * with SLI-4 interface spec. This function is called by the PCI subsystem
7562 * after a PCI bus error affecting this device has been detected. When this
7563 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7564 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7565 * for the PCI subsystem to perform proper recovery as desired.
7566 *
7567 * Return codes
7568 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7569 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7570 **/
7571static pci_ers_result_t
7572lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7573{
7574 return PCI_ERS_RESULT_NEED_RESET;
7575}
7576
7577/**
 7578 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
7579 * @pdev: pointer to PCI device.
7580 *
7581 * This routine is called from the PCI subsystem for error handling to device
7582 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7583 * restart the PCI card from scratch, as if from a cold-boot. During the
7584 * PCI subsystem error recovery, after the driver returns
7585 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7586 * recovery and then call this routine before calling the .resume method to
7587 * recover the device. This function will initialize the HBA device, enable
7588 * the interrupt, but it will just put the HBA to offline state without
7589 * passing any I/O traffic.
7590 *
7591 * Return codes
7592 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7593 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7594 */
7595static pci_ers_result_t
7596lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7597{
7598 return PCI_ERS_RESULT_RECOVERED;
7599}
7600
7601/**
7602 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7603 * @pdev: pointer to PCI device
7604 *
7605 * This routine is called from the PCI subsystem for error handling to device
7606 * with SLI-4 interface spec. It is called when kernel error recovery tells
7607 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7608 * error recovery. After this call, traffic can start to flow from this device
7609 * again.
7610 **/
7611static void
7612lpfc_io_resume_s4(struct pci_dev *pdev)
7613{
7614 return;
7615}
7616
7617/**
7618 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7619 * @pdev: pointer to PCI device
7620 * @pid: pointer to PCI device identifier
7621 *
7622 * This routine is to be registered to the kernel's PCI subsystem. When an
7623 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7624 * at PCI device-specific information of the device and driver to see if the
 7625 * driver states that it can support this kind of device. If the match is
7626 * successful, the driver core invokes this routine. This routine dispatches
7627 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7628 * do all the initialization that it needs to do to handle the HBA device
7629 * properly.
7630 *
7631 * Return code
7632 * 0 - driver can claim the device
7633 * negative value - driver can not claim the device
7634 **/
7635static int __devinit
7636lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7637{
7638 int rc;
7639 uint16_t dev_id;
7640
7641 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7642 return -ENODEV;
7643
7644 switch (dev_id) {
7645 case PCI_DEVICE_ID_TIGERSHARK:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break;
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break;
7651 }
7652 return rc;
7653}
7654
7655/**
7656 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7657 * @pdev: pointer to PCI device
7658 *
7659 * This routine is to be registered to the kernel's PCI subsystem. When an
7660 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7661 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7662 * remove routine, which will perform all the necessary cleanup for the
7663 * device to be removed from the PCI subsystem properly.
7664 **/
7665static void __devexit
7666lpfc_pci_remove_one(struct pci_dev *pdev)
7667{
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7670
7671 switch (phba->pci_dev_grp) {
7672 case LPFC_PCI_DEV_LP:
7673 lpfc_pci_remove_one_s3(pdev);
7674 break;
7675 case LPFC_PCI_DEV_OC:
7676 lpfc_pci_remove_one_s4(pdev);
7677 break;
7678 default:
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "1424 Invalid PCI device group: 0x%x\n",
7681 phba->pci_dev_grp);
7682 break;
7683 }
7684 return;
7685}
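All of the remaining entry points below select the SLI-3 or SLI-4 handler from phba->pci_dev_grp, recorded at probe time by lpfc_api_table_setup(). A hedged sketch of the same group-keyed dispatch reduced to a tiny ops table (the table itself is illustrative; the driver uses switch statements, as shown):

#include <stdio.h>

/* The two groups mirror LPFC_PCI_DEV_LP (SLI-3) and LPFC_PCI_DEV_OC
 * (SLI-4) from the driver; everything else here is a sketch. */
enum pci_dev_grp { DEV_LP = 0x1, DEV_OC = 0x2 };

struct dev_ops {
	void (*remove)(void);
};

static void remove_s3(void) { puts("SLI-3 remove path"); }
static void remove_s4(void) { puts("SLI-4 remove path"); }

static const struct dev_ops *ops_for(enum pci_dev_grp grp)
{
	static const struct dev_ops s3_ops = { .remove = remove_s3 };
	static const struct dev_ops s4_ops = { .remove = remove_s4 };

	switch (grp) {
	case DEV_LP: return &s3_ops;
	case DEV_OC: return &s4_ops;
	}
	return NULL;		/* invalid PCI device group */
}

int main(void)
{
	ops_for(DEV_OC)->remove();	/* a Tigershark-class device */
	ops_for(DEV_LP)->remove();	/* any other lpfc device */
	return 0;
}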
7686
7687/**
7688 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7689 * @pdev: pointer to PCI device
7690 * @msg: power management message
7691 *
7692 * This routine is to be registered to the kernel's PCI subsystem to support
7693 * system Power Management (PM). When PM invokes this method, it dispatches
7694 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7695 * suspend the device.
7696 *
7697 * Return code
7698 * 0 - driver suspended the device
7699 * Error otherwise
7700 **/
7701static int
7702lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7703{
7704 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7705 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7706 int rc = -ENODEV;
7707
7708 switch (phba->pci_dev_grp) {
7709 case LPFC_PCI_DEV_LP:
7710 rc = lpfc_pci_suspend_one_s3(pdev, msg);
7711 break;
7712 case LPFC_PCI_DEV_OC:
7713 rc = lpfc_pci_suspend_one_s4(pdev, msg);
7714 break;
7715 default:
7716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7717 "1425 Invalid PCI device group: 0x%x\n",
7718 phba->pci_dev_grp);
7719 break;
7720 }
7721 return rc;
7722}
7723
7724/**
7725 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7726 * @pdev: pointer to PCI device
7727 *
7728 * This routine is to be registered to the kernel's PCI subsystem to support
7729 * system Power Management (PM). When PM invokes this method, it dispatches
7730 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7731 * resume the device.
7732 *
7733 * Return code
 7734 * 0 - driver resumed the device
7735 * Error otherwise
7736 **/
7737static int
7738lpfc_pci_resume_one(struct pci_dev *pdev)
7739{
7740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7742 int rc = -ENODEV;
7743
7744 switch (phba->pci_dev_grp) {
7745 case LPFC_PCI_DEV_LP:
7746 rc = lpfc_pci_resume_one_s3(pdev);
7747 break;
7748 case LPFC_PCI_DEV_OC:
7749 rc = lpfc_pci_resume_one_s4(pdev);
7750 break;
7751 default:
7752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7753 "1426 Invalid PCI device group: 0x%x\n",
7754 phba->pci_dev_grp);
7755 break;
7756 }
7757 return rc;
7758}
7759
7760/**
7761 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7762 * @pdev: pointer to PCI device.
7763 * @state: the current PCI connection state.
7764 *
7765 * This routine is registered to the PCI subsystem for error handling. This
7766 * function is called by the PCI subsystem after a PCI bus error affecting
7767 * this device has been detected. When this routine is invoked, it dispatches
7768 * the action to the proper SLI-3 or SLI-4 device error detected handling
7769 * routine, which will perform the proper error detected operation.
7770 *
7771 * Return codes
7772 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7773 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7774 **/
7775static pci_ers_result_t
7776lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7777{
7778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7779 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7780 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7781
7782 switch (phba->pci_dev_grp) {
7783 case LPFC_PCI_DEV_LP:
7784 rc = lpfc_io_error_detected_s3(pdev, state);
7785 break;
7786 case LPFC_PCI_DEV_OC:
7787 rc = lpfc_io_error_detected_s4(pdev, state);
7788 break;
7789 default:
7790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7791 "1427 Invalid PCI device group: 0x%x\n",
7792 phba->pci_dev_grp);
7793 break;
7794 }
7795 return rc;
7796}
7797
7798/**
 7799 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
7800 * @pdev: pointer to PCI device.
7801 *
7802 * This routine is registered to the PCI subsystem for error handling. This
7803 * function is called after PCI bus has been reset to restart the PCI card
7804 * from scratch, as if from a cold-boot. When this routine is invoked, it
7805 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7806 * routine, which will perform the proper device reset.
7807 *
7808 * Return codes
7809 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7810 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7811 **/
7812static pci_ers_result_t
7813lpfc_io_slot_reset(struct pci_dev *pdev)
7814{
7815 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7816 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7817 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7818
7819 switch (phba->pci_dev_grp) {
7820 case LPFC_PCI_DEV_LP:
7821 rc = lpfc_io_slot_reset_s3(pdev);
7822 break;
7823 case LPFC_PCI_DEV_OC:
7824 rc = lpfc_io_slot_reset_s4(pdev);
7825 break;
7826 default:
7827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7828 "1428 Invalid PCI device group: 0x%x\n",
7829 phba->pci_dev_grp);
7830 break;
7831 }
7832 return rc;
7833}
7834
7835/**
7836 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7837 * @pdev: pointer to PCI device
7838 *
7839 * This routine is registered to the PCI subsystem for error handling. It
7840 * is called when kernel error recovery tells the lpfc driver that it is
7841 * OK to resume normal PCI operation after PCI bus error recovery. When
7842 * this routine is invoked, it dispatches the action to the proper SLI-3
7843 * or SLI-4 device io_resume routine, which will resume the device operation.
7844 **/
7845static void
7846lpfc_io_resume(struct pci_dev *pdev)
7847{
7848 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7850
7851 switch (phba->pci_dev_grp) {
7852 case LPFC_PCI_DEV_LP:
7853 lpfc_io_resume_s3(pdev);
7854 break;
7855 case LPFC_PCI_DEV_OC:
7856 lpfc_io_resume_s4(pdev);
7857 break;
7858 default:
7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7860 "1429 Invalid PCI device group: 0x%x\n",
7861 phba->pci_dev_grp);
7862 break;
7863 }
7864 return;
3395} 7865}
3396 7866
3397static struct pci_device_id lpfc_id_table[] = { 7867static struct pci_device_id lpfc_id_table[] = {
@@ -3469,6 +7939,8 @@ static struct pci_device_id lpfc_id_table[] = {
3469 PCI_ANY_ID, PCI_ANY_ID, }, 7939 PCI_ANY_ID, PCI_ANY_ID, },
3470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 7940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3471 PCI_ANY_ID, PCI_ANY_ID, }, 7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, },
3472 { 0 } 7944 { 0 }
3473}; 7945};
3474 7946
@@ -3486,7 +7958,7 @@ static struct pci_driver lpfc_driver = {
3486 .probe = lpfc_pci_probe_one, 7958 .probe = lpfc_pci_probe_one,
3487 .remove = __devexit_p(lpfc_pci_remove_one), 7959 .remove = __devexit_p(lpfc_pci_remove_one),
3488 .suspend = lpfc_pci_suspend_one, 7960 .suspend = lpfc_pci_suspend_one,
3489 .resume = lpfc_pci_resume_one, 7961 .resume = lpfc_pci_resume_one,
3490 .err_handler = &lpfc_err_handler, 7962 .err_handler = &lpfc_err_handler,
3491}; 7963};
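Not shown in this hunk is how the ID table and driver ops reach the PCI core; in the driver's module init this is essentially a pci_register_driver() call. A sketch under that assumption (the _sketch names are hypothetical):

#include <linux/module.h>
#include <linux/pci.h>

/* Expose the ID table for module autoloading and hand the driver
 * ops to the PCI core; error handling and the driver's other init
 * work are omitted. */
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static int __init lpfc_init_sketch(void)
{
	/* The PCI core walks lpfc_id_table and invokes .probe, i.e.
	 * lpfc_pci_probe_one() above, for each matching device. */
	return pci_register_driver(&lpfc_driver);
}

static void __exit lpfc_exit_sketch(void)
{
	pci_unregister_driver(&lpfc_driver);
}

module_init(lpfc_init_sketch);
module_exit(lpfc_exit_sketch);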
3492 7964
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 1aa85709b012..954ba57970a3 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,33 +18,39 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LOG_ELS 0x1 /* ELS events */ 21#define LOG_ELS 0x00000001 /* ELS events */
22#define LOG_DISCOVERY 0x2 /* Link discovery events */ 22#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
23#define LOG_MBOX 0x4 /* Mailbox events */ 23#define LOG_MBOX 0x00000004 /* Mailbox events */
24#define LOG_INIT 0x8 /* Initialization events */ 24#define LOG_INIT 0x00000008 /* Initialization events */
25#define LOG_LINK_EVENT 0x10 /* Link events */ 25#define LOG_LINK_EVENT 0x00000010 /* Link events */
26#define LOG_IP 0x20 /* IP traffic history */ 26#define LOG_IP 0x00000020 /* IP traffic history */
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x00000040 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x00000080 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x00000100 /* Temperature sensor events */
30#define LOG_BG 0x200 /* BlockGuard events */ 30#define LOG_BG 0x00000200 /* BlockGuard events */
31#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x00000400 /* Miscellaneous events */
32#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x00000800 /* SLI events */
33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
34#define LOG_LIBDFC 0x2000 /* Libdfc events */ 34#define LOG_LIBDFC 0x00002000 /* Libdfc events */
35#define LOG_VPORT 0x4000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOG_ALL_MSG 0xffff /* LOG all messages */ 36#define LOG_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
37 39
38#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
39 do { \ 41do { \
40 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 42 { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
41 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 43 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
42 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ 44 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
43 } while (0) 45} while (0)
44 46
45#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 47#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
46 do { \ 48do { \
47 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 49 { uint32_t log_verbose = (phba)->pport ? \
50 (phba)->pport->cfg_log_verbose : \
51 (phba)->cfg_log_verbose; \
52 if (((mask) & log_verbose) || (level[1] <= '3')) \
48 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 53 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
49 fmt, phba->brd_no, ##arg); } \ 54 fmt, phba->brd_no, ##arg); \
50 } while (0) 55 } \
56} while (0)
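With the masks widened to 32 bits, several event classes can still be OR'ed into one verbose check, and messages at KERN_ERR severity or higher bypass the mask entirely (the level[1] <= '3' test). A userspace model of that test (mask values copied from above; severity strings mimic the kernel's "<n>" prefixes):

#include <stdio.h>
#include <stdint.h>

#define LOG_INIT 0x00000008
#define LOG_SLI  0x00000800

/* Model of the verbose-mask test in lpfc_printf_log(): emit when the
 * mask matches the configured verbosity, or when the severity digit
 * is '3' (KERN_ERR) or lower. */
static void demo_log(uint32_t cfg_log_verbose, const char *level,
		     uint32_t mask, const char *msg)
{
	if ((mask & cfg_log_verbose) || level[1] <= '3')
		printf("%s %s", level, msg);
}

int main(void)
{
	uint32_t verbose = LOG_INIT;	/* only init events enabled */

	demo_log(verbose, "<6>", LOG_INIT | LOG_SLI, "printed: mask match\n");
	demo_log(verbose, "<6>", LOG_SLI, "suppressed: no match\n");
	demo_log(verbose, "<3>", LOG_SLI, "printed: KERN_ERR bypasses mask\n");
	return 0;
}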
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 134fc7fc2127..3423571dd1b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -39,6 +41,44 @@
39#include "lpfc_compat.h" 41#include "lpfc_compat.h"
40 42
41/** 43/**
44 * lpfc_dump_static_vport - Dump HBA's static vport information.
45 * @phba: pointer to lpfc hba data structure.
46 * @pmb: pointer to the driver internal queue element for mailbox command.
47 * @offset: offset for dumping vport info.
48 *
49 * The dump mailbox command provides a method for the device driver to obtain
50 * various types of information from the HBA device.
51 *
52 * This routine prepares the mailbox command for dumping list of static
53 * vports to be created.
54 **/
55void
56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
57 uint16_t offset)
58{
59 MAILBOX_t *mb;
60 void *ctx;
61
62 mb = &pmb->u.mb;
63 ctx = pmb->context2;
64
65 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.cv = 1;
69 mb->un.varDmp.type = DMP_NV_PARAMS;
70 mb->un.varDmp.entry_index = offset;
71 mb->un.varDmp.region_id = DMP_REGION_VPORT;
72 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
73 mb->un.varDmp.co = 0;
74 mb->un.varDmp.resp_offset = 0;
75 pmb->context2 = ctx;
76 mb->mbxOwner = OWN_HOST;
77
78 return;
79}
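A caller would typically pull a mailbox element from the driver's mempool, prepare it with this helper, and post it without waiting. A hedged sketch under that assumption (the completion handler is hypothetical; error handling is trimmed to the essentials):

/* Sketch: drive lpfc_dump_static_vport() through the mailbox
 * subsystem. my_static_vport_dump_cmpl is a hypothetical completion
 * handler with the standard mbox_cmpl signature. */
static void my_static_vport_dump_cmpl(struct lpfc_hba *phba,
				      LPFC_MBOXQ_t *pmb);

static int issue_static_vport_dump(struct lpfc_hba *phba, uint16_t offset)
{
	LPFC_MBOXQ_t *pmb;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_dump_static_vport(phba, pmb, offset);
	pmb->vport = phba->pport;
	pmb->mbox_cmpl = my_static_vport_dump_cmpl;

	/* Post asynchronously; on failure return the element to the pool. */
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}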
80
81/**
42 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory 82 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
43 * @phba: pointer to lpfc hba data structure. 83 * @phba: pointer to lpfc hba data structure.
44 * @pmb: pointer to the driver internal queue element for mailbox command. 84 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
58 MAILBOX_t *mb; 98 MAILBOX_t *mb;
59 void *ctx; 99 void *ctx;
60 100
61 mb = &pmb->mb; 101 mb = &pmb->u.mb;
62 ctx = pmb->context2; 102 ctx = pmb->context2;
63 103
64 /* Setup to dump VPD region */ 104 /* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
90 MAILBOX_t *mb; 130 MAILBOX_t *mb;
91 void *ctx; 131 void *ctx;
92 132
93 mb = &pmb->mb; 133 mb = &pmb->u.mb;
94 /* Save context so that we can restore after memset */ 134 /* Save context so that we can restore after memset */
95 ctx = pmb->context2; 135 ctx = pmb->context2;
96 136
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
125{ 165{
126 MAILBOX_t *mb; 166 MAILBOX_t *mb;
127 167
128 mb = &pmb->mb; 168 mb = &pmb->u.mb;
129 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 169 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
130 mb->mbxCommand = MBX_READ_NV; 170 mb->mbxCommand = MBX_READ_NV;
131 mb->mbxOwner = OWN_HOST; 171 mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
151{ 191{
152 MAILBOX_t *mb; 192 MAILBOX_t *mb;
153 193
154 mb = &pmb->mb; 194 mb = &pmb->u.mb;
155 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 195 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
156 mb->mbxCommand = MBX_ASYNCEVT_ENABLE; 196 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
157 mb->un.varCfgAsyncEvent.ring = ring; 197 mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
177{ 217{
178 MAILBOX_t *mb; 218 MAILBOX_t *mb;
179 219
180 mb = &pmb->mb; 220 mb = &pmb->u.mb;
181 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 221 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
182 mb->mbxCommand = MBX_HEARTBEAT; 222 mb->mbxCommand = MBX_HEARTBEAT;
183 mb->mbxOwner = OWN_HOST; 223 mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
211 struct lpfc_sli *psli; 251 struct lpfc_sli *psli;
212 252
213 psli = &phba->sli; 253 psli = &phba->sli;
214 mb = &pmb->mb; 254 mb = &pmb->u.mb;
215 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 255 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
216 256
217 INIT_LIST_HEAD(&mp->list); 257 INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
248{ 288{
249 MAILBOX_t *mb; 289 MAILBOX_t *mb;
250 290
251 mb = &pmb->mb; 291 mb = &pmb->u.mb;
252 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 292 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
253 293
254 mb->un.varClearLA.eventTag = phba->fc_eventTag; 294 mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
275lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 315lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
276{ 316{
277 struct lpfc_vport *vport = phba->pport; 317 struct lpfc_vport *vport = phba->pport;
278 MAILBOX_t *mb = &pmb->mb; 318 MAILBOX_t *mb = &pmb->u.mb;
279 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 319 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
280 320
281 /* NEW_FEATURE 321 /* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
321int 361int
322lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 362lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
323{ 363{
324 MAILBOX_t *mb = &pmb->mb; 364 MAILBOX_t *mb = &pmb->u.mb;
325 uint32_t attentionConditions[2]; 365 uint32_t attentionConditions[2];
326 366
327 /* Sanity check */ 367 /* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
405 struct lpfc_sli *psli; 445 struct lpfc_sli *psli;
406 MAILBOX_t *mb; 446 MAILBOX_t *mb;
407 447
408 mb = &pmb->mb; 448 mb = &pmb->u.mb;
409 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 449 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
410 450
411 psli = &phba->sli; 451 psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
492 struct lpfc_sli *psli; 532 struct lpfc_sli *psli;
493 533
494 psli = &phba->sli; 534 psli = &phba->sli;
495 mb = &pmb->mb; 535 mb = &pmb->u.mb;
496 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 536 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
497 537
498 mb->mbxOwner = OWN_HOST; 538 mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
515 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 555 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
516 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 556 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
517 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 557 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
518 mb->un.varRdSparm.vpi = vpi; 558 mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
519 559
520 /* save address for completion */ 560 /* save address for completion */
521 pmb->context1 = mp; 561 pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
544{ 584{
545 MAILBOX_t *mb; 585 MAILBOX_t *mb;
546 586
547 mb = &pmb->mb; 587 mb = &pmb->u.mb;
548 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 588 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
549 589
550 mb->un.varUnregDID.did = did; 590 mb->un.varUnregDID.did = did;
591 if (vpi != 0xffff)
592 vpi += phba->vpi_base;
551 mb->un.varUnregDID.vpi = vpi; 593 mb->un.varUnregDID.vpi = vpi;
552 594
553 mb->mbxCommand = MBX_UNREG_D_ID; 595 mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
573{ 615{
574 MAILBOX_t *mb; 616 MAILBOX_t *mb;
575 617
576 mb = &pmb->mb; 618 mb = &pmb->u.mb;
577 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 619 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
578 620
579 mb->mbxCommand = MBX_READ_CONFIG; 621 mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
598{ 640{
599 MAILBOX_t *mb; 641 MAILBOX_t *mb;
600 642
601 mb = &pmb->mb; 643 mb = &pmb->u.mb;
602 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
603 645
604 mb->mbxCommand = MBX_READ_LNK_STAT; 646 mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
607} 649}
608 650
609/** 651/**
610 * lpfc_reg_login - Prepare a mailbox command for registering remote login 652 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
611 * @phba: pointer to lpfc hba data structure. 653 * @phba: pointer to lpfc hba data structure.
612 * @vpi: virtual N_Port identifier. 654 * @vpi: virtual N_Port identifier.
613 * @did: remote port identifier. 655 * @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
631 * 1 - DMA memory allocation failed 673 * 1 - DMA memory allocation failed
632 **/ 674 **/
633int 675int
634lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 676lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
635 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) 677 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
636{ 678{
637 MAILBOX_t *mb = &pmb->mb; 679 MAILBOX_t *mb = &pmb->u.mb;
638 uint8_t *sparam; 680 uint8_t *sparam;
639 struct lpfc_dmabuf *mp; 681 struct lpfc_dmabuf *mp;
640 682
641 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 683 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
642 684
643 mb->un.varRegLogin.rpi = 0; 685 mb->un.varRegLogin.rpi = 0;
644 mb->un.varRegLogin.vpi = vpi; 686 if (phba->sli_rev == LPFC_SLI_REV4) {
687 mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
688 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
689 return 1;
690 }
691
692 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
645 mb->un.varRegLogin.did = did; 693 mb->un.varRegLogin.did = did;
646 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 694 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
647 695
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
697{ 745{
698 MAILBOX_t *mb; 746 MAILBOX_t *mb;
699 747
700 mb = &pmb->mb; 748 mb = &pmb->u.mb;
701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 749 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702 750
703 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 751 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
704 mb->un.varUnregLogin.rsvd1 = 0; 752 mb->un.varUnregLogin.rsvd1 = 0;
705 mb->un.varUnregLogin.vpi = vpi; 753 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
706 754
707 mb->mbxCommand = MBX_UNREG_LOGIN; 755 mb->mbxCommand = MBX_UNREG_LOGIN;
708 mb->mbxOwner = OWN_HOST; 756 mb->mbxOwner = OWN_HOST;
757
709 return; 758 return;
710} 759}
711 760
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
725 * This routine prepares the mailbox command for registering a virtual N_Port. 774 * This routine prepares the mailbox command for registering a virtual N_Port.
726 **/ 775 **/
727void 776void
728lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, 777lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
729 LPFC_MBOXQ_t *pmb)
730{ 778{
731 MAILBOX_t *mb = &pmb->mb; 779 MAILBOX_t *mb = &pmb->u.mb;
732 780
733 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 781 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
734 782
735 mb->un.varRegVpi.vpi = vpi; 783 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
736 mb->un.varRegVpi.sid = sid; 784 mb->un.varRegVpi.sid = vport->fc_myDID;
785 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
737 786
738 mb->mbxCommand = MBX_REG_VPI; 787 mb->mbxCommand = MBX_REG_VPI;
739 mb->mbxOwner = OWN_HOST; 788 mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
760void 809void
761lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) 810lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
762{ 811{
763 MAILBOX_t *mb = &pmb->mb; 812 MAILBOX_t *mb = &pmb->u.mb;
764 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 813 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
765 814
766 mb->un.varUnregVpi.vpi = vpi; 815 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
767 816
768 mb->mbxCommand = MBX_UNREG_VPI; 817 mb->mbxCommand = MBX_UNREG_VPI;
769 mb->mbxOwner = OWN_HOST; 818 mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
852void 901void
853lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 902lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
854{ 903{
855 MAILBOX_t *mb = &pmb->mb; 904 MAILBOX_t *mb = &pmb->u.mb;
856 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 905 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
857 mb->un.varRdRev.cv = 1; 906 mb->un.varRdRev.cv = 1;
858 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ 907 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
945 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) 994 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
946{ 995{
947 int i; 996 int i;
948 MAILBOX_t *mb = &pmb->mb; 997 MAILBOX_t *mb = &pmb->u.mb;
949 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; 998 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
950 999
951 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1000 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
1020lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 1069lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1021{ 1070{
1022 int i; 1071 int i;
1023 MAILBOX_t *mb = &pmb->mb; 1072 MAILBOX_t *mb = &pmb->u.mb;
1024 struct lpfc_sli *psli; 1073 struct lpfc_sli *psli;
1025 struct lpfc_sli_ring *pring; 1074 struct lpfc_sli_ring *pring;
1026 1075
@@ -1075,7 +1124,7 @@ void
1075lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1124lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1076{ 1125{
1077 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; 1126 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1078 MAILBOX_t *mb = &pmb->mb; 1127 MAILBOX_t *mb = &pmb->u.mb;
1079 dma_addr_t pdma_addr; 1128 dma_addr_t pdma_addr;
1080 uint32_t bar_low, bar_high; 1129 uint32_t bar_low, bar_high;
1081 size_t offset; 1130 size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1099 1148
1100 /* If HBA supports SLI=3 ask for it */ 1149 /* If HBA supports SLI=3 ask for it */
1101 1150
1102 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1151 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1103 if (phba->cfg_enable_bg) 1152 if (phba->cfg_enable_bg)
1104 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ 1153 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1154 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1105 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1155 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1106 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1156 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1107 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ 1157 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
1108 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1158 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1109 if (phba->max_vpi && phba->cfg_enable_npiv && 1159 if (phba->max_vpi && phba->cfg_enable_npiv &&
1110 phba->vpd.sli3Feat.cmv) { 1160 phba->vpd.sli3Feat.cmv) {
1111 mb->un.varCfgPort.max_vpi = phba->max_vpi; 1161 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1112 mb->un.varCfgPort.cmv = 1; 1162 mb->un.varCfgPort.cmv = 1;
1113 } else 1163 } else
1114 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; 1164 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1115 } else 1165 } else
1116 phba->sli_rev = 2; 1166 phba->sli_rev = LPFC_SLI_REV2;
1117 mb->un.varCfgPort.sli_mode = phba->sli_rev; 1167 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1118 1168
1119 /* Now setup pcb */ 1169 /* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1245void 1295void
1246lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1296lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1247{ 1297{
1248 MAILBOX_t *mb = &pmb->mb; 1298 MAILBOX_t *mb = &pmb->u.mb;
1249 1299
1250 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 1300 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1251 mb->mbxCommand = MBX_KILL_BOARD; 1301 mb->mbxCommand = MBX_KILL_BOARD;
@@ -1305,29 +1355,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
1305} 1355}
1306 1356
1307/** 1357/**
1358 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
1359 * @phba: pointer to lpfc hba data structure.
1360 * @mbq: pointer to the driver internal queue element for mailbox command.
1361 *
1362 * This routine puts the completed mailbox command into the mailbox command
1363 * complete list. This is the unlocked version of the routine. The mailbox
1364 * complete list is used by the driver worker thread to process mailbox
1365 * complete callback functions outside the driver interrupt handler.
1366 **/
1367void
1368__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1369{
1370 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1371}
1372
1373/**
1308 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list 1374 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
1309 * @phba: pointer to lpfc hba data structure. 1375 * @phba: pointer to lpfc hba data structure.
1310 * @mbq: pointer to the driver internal queue element for mailbox command. 1376 * @mbq: pointer to the driver internal queue element for mailbox command.
1311 * 1377 *
1312 * This routine put the completed mailbox command into the mailbox command 1378 * This routine puts the completed mailbox command into the mailbox command
1313 * complete list. This routine is called from driver interrupt handler 1379 * complete list. This is the locked version of the routine. The mailbox
1314 * context.The mailbox complete list is used by the driver worker thread 1380 * complete list is used by the driver worker thread to process mailbox
1315 * to process mailbox complete callback functions outside the driver interrupt 1381 * complete callback functions outside the driver interrupt handler.
1316 * handler.
1317 **/ 1382 **/
1318void 1383void
1319lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1384lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1320{ 1385{
1321 unsigned long iflag; 1386 unsigned long iflag;
1322 1387
1323 /* This function expects to be called from interrupt context */ 1388 /* This function expects to be called from interrupt context */
1324 spin_lock_irqsave(&phba->hbalock, iflag); 1389 spin_lock_irqsave(&phba->hbalock, iflag);
1325 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); 1390 __lpfc_mbox_cmpl_put(phba, mbq);
1326 spin_unlock_irqrestore(&phba->hbalock, iflag); 1391 spin_unlock_irqrestore(&phba->hbalock, iflag);
1327 return; 1392 return;
1328} 1393}
1329 1394
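For context, a minimal sketch (not part of this patch) of how the locked/unlocked pair is meant to be used: a caller that already holds phba->hbalock uses the __ variant directly, everyone else goes through the locking wrapper.

	/* Illustrative caller, assuming only the helpers added above */
	static void example_mbox_complete(struct lpfc_hba *phba,
					  LPFC_MBOXQ_t *mbq, bool locked)
	{
		unsigned long iflag;

		if (locked) {
			/* hbalock already held: use the unlocked variant */
			__lpfc_mbox_cmpl_put(phba, mbq);
			return;
		}
		/* Otherwise take hbalock, exactly as lpfc_mbox_cmpl_put() does */
		spin_lock_irqsave(&phba->hbalock, iflag);
		__lpfc_mbox_cmpl_put(phba, mbq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	}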
1330/** 1395/**
1396 * lpfc_mbox_cmd_check - Check the validity of a mailbox command
1397 * @phba: pointer to lpfc hba data structure.
1398 * @mboxq: pointer to the driver internal queue element for mailbox command.
1399 *
1400 * This routine is to check whether a mailbox command is valid to be issued.
1401 * This check is performed by the mailbox issue API before a client's
1402 * mailbox command is handed to the mailbox transport.
1403 *
1404 * Return 0 - pass the check, -ENODEV - fail the check
1405 **/
1406int
1407lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1408{
1409 /* Mailbox command that have a completion handler must also have a
1410 * vport specified.
1411 */
1412 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1413 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1414 if (!mboxq->vport) {
1415 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1416 "1814 Mbox x%x failed, no vport\n",
1417 mboxq->u.mb.mbxCommand);
1418 dump_stack();
1419 return -ENODEV;
1420 }
1421 }
1422 return 0;
1423}
1424
1425/**
1426 * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
1427 * @phba: pointer to lpfc hba data structure.
1428 *
1429 * This routine is to check whether the HBA device is ready for posting a
1430 * mailbox command. It is used by the mailbox transport API at the time it
1431 * is to post a mailbox command to the device.
1432 *
1433 * Return 0 - pass the check, -ENODEV - fail the check
1434 **/
1435int
1436lpfc_mbox_dev_check(struct lpfc_hba *phba)
1437{
1438 /* If the PCI channel is in offline state, do not issue mbox */
1439 if (unlikely(pci_channel_offline(phba->pcidev)))
1440 return -ENODEV;
1441
1442 /* If the HBA is in error state, do not issue mbox */
1443 if (phba->link_state == LPFC_HBA_ERROR)
1444 return -ENODEV;
1445
1446 return 0;
1447}
1448
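Taken together, an issue path would gate a command on both helpers before posting it. A hedged sketch (the function name is illustrative; the real call sites live in lpfc_sli.c):

	/* Illustrative pre-issue gate built from the two checks above */
	static int example_mbox_precheck(struct lpfc_hba *phba,
					 LPFC_MBOXQ_t *mboxq)
	{
		int rc;

		/* A non-default completion handler needs a vport */
		rc = lpfc_mbox_cmd_check(phba, mboxq);
		if (rc)
			return rc;
		/* PCI channel and HBA state must be sane */
		return lpfc_mbox_dev_check(phba);
	}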
1449/**
1331 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value 1450 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
1332 * @phba: pointer to lpfc hba data structure. 1451 * @phba: pointer to lpfc hba data structure.
1333 * @cmd: mailbox command code. 1452 * @cmd: mailbox command code.
@@ -1350,6 +1469,478 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
1350 case MBX_WRITE_WWN: /* 0x98 */ 1469 case MBX_WRITE_WWN: /* 0x98 */
1351 case MBX_LOAD_EXP_ROM: /* 0x9C */ 1470 case MBX_LOAD_EXP_ROM: /* 0x9C */
1352 return LPFC_MBOX_TMO_FLASH_CMD; 1471 return LPFC_MBOX_TMO_FLASH_CMD;
1472 case MBX_SLI4_CONFIG: /* 0x9b */
1473 return LPFC_MBOX_SLI4_CONFIG_TMO;
1353 } 1474 }
1354 return LPFC_MBOX_TMO; 1475 return LPFC_MBOX_TMO;
1355} 1476}
1477
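The value returned by lpfc_mbox_tmo_val() is in seconds; a typical consumer converts it to a jiffies deadline before waiting, along these lines (sketch; phba and cmd assumed in scope):

	/* Derive a jiffies deadline from the per-command timeout (seconds) */
	unsigned long deadline = jiffies +
			msecs_to_jiffies(lpfc_mbox_tmo_val(phba, cmd) * 1000);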
1478/**
1479 * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
1480 * @mbox: pointer to lpfc mbox command.
1481 * @sgentry: sge entry index.
1482 * @phyaddr: physical address for the sge
1483 * @length: Length of the sge.
1484 *
1485 * This routine sets up an entry in the non-embedded mailbox command at the sge
1486 * index location.
1487 **/
1488void
1489lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1490 dma_addr_t phyaddr, uint32_t length)
1491{
1492 struct lpfc_mbx_nembed_cmd *nembed_sge;
1493
1494 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1495 &mbox->u.mqe.un.nembed_cmd;
1496 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1497 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1498 nembed_sge->sge[sgentry].length = length;
1499}
1500
1501/**
1502 * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
1503 * @mbox: pointer to lpfc mbox command.
1504 * @sgentry: sge entry index.
1505 *
1506 * This routine gets an entry from the non-embedded mailbox command at the sge
1507 * index location.
1508 **/
1509void
1510lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1511 struct lpfc_mbx_sge *sge)
1512{
1513 struct lpfc_mbx_nembed_cmd *nembed_sge;
1514
1515 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1516 &mbox->u.mqe.un.nembed_cmd;
1517 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1518 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1519 sge->length = nembed_sge->sge[sgentry].length;
1520}
1521
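The two helpers are symmetric; a hypothetical round-trip through SGE slot 0 (mbox and phyaddr assumed in scope) looks like:

	struct lpfc_mbx_sge sge;

	lpfc_sli4_mbx_sge_set(mbox, 0, phyaddr, PAGE_SIZE);	/* write slot 0 */
	lpfc_sli4_mbx_sge_get(mbox, 0, &sge);			/* read it back */
	/* sge.pa_hi/sge.pa_lo carry the split DMA address, sge.length the size */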
1522/**
1523 * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
1524 * @phba: pointer to lpfc hba data structure.
1525 * @mbox: pointer to lpfc mbox command.
1526 *
1527 * This routine frees an SLI4-specific mailbox command used to send an IOCTL command.
1528 **/
1529void
1530lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1531{
1532 struct lpfc_mbx_sli4_config *sli4_cfg;
1533 struct lpfc_mbx_sge sge;
1534 dma_addr_t phyaddr;
1535 uint32_t sgecount, sgentry;
1536
1537 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1538
1539 /* For embedded mbox command, just free the mbox command */
1540 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1541 mempool_free(mbox, phba->mbox_mem_pool);
1542 return;
1543 }
1544
1545 /* For non-embedded mbox command, we need to free the pages first */
1546 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1547 /* There is nothing we can do if there is no sge address array */
1548 if (unlikely(!mbox->sge_array)) {
1549 mempool_free(mbox, phba->mbox_mem_pool);
1550 return;
1551 }
1552 /* Each non-embedded DMA buffer was allocated as a full page */
1553 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1554 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1555 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1556 dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
1557 mbox->sge_array->addr[sgentry], phyaddr);
1558 }
1559 /* Free the sge address array memory */
1560 kfree(mbox->sge_array);
1561 /* Finally, free the mailbox command itself */
1562 mempool_free(mbox, phba->mbox_mem_pool);
1563}
1564
1565/**
1566 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
1567 * @phba: pointer to lpfc hba data structure.
1568 * @mbox: pointer to lpfc mbox command.
1569 * @subsystem: The sli4 config sub mailbox subsystem.
1570 * @opcode: The sli4 config sub mailbox command opcode.
1571 * @length: Length of the sli4 config mailbox command.
1572 *
1573 * This routine sets up the header fields of an SLI4-specific mailbox command
1574 * used to send an IOCTL command.
1575 *
1576 * Return: the actual length of the mbox command allocated (mostly useful
1577 * for non-embedded mailbox commands).
1578 **/
1579int
1580lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1581 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1582{
1583 struct lpfc_mbx_sli4_config *sli4_config;
1584 union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1585 uint32_t alloc_len;
1586 uint32_t resid_len;
1587 uint32_t pagen, pcount;
1588 void *viraddr;
1589 dma_addr_t phyaddr;
1590
1591 /* Set up SLI4 mailbox command header fields */
1592 memset(mbox, 0, sizeof(*mbox));
1593 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1594
1595 /* Set up SLI4 ioctl command header fields */
1596 sli4_config = &mbox->u.mqe.un.sli4_config;
1597
1598 /* Setup for the embedded mbox command */
1599 if (emb) {
1600 /* Set up main header fields */
1601 bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1602 sli4_config->header.cfg_mhdr.payload_length =
1603 LPFC_MBX_CMD_HDR_LENGTH + length;
1604 /* Set up sub-header fields following main header */
1605 bf_set(lpfc_mbox_hdr_opcode,
1606 &sli4_config->header.cfg_shdr.request, opcode);
1607 bf_set(lpfc_mbox_hdr_subsystem,
1608 &sli4_config->header.cfg_shdr.request, subsystem);
1609 sli4_config->header.cfg_shdr.request.request_length = length;
1610 return length;
1611 }
1612
1613 /* Setup for the non-embedded mbox command */
1614 pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
1615 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1616 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1617 /* Allocate record for keeping SGE virtual addresses */
1618 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1619 GFP_KERNEL);
1620 if (!mbox->sge_array)
1621 return 0;
1622
1623 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1624 /* The DMA memory is always allocated a full page at a time,
1625 * even though the last SGE might not fill an entire page;
1626 * PAGE_SIZE is then assumed as the buffer size when the DMA
1627 * memory is freed later.
1628 */
1629 viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
1630 &phyaddr, GFP_KERNEL);
1631 /* If the allocation fails, proceed with whatever we have */
1632 if (!viraddr)
1633 break;
1634 memset(viraddr, 0, PAGE_SIZE);
1635 mbox->sge_array->addr[pagen] = viraddr;
1636 /* Keep the first page for later sub-header construction */
1637 if (pagen == 0)
1638 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1639 resid_len = length - alloc_len;
1640 if (resid_len > PAGE_SIZE) {
1641 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1642 PAGE_SIZE);
1643 alloc_len += PAGE_SIZE;
1644 } else {
1645 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1646 resid_len);
1647 alloc_len = length;
1648 }
1649 }
1650
1651 /* Set up main header fields in mailbox command */
1652 sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1653 bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1654
1655 /* Set up sub-header fields into the first page */
1656 if (pagen > 0) {
1657 bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1658 bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1659 cfg_shdr->request.request_length =
1660 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1661 }
1662 /* The sub-header is in DMA memory, which needs endian conversion */
1663 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1664 sizeof(union lpfc_sli4_cfg_shdr));
1665
1666 return alloc_len;
1667}
1668
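A hedged sketch of the intended lifecycle: allocate the mailbox, size it through lpfc_sli4_config() (a length above one page forces the non-embedded SGE path), and on error release everything through lpfc_sli4_mbox_cmd_free(). The subsystem and opcode values are assumed to be in scope.

	LPFC_MBOXQ_t *mboxq;
	uint32_t req_len = 2 * PAGE_SIZE;	/* > 1 page: non-embedded */
	uint32_t alloc_len;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	alloc_len = lpfc_sli4_config(phba, mboxq, subsystem, opcode,
				     req_len, false);
	if (alloc_len < req_len) {
		/* Partial page allocation: free pages and mailbox together */
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}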
1669/**
1670 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1671 * @phba: pointer to lpfc hba data structure.
1672 * @mbox: pointer to lpfc mbox command.
1673 *
1674 * This routine gets the opcode from an SLI4-specific mailbox command used
1675 * to send an IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
1676 * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
1677 * returned.
1678 **/
1679uint8_t
1680lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1681{
1682 struct lpfc_mbx_sli4_config *sli4_cfg;
1683 union lpfc_sli4_cfg_shdr *cfg_shdr;
1684
1685 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1686 return 0;
1687 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1688
1689 /* For embedded mbox command, get opcode from embedded sub-header */
1690 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1691 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1692 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1693 }
1694
1695 /* For non-embedded mbox command, get opcode from first dma page */
1696 if (unlikely(!mbox->sge_array))
1697 return 0;
1698 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1699 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1700}
1701
1702/**
1703 * lpfc_request_features - Configure the SLI4 REQUEST_FEATURES mailbox command
1704 * @mboxq: pointer to lpfc mbox command.
1705 *
1706 * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
1707 * mailbox command.
1708 **/
1709void
1710lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1711{
1712 /* Set up SLI4 mailbox command header fields */
1713 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
1714 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
1715
1716 /* Set up host requested features. */
1717 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1718
1719 if (phba->cfg_enable_fip)
1720 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1721 else
1722 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
1723
1724 /* Enable DIF (block guard) only if configured to do so. */
1725 if (phba->cfg_enable_bg)
1726 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
1727
1728 /* Enable NPIV only if configured to do so. */
1729 if (phba->max_vpi && phba->cfg_enable_npiv)
1730 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
1731
1732 return;
1733}
1734
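On completion the port reports which of the requested features it actually granted, and callers are expected to re-check before relying on them. Roughly (the rsp field name is an assumption based on the matching request bits in lpfc_hw4.h):

	/* Sketch: verify the port granted FCP initiator mode */
	if (!bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mboxq->u.mqe.un.req_ftrs))
		return -EIO;	/* feature not granted; fail the setup */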
1735/**
1736 * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
1737 * @mbox: pointer to lpfc mbox command to initialize.
1738 * @vport: Vport associated with the VF.
1739 *
1740 * This routine initializes @mbox to all zeros and then fills in the mailbox
1741 * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
1742 * in the context of an FCF. The driver issues this command to set up a VFI
1743 * before issuing a FLOGI to log in to the VSAN. The driver should also issue a
1744 * REG_VFI after a successful VSAN login.
1745 **/
1746void
1747lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1748{
1749 struct lpfc_mbx_init_vfi *init_vfi;
1750
1751 memset(mbox, 0, sizeof(*mbox));
1752 init_vfi = &mbox->u.mqe.un.init_vfi;
1753 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
1754 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1755 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1756 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
1757 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
1758}
1759
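The ordering spelled out above, compressed into a sketch (completion handling and error checking trimmed; mboxq assumed to be allocated):

	lpfc_init_vfi(mboxq, vport);		/* 1. set up the VFI */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	/* 2. issue FLOGI and complete the fabric login */
	/* 3. on success, build REG_VFI via lpfc_reg_vfi() and issue it */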
1760/**
1761 * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
1762 * @mbox: pointer to lpfc mbox command to initialize.
1763 * @vport: vport associated with the VF.
1764 * @phys: BDE DMA bus address used to send the service parameters to the HBA.
1765 *
1766 * This routine initializes @mbox to all zeros and then fills in the mailbox
1767 * fields from @vport, and uses @phys as the DMAable buffer to send the vport's
1768 * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
1769 * fabrics identified by VFI in the context of an FCF.
1770 **/
1771void
1772lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1773{
1774 struct lpfc_mbx_reg_vfi *reg_vfi;
1775
1776 memset(mbox, 0, sizeof(*mbox));
1777 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1778 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1779 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1780 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1781 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1782 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1783 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1784 reg_vfi->bde.addrLow = putPaddrLow(phys);
1785 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
1786 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1787 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
1788}
1789
1790/**
1791 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
1792 * @mbox: pointer to lpfc mbox command to initialize.
1793 * @vpi: VPI to be initialized.
1794 *
1795 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
1796 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
1797 * with the virtual N Port. The SLI Host issues this command before issuing a
1798 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
1799 * successful virtual NPort login.
1800 **/
1801void
1802lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
1803{
1804 memset(mbox, 0, sizeof(*mbox));
1805 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1806 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
1807}
1808
1809/**
1810 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1811 * @mbox: pointer to lpfc mbox command to initialize.
1812 * @vfi: VFI to be unregistered.
1813 *
1814 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1815 * (logical NPort) into the inactive state. The SLI Host must have logged out
1816 * and unregistered all remote N_Ports to abort any activity on the virtual
1817 * fabric. The SLI Port posts the mailbox response after marking the virtual
1818 * fabric inactive.
1819 **/
1820void
1821lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
1822{
1823 memset(mbox, 0, sizeof(*mbox));
1824 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1825 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
1826}
1827
1828/**
1829 * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
1830 * @phba: pointer to the hba data structure.
1831 * @mbox: pointer to lpfc mbox command to initialize.
1832 *
1833 * This function creates an SLI4 dump mailbox command to dump FCoE
1834 * parameters stored in region 23.
1835 **/
1836int
1837lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1838 struct lpfcMboxq *mbox)
1839{
1840 struct lpfc_dmabuf *mp = NULL;
1841 MAILBOX_t *mb;
1842
1843 memset(mbox, 0, sizeof(*mbox));
1844 mb = &mbox->u.mb;
1845
1846 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1847 if (mp)
1848 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1849
1850 if (!mp || !mp->virt) {
1851 kfree(mp);
1852 /* dump_fcoe_param failed to allocate memory */
1853 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1854 "2569 lpfc_dump_fcoe_param: memory"
1855 " allocation failed \n");
1856 return 1;
1857 }
1858
1859 memset(mp->virt, 0, LPFC_BPL_SIZE);
1860 INIT_LIST_HEAD(&mp->list);
1861
1862 /* save address for completion */
1863 mbox->context1 = (uint8_t *) mp;
1864
1865 mb->mbxCommand = MBX_DUMP_MEMORY;
1866 mb->un.varDmp.type = DMP_NV_PARAMS;
1867 mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
1868 mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
1869 mb->un.varWords[3] = putPaddrLow(mp->phys);
1870 mb->un.varWords[4] = putPaddrHigh(mp->phys);
1871 return 0;
1872}
1873
1874/**
1875 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
1876 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
1877 * @mbox: pointer to lpfc mbox command to initialize.
1878 *
1879 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
1880 * SLI Host uses the command to activate an FCF after it has acquired FCF
1881 * information via a READ_FCF mailbox command. This mailbox command also is used
1882 * to indicate where received unsolicited frames from this FCF will be sent. By
1883 * default this routine will set up the FCF to forward all unsolicited frames
1884 * the the RQ ID passed in the @phba. This can be overridden by the caller for
1885 * more complicated setups.
1886 **/
1887void
1888lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1889{
1890 struct lpfc_mbx_reg_fcfi *reg_fcfi;
1891
1892 memset(mbox, 0, sizeof(*mbox));
1893 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
1894 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
1895 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
1896 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1897 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1898 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1899 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
1900 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1901 bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
1902 (~phba->fcf.addr_mode) & 0x3);
1903 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1904 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1905 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
1906 }
1907}
1908
1909/**
1910 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
1911 * @mbox: pointer to lpfc mbox command to initialize.
1912 * @fcfi: FCFI to be unregistered.
1913 *
1914 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
1915 * The SLI Host uses the command to inactivate an FCFI.
1916 **/
1917void
1918lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
1919{
1920 memset(mbox, 0, sizeof(*mbox));
1921 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
1922 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
1923}
1924
1925/**
1926 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
1927 * @mbox: pointer to lpfc mbox command to initialize.
1928 * @ndlp: The nodelist structure that describes the RPI to resume.
1929 *
1930 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
1931 * link event.
1932 **/
1933void
1934lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1935{
1936 struct lpfc_mbx_resume_rpi *resume_rpi;
1937
1938 memset(mbox, 0, sizeof(*mbox));
1939 resume_rpi = &mbox->u.mqe.un.resume_rpi;
1940 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
1941 bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
1942 bf_set(lpfc_resume_rpi_vpi, resume_rpi,
1943 ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
1944 bf_set(lpfc_resume_rpi_vfi, resume_rpi,
1945 ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
1946}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 35a976733398..e198c917c13e 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
45 * @phba: HBA to allocate pools for 47 * @phba: HBA to allocate pools for
46 * 48 *
47 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, 49 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
48 * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools 50 * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
49 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. 51 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
50 * 52 *
51 * Notes: Not interrupt-safe. Must be called with no locks held. If any 53 * Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -56,19 +58,30 @@
56 * -ENOMEM on failure (if any memory allocations fail) 58 * -ENOMEM on failure (if any memory allocations fail)
57 **/ 59 **/
58int 60int
59lpfc_mem_alloc(struct lpfc_hba * phba) 61lpfc_mem_alloc(struct lpfc_hba *phba, int align)
60{ 62{
61 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 63 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
62 int longs; 64 int longs;
63 int i; 65 int i;
64 66
65 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 67 if (phba->sli_rev == LPFC_SLI_REV4)
66 phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); 68 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev,
71 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size,
73 0);
74 else
75 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0);
67 if (!phba->lpfc_scsi_dma_buf_pool) 79 if (!phba->lpfc_scsi_dma_buf_pool)
68 goto fail; 80 goto fail;
69 81
70 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, 82 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
71 LPFC_BPL_SIZE, 8,0); 83 LPFC_BPL_SIZE,
84 align, 0);
72 if (!phba->lpfc_mbuf_pool) 85 if (!phba->lpfc_mbuf_pool)
73 goto fail_free_dma_buf_pool; 86 goto fail_free_dma_buf_pool;
74 87
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
97 sizeof(struct lpfc_nodelist)); 110 sizeof(struct lpfc_nodelist));
98 if (!phba->nlp_mem_pool) 111 if (!phba->nlp_mem_pool)
99 goto fail_free_mbox_pool; 112 goto fail_free_mbox_pool;
100 113 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
101 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, 114 phba->pcidev,
102 LPFC_BPL_SIZE, 8, 0); 115 LPFC_HDR_BUF_SIZE, align, 0);
103 if (!phba->lpfc_hbq_pool) 116 if (!phba->lpfc_hrb_pool)
104 goto fail_free_nlp_mem_pool; 117 goto fail_free_nlp_mem_pool;
118 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
119 phba->pcidev,
120 LPFC_DATA_BUF_SIZE, align, 0);
121 if (!phba->lpfc_drb_pool)
122 goto fail_free_hbq_pool;
105 123
106 /* vpi zero is reserved for the physical port so add 1 to max */ 124 /* vpi zero is reserved for the physical port so add 1 to max */
107 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; 125 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
108 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); 126 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
109 if (!phba->vpi_bmask) 127 if (!phba->vpi_bmask)
110 goto fail_free_hbq_pool; 128 goto fail_free_dbq_pool;
111 129
112 return 0; 130 return 0;
113 131
132 fail_free_dbq_pool:
133 pci_pool_destroy(phba->lpfc_drb_pool);
134 phba->lpfc_drb_pool = NULL;
114 fail_free_hbq_pool: 135 fail_free_hbq_pool:
115 lpfc_sli_hbqbuf_free_all(phba); 136 pci_pool_destroy(phba->lpfc_hrb_pool);
116 pci_pool_destroy(phba->lpfc_hbq_pool); 137 phba->lpfc_hrb_pool = NULL;
117 fail_free_nlp_mem_pool: 138 fail_free_nlp_mem_pool:
118 mempool_destroy(phba->nlp_mem_pool); 139 mempool_destroy(phba->nlp_mem_pool);
119 phba->nlp_mem_pool = NULL; 140 phba->nlp_mem_pool = NULL;
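With the new align parameter one allocator serves both generations; a hypothetical probe-time call could look like this (the SLI4 alignment value is an assumption, the SLI3 value of 8 matches the old hardcoded one):

	if (phba->sli_rev == LPFC_SLI_REV4)
		rc = lpfc_mem_alloc(phba, PAGE_SIZE);	/* assumed SLI4 align */
	else
		rc = lpfc_mem_alloc(phba, 8);		/* legacy SLI3 align */
	if (rc)
		return rc;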
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
136} 157}
137 158
138/** 159/**
139 * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc 160 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
140 * @phba: HBA to free memory for 161 * @phba: HBA to free memory for
141 * 162 *
142 * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, 163 * Description: Frees the memory allocated by the lpfc_mem_alloc routine.
143 * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and 164 * This routine is the counterpart of lpfc_mem_alloc.
144 * lpfc_nodelist. Also frees the VPI bitmask
145 * 165 *
146 * Returns: None 166 * Returns: None
147 **/ 167 **/
148void 168void
149lpfc_mem_free(struct lpfc_hba * phba) 169lpfc_mem_free(struct lpfc_hba *phba)
150{ 170{
151 struct lpfc_sli *psli = &phba->sli;
152 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
153 LPFC_MBOXQ_t *mbox, *next_mbox;
154 struct lpfc_dmabuf *mp;
155 int i; 171 int i;
172 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
156 173
174 /* Free VPI bitmask memory */
157 kfree(phba->vpi_bmask); 175 kfree(phba->vpi_bmask);
176
177 /* Free HBQ pools */
158 lpfc_sli_hbqbuf_free_all(phba); 178 lpfc_sli_hbqbuf_free_all(phba);
179 pci_pool_destroy(phba->lpfc_drb_pool);
180 phba->lpfc_drb_pool = NULL;
181 pci_pool_destroy(phba->lpfc_hrb_pool);
182 phba->lpfc_hrb_pool = NULL;
183
184 /* Free NLP memory pool */
185 mempool_destroy(phba->nlp_mem_pool);
186 phba->nlp_mem_pool = NULL;
187
188 /* Free mbox memory pool */
189 mempool_destroy(phba->mbox_mem_pool);
190 phba->mbox_mem_pool = NULL;
191
192 /* Free MBUF memory pool */
193 for (i = 0; i < pool->current_count; i++)
194 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
195 pool->elements[i].phys);
196 kfree(pool->elements);
197
198 pci_pool_destroy(phba->lpfc_mbuf_pool);
199 phba->lpfc_mbuf_pool = NULL;
159 200
201 /* Free DMA buffer memory pool */
202 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
203 phba->lpfc_scsi_dma_buf_pool = NULL;
204
205 return;
206}
207
208/**
209 * lpfc_mem_free_all - Frees all PCI and driver memory
210 * @phba: HBA to free memory for
211 *
212 * Description: Frees all PCI and driver memory pools in use:
213 * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
214 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
215 * the VPI bitmask.
216 *
217 * Returns: None
218 **/
219void
220lpfc_mem_free_all(struct lpfc_hba *phba)
221{
222 struct lpfc_sli *psli = &phba->sli;
223 LPFC_MBOXQ_t *mbox, *next_mbox;
224 struct lpfc_dmabuf *mp;
225
226 /* Free memory used in mailbox queue back to mailbox memory pool */
160 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 227 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
161 mp = (struct lpfc_dmabuf *) (mbox->context1); 228 mp = (struct lpfc_dmabuf *) (mbox->context1);
162 if (mp) { 229 if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
166 list_del(&mbox->list); 233 list_del(&mbox->list);
167 mempool_free(mbox, phba->mbox_mem_pool); 234 mempool_free(mbox, phba->mbox_mem_pool);
168 } 235 }
236 /* Free memory used in mailbox cmpl list back to mailbox memory pool */
169 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { 237 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
170 mp = (struct lpfc_dmabuf *) (mbox->context1); 238 mp = (struct lpfc_dmabuf *) (mbox->context1);
171 if (mp) { 239 if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
175 list_del(&mbox->list); 243 list_del(&mbox->list);
176 mempool_free(mbox, phba->mbox_mem_pool); 244 mempool_free(mbox, phba->mbox_mem_pool);
177 } 245 }
178 246 /* Free the active mailbox command back to the mailbox memory pool */
247 spin_lock_irq(&phba->hbalock);
179 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 248 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
249 spin_unlock_irq(&phba->hbalock);
180 if (psli->mbox_active) { 250 if (psli->mbox_active) {
181 mbox = psli->mbox_active; 251 mbox = psli->mbox_active;
182 mp = (struct lpfc_dmabuf *) (mbox->context1); 252 mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
188 psli->mbox_active = NULL; 258 psli->mbox_active = NULL;
189 } 259 }
190 260
191 for (i = 0; i < pool->current_count; i++) 261 /* Free and destroy all the allocated memory pools */
192 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 262 lpfc_mem_free(phba);
193 pool->elements[i].phys);
194 kfree(pool->elements);
195
196 pci_pool_destroy(phba->lpfc_hbq_pool);
197 mempool_destroy(phba->nlp_mem_pool);
198 mempool_destroy(phba->mbox_mem_pool);
199
200 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
201 pci_pool_destroy(phba->lpfc_mbuf_pool);
202
203 phba->lpfc_hbq_pool = NULL;
204 phba->nlp_mem_pool = NULL;
205 phba->mbox_mem_pool = NULL;
206 phba->lpfc_scsi_dma_buf_pool = NULL;
207 phba->lpfc_mbuf_pool = NULL;
208 263
209 /* Free the iocb lookup array */ 264 /* Free the iocb lookup array */
210 kfree(psli->iocbq_lookup); 265 kfree(psli->iocbq_lookup);
211 psli->iocbq_lookup = NULL; 266 psli->iocbq_lookup = NULL;
267
268 return;
212} 269}
213 270
214/** 271/**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
305 * lpfc_els_hbq_alloc - Allocate an HBQ buffer 362 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
306 * @phba: HBA to allocate HBQ buffer for 363 * @phba: HBA to allocate HBQ buffer for
307 * 364 *
308 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI 365 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
309 * pool along a non-DMA-mapped container for it. 366 * pool along with a non-DMA-mapped container for it.
310 * 367 *
311 * Notes: Not interrupt-safe. Must be called with no locks held. 368 * Notes: Not interrupt-safe. Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
323 if (!hbqbp) 380 if (!hbqbp)
324 return NULL; 381 return NULL;
325 382
326 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, 383 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
327 &hbqbp->dbuf.phys); 384 &hbqbp->dbuf.phys);
328 if (!hbqbp->dbuf.virt) { 385 if (!hbqbp->dbuf.virt) {
329 kfree(hbqbp); 386 kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
334} 391}
335 392
336/** 393/**
337 * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc 394 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
338 * @phba: HBA buffer was allocated for 395 * @phba: HBA buffer was allocated for
339 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc 396 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
340 * 397 *
@@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
348void 405void
349lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 406lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
350{ 407{
351 pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); 408 pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
352 kfree(hbqbp); 409 kfree(hbqbp);
353 return; 410 return;
354} 411}
355 412
356/** 413/**
414 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
415 * @phba: HBA to allocate a receive buffer for
416 *
417 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
418 * pool along with a non-DMA-mapped container for it.
419 *
420 * Notes: Not interrupt-safe. Must be called with no locks held.
421 *
422 * Returns:
423 * pointer to the hbq_dmabuf container on success
424 * NULL on failure
425 **/
426struct hbq_dmabuf *
427lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
428{
429 struct hbq_dmabuf *dma_buf;
430
431 dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
432 if (!dma_buf)
433 return NULL;
434
435 dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
436 &dma_buf->hbuf.phys);
437 if (!dma_buf->hbuf.virt) {
438 kfree(dma_buf);
439 return NULL;
440 }
441 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
442 &dma_buf->dbuf.phys);
443 if (!dma_buf->dbuf.virt) {
444 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
445 dma_buf->hbuf.phys);
446 kfree(dma_buf);
447 return NULL;
448 }
449 dma_buf->size = LPFC_BPL_SIZE;
450 return dma_buf;
451}
452
453/**
454 * lpfc_sli4_rb_free - Frees a receive buffer
455 * @phba: HBA buffer was allocated for
456 * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
457 *
458 * Description: Frees both the container and the DMA-mapped buffers returned by
459 * lpfc_sli4_rb_alloc.
460 *
461 * Notes: Can be called with or without locks held.
462 *
463 * Returns: None
464 **/
465void
466lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
467{
468 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
469 pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
470 kfree(dmab);
471 return;
472}
473
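Each SLI4 receive buffer therefore owns two DMA mappings; a minimal hypothetical user pairs the calls like so:

	struct hbq_dmabuf *rb = lpfc_sli4_rb_alloc(phba);

	if (!rb)
		return -ENOMEM;
	/* rb->hbuf receives frame headers, rb->dbuf the frame payload */
	/* ... post to the header/data RQs, consume the frame ... */
	lpfc_sli4_rb_free(phba, rb);	/* frees hbuf, dbuf and the container */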
474/**
357 * lpfc_in_buf_free - Free a DMA buffer 475 * lpfc_in_buf_free - Free a DMA buffer
358 * @phba: HBA buffer is associated with 476 * @phba: HBA buffer is associated with
359 * @mp: Buffer to free 477 * @mp: Buffer to free
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 08cdc77af41c..3e74136f1ede 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
361 if (!mbox) 363 if (!mbox)
362 goto out; 364 goto out;
363 365
364 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, 366 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
365 (uint8_t *) sp, mbox, 0); 367 (uint8_t *) sp, mbox, 0);
366 if (rc) { 368 if (rc) {
367 mempool_free(mbox, phba->mbox_mem_pool); 369 mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
495 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
496 else 498 else
497 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
500 if ((ndlp->nlp_DID == Fabric_DID) &&
501 vport->port_type == LPFC_NPIV_PORT) {
502 lpfc_linkdown_port(vport);
503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
504 spin_lock_irq(shost->host_lock);
505 ndlp->nlp_flag |= NLP_DELAY_TMO;
506 spin_unlock_irq(shost->host_lock);
498 507
499 if ((!(ndlp->nlp_type & NLP_FABRIC) && 508 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
500 ((ndlp->nlp_type & NLP_FCP_TARGET) || 509 } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
501 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 510 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
502 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 511 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
512 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
503 /* Only try to re-login if this is NOT a Fabric Node */ 513 /* Only try to re-login if this is NOT a Fabric Node */
504 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 514 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
505 spin_lock_irq(shost->host_lock); 515 spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
567{ 577{
568 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 578 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
569 579
570 if (!ndlp->nlp_rpi) { 580 if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
571 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 581 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
572 return 0; 582 return 0;
573 } 583 }
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
857 867
858 lpfc_unreg_rpi(vport, ndlp); 868 lpfc_unreg_rpi(vport, ndlp);
859 869
860 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, 870 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
861 (uint8_t *) sp, mbox, 0) == 0) { 871 (uint8_t *) sp, mbox, 0) == 0) {
862 switch (ndlp->nlp_DID) { 872 switch (ndlp->nlp_DID) {
863 case NameServer_DID: 873 case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1068 struct lpfc_iocbq *cmdiocb, *rspiocb; 1078 struct lpfc_iocbq *cmdiocb, *rspiocb;
1069 IOCB_t *irsp; 1079 IOCB_t *irsp;
1070 ADISC *ap; 1080 ADISC *ap;
1081 int rc;
1071 1082
1072 cmdiocb = (struct lpfc_iocbq *) arg; 1083 cmdiocb = (struct lpfc_iocbq *) arg;
1073 rspiocb = cmdiocb->context_un.rsp_iocb; 1084 rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1093 return ndlp->nlp_state; 1104 return ndlp->nlp_state;
1094 } 1105 }
1095 1106
1107 if (phba->sli_rev == LPFC_SLI_REV4) {
1108 rc = lpfc_sli4_resume_rpi(ndlp);
1109 if (rc) {
1110 /* Stay in state and retry. */
1111 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1112 return ndlp->nlp_state;
1113 }
1114 }
1115
1096 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1116 if (ndlp->nlp_type & NLP_FCP_TARGET) {
1097 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1117 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1098 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1118 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1100 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1120 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1102 } 1122 }
1123
1103 return ndlp->nlp_state; 1124 return ndlp->nlp_state;
1104} 1125}
1105 1126
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1190 1211
1191 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1212 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1192 if ((mb = phba->sli.mbox_active)) { 1213 if ((mb = phba->sli.mbox_active)) {
1193 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1214 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1194 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1215 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1195 lpfc_nlp_put(ndlp); 1216 lpfc_nlp_put(ndlp);
1196 mb->context2 = NULL; 1217 mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1200 1221
1201 spin_lock_irq(&phba->hbalock); 1222 spin_lock_irq(&phba->hbalock);
1202 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1203 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1204 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1205 mp = (struct lpfc_dmabuf *) (mb->context1); 1226 mp = (struct lpfc_dmabuf *) (mb->context1);
1206 if (mp) { 1227 if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1251{ 1272{
1252 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1273 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1253 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1274 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1254 MAILBOX_t *mb = &pmb->mb; 1275 MAILBOX_t *mb = &pmb->u.mb;
1255 uint32_t did = mb->un.varWords[1]; 1276 uint32_t did = mb->un.varWords[1];
1256 1277
1257 if (mb->mbxStatus) { 1278 if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1283 } 1304 }
1284 1305
1285 ndlp->nlp_rpi = mb->un.varWords[0]; 1306 ndlp->nlp_rpi = mb->un.varWords[0];
1307 ndlp->nlp_flag |= NLP_RPI_VALID;
1286 1308
1287 /* Only if we are not a fabric nport do we issue PRLI */ 1309 /* Only if we are not a fabric nport do we issue PRLI */
1288 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1310 if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1878 void *arg, uint32_t evt) 1900 void *arg, uint32_t evt)
1879{ 1901{
1880 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1902 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1881 MAILBOX_t *mb = &pmb->mb; 1903 MAILBOX_t *mb = &pmb->u.mb;
1882 1904
1883 if (!mb->mbxStatus) 1905 if (!mb->mbxStatus) {
1884 ndlp->nlp_rpi = mb->un.varWords[0]; 1906 ndlp->nlp_rpi = mb->un.varWords[0];
1885 else { 1907 ndlp->nlp_flag |= NLP_RPI_VALID;
1908 } else {
1886 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1909 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1887 lpfc_drop_node(vport, ndlp); 1910 lpfc_drop_node(vport, ndlp);
1888 return NLP_STE_FREED_NODE; 1911 return NLP_STE_FREED_NODE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c7..da59c4f0168f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -31,8 +31,10 @@
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_version.h" 33#include "lpfc_version.h"
34#include "lpfc_hw4.h"
34#include "lpfc_hw.h" 35#include "lpfc_hw.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_nl.h" 38#include "lpfc_nl.h"
37#include "lpfc_disc.h" 39#include "lpfc_disc.h"
38#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
57 "SCSI_PROT_READ_CONVERT", 59 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT" 60 "SCSI_PROT_WRITE_CONVERT"
59}; 61};
62static void
63lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
60 64
61static void 65static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd) 66lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -112,6 +116,27 @@ lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
112} 116}
113 117
114/** 118/**
119 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
120 * @phba: Pointer to HBA object.
121 * @lpfc_cmd: lpfc scsi command object pointer.
122 *
123 * This function is called from the lpfc_prep_task_mgmt_cmd function to
124 * set the last bit in the response sge entry.
125 **/
126static void
127lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
128 struct lpfc_scsi_buf *lpfc_cmd)
129{
130 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
131 if (sgl) {
132 sgl += 1;
133 sgl->word2 = le32_to_cpu(sgl->word2);
134 bf_set(lpfc_sli4_sge_last, sgl, 1);
135 sgl->word2 = cpu_to_le32(sgl->word2);
136 }
137}
138
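The word2 load/store around bf_set above is the standard pattern for editing a bit-field inside a little-endian hardware descriptor: convert to CPU order, flip the field, convert back. Conceptually the helper expands to a masked read-modify-write along these lines (a sketch, not the exact lpfc_hw4.h macro):

	#define bf_set(name, ptr, value) \
		((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
			((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))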
139/**
115 * lpfc_update_stats - Update statistical data for the command completion 140 * lpfc_update_stats - Update statistical data for the command completion
116 * @phba: Pointer to HBA object. 141 * @phba: Pointer to HBA object.
117 * @lpfc_cmd: lpfc scsi command object pointer. 142 * @lpfc_cmd: lpfc scsi command object pointer.
@@ -325,7 +350,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
325 350
326 vports = lpfc_create_vport_work_array(phba); 351 vports = lpfc_create_vport_work_array(phba);
327 if (vports != NULL) 352 if (vports != NULL)
328 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 353 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
329 shost = lpfc_shost_from_vport(vports[i]); 354 shost = lpfc_shost_from_vport(vports[i]);
330 shost_for_each_device(sdev, shost) { 355 shost_for_each_device(sdev, shost) {
331 new_queue_depth = 356 new_queue_depth =
@@ -379,7 +404,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
379 404
380 vports = lpfc_create_vport_work_array(phba); 405 vports = lpfc_create_vport_work_array(phba);
381 if (vports != NULL) 406 if (vports != NULL)
382 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 407 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
383 shost = lpfc_shost_from_vport(vports[i]); 408 shost = lpfc_shost_from_vport(vports[i]);
384 shost_for_each_device(sdev, shost) { 409 shost_for_each_device(sdev, shost) {
385 if (vports[i]->cfg_lun_queue_depth <= 410 if (vports[i]->cfg_lun_queue_depth <=
@@ -427,7 +452,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
427 452
428 vports = lpfc_create_vport_work_array(phba); 453 vports = lpfc_create_vport_work_array(phba);
429 if (vports != NULL) 454 if (vports != NULL)
430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 455 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
431 shost = lpfc_shost_from_vport(vports[i]); 456 shost = lpfc_shost_from_vport(vports[i]);
432 shost_for_each_device(sdev, shost) { 457 shost_for_each_device(sdev, shost) {
433 rport = starget_to_rport(scsi_target(sdev)); 458 rport = starget_to_rport(scsi_target(sdev));
@@ -438,22 +463,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
438} 463}
439 464
440/** 465/**
441 * lpfc_new_scsi_buf - Scsi buffer allocator 466 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
442 * @vport: The virtual port for which this call is being executed. 467 * @vport: The virtual port for which this call is being executed.
468 * @num_to_alloc: The requested number of buffers to allocate.
443 * 469 *
444 * This routine allocates a scsi buffer, which contains all the necessary 470 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
445 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 471 * the scsi buffer contains all the necessary information needed to initiate
446 * contains information to build the IOCB. The DMAable region contains 472 * a SCSI I/O. The non-DMAable buffer region contains information to build
447 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to 473 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
448 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL 474 * and the initial BPL. In addition to allocating memory, the FCP CMND and
449 * and the BPL BDE is setup in the IOCB. 475 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
450 * 476 *
451 * Return codes: 477 * Return codes:
452 * NULL - Error 478 * int - number of scsi buffers that were allocated.
453 * Pointer to lpfc_scsi_buf data structure - Success 479 * 0 = failure, less than num_to_alloc is a partial failure.
454 **/ 480 **/
455static struct lpfc_scsi_buf * 481static int
456lpfc_new_scsi_buf(struct lpfc_vport *vport) 482lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
457{ 483{
458 struct lpfc_hba *phba = vport->phba; 484 struct lpfc_hba *phba = vport->phba;
459 struct lpfc_scsi_buf *psb; 485 struct lpfc_scsi_buf *psb;
@@ -463,107 +489,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
463 dma_addr_t pdma_phys_fcp_rsp; 489 dma_addr_t pdma_phys_fcp_rsp;
464 dma_addr_t pdma_phys_bpl; 490 dma_addr_t pdma_phys_bpl;
465 uint16_t iotag; 491 uint16_t iotag;
492 int bcnt;
466 493
467 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 494 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
468 if (!psb) 495 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
469 return NULL; 496 if (!psb)
497 break;
498
499 /*
500 * Get memory from the pci pool to map the virt space to pci
501 * bus space for an I/O. The DMA buffer includes space for the
502 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
503 * necessary to support the sg_tablesize.
504 */
505 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
506 GFP_KERNEL, &psb->dma_handle);
507 if (!psb->data) {
508 kfree(psb);
509 break;
510 }
511
512 /* Initialize virtual ptrs to dma_buf region. */
513 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
514
515 /* Allocate iotag for psb->cur_iocbq. */
516 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
517 if (iotag == 0) {
518 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
519 psb->data, psb->dma_handle);
520 kfree(psb);
521 break;
522 }
523 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
524
525 psb->fcp_cmnd = psb->data;
526 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
527 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
528 sizeof(struct fcp_rsp);
529
530 /* Initialize local short-hand pointers. */
531 bpl = psb->fcp_bpl;
532 pdma_phys_fcp_cmd = psb->dma_handle;
533 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
534 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
535 sizeof(struct fcp_rsp);
536
537 /*
538 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
539 * are sg list bdes. Initialize the first two and leave the
540 * rest for queuecommand.
541 */
542 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
543 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
544 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
545 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
546 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
547
548 /* Setup the physical region for the FCP RSP */
549 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
550 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
551 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
552 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
553 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
554
555 /*
556 * Since the IOCB for the FCP I/O is built into this
557 * lpfc_scsi_buf, initialize it with all known data now.
558 */
559 iocb = &psb->cur_iocbq.iocb;
560 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
561 if ((phba->sli_rev == 3) &&
562 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
563 /* fill in immediate fcp command BDE */
564 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
565 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
566 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
567 unsli3.fcp_ext.icd);
568 iocb->un.fcpi64.bdl.addrHigh = 0;
569 iocb->ulpBdeCount = 0;
570 iocb->ulpLe = 0;
571 /* fill in response BDE */
572 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
573 BUFF_TYPE_BDE_64;
574 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
575 sizeof(struct fcp_rsp);
576 iocb->unsli3.fcp_ext.rbde.addrLow =
577 putPaddrLow(pdma_phys_fcp_rsp);
578 iocb->unsli3.fcp_ext.rbde.addrHigh =
579 putPaddrHigh(pdma_phys_fcp_rsp);
580 } else {
581 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
582 iocb->un.fcpi64.bdl.bdeSize =
583 (2 * sizeof(struct ulp_bde64));
584 iocb->un.fcpi64.bdl.addrLow =
585 putPaddrLow(pdma_phys_bpl);
586 iocb->un.fcpi64.bdl.addrHigh =
587 putPaddrHigh(pdma_phys_bpl);
588 iocb->ulpBdeCount = 1;
589 iocb->ulpLe = 1;
590 }
591 iocb->ulpClass = CLASS3;
592 psb->status = IOSTAT_SUCCESS;
593 /* Put it back into the SCSI buffer list */
594 lpfc_release_scsi_buf_s4(phba, psb);
470 595
471 /*
472 * Get memory from the pci pool to map the virt space to pci bus space
473 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
474 * struct fcp_rsp and the number of bde's necessary to support the
475 * sg_tablesize.
476 */
477 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
478 &psb->dma_handle);
479 if (!psb->data) {
480 kfree(psb);
481 return NULL;
482 } 596 }
483 597
484 /* Initialize virtual ptrs to dma_buf region. */ 598 return bcnt;
485 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 599}
486 600
487 /* Allocate iotag for psb->cur_iocbq. */ 601/**
488 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 602 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
489 if (iotag == 0) { 603 * @phba: pointer to lpfc hba data structure.
490 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 604 * @axri: pointer to the fcp xri abort wcqe structure.
491 psb->data, psb->dma_handle); 605 *
492 kfree (psb); 606 * This routine is invoked by the worker thread to process a SLI4 fast-path
493 return NULL; 607 * FCP aborted xri.
608 **/
609void
610lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
611 struct sli4_wcqe_xri_aborted *axri)
612{
613 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
614 struct lpfc_scsi_buf *psb, *next_psb;
615 unsigned long iflag = 0;
616
617 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
618 list_for_each_entry_safe(psb, next_psb,
619 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
620 if (psb->cur_iocbq.sli4_xritag == xri) {
621 list_del(&psb->list);
622 psb->status = IOSTAT_SUCCESS;
623 spin_unlock_irqrestore(
624 &phba->sli4_hba.abts_scsi_buf_list_lock,
625 iflag);
626 lpfc_release_scsi_buf_s4(phba, psb);
627 return;
628 }
494 } 629 }
495 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 630 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
631 iflag);
632}
496 633
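lpfc_sli4_fcp_xri_aborted() above must unlink one entry, matched by XRI, from a lock-protected list while walking it, which is what list_for_each_entry_safe() permits. A hedged userspace sketch of the same claim-by-tag idea on a plain singly linked list, with a pthread mutex standing in for the spinlock; all names here are hypothetical:

#include <pthread.h>
#include <stddef.h>

struct buf {
	unsigned int xri;	/* exchange tag owned by this buffer */
	struct buf *next;
};

static pthread_mutex_t abts_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *abts_list;	/* head of the aborted-buffer list */

/* Unlink and return the entry whose tag matches @xri, or NULL. */
static struct buf *demo_claim_aborted(unsigned int xri)
{
	struct buf **pp, *b;

	pthread_mutex_lock(&abts_lock);
	for (pp = &abts_list; (b = *pp) != NULL; pp = &b->next) {
		if (b->xri == xri) {
			*pp = b->next;	/* unlink while the lock is held */
			break;
		}
	}
	pthread_mutex_unlock(&abts_lock);
	return b;
}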
497 psb->fcp_cmnd = psb->data; 634/**
498 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 635 * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block
499 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 636 * @phba: pointer to lpfc hba data structure.
500 sizeof(struct fcp_rsp); 637 *
638 * This routine walks the list of scsi buffers that have been allocated and
639 * reposts them to the HBA by using SGL block post. This is needed after a
640 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
641 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_buf_list
642 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
643 *
644 * Returns: 0 = success, non-zero failure.
645 **/
646int
647lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
648{
649 struct lpfc_scsi_buf *psb;
650 int index, status, bcnt = 0, rcnt = 0, rc = 0;
651 LIST_HEAD(sblist);
652
653 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
654 psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
655 if (psb) {
656 /* Remove from SCSI buffer list */
657 list_del(&psb->list);
658 /* Add it to a local SCSI buffer list */
659 list_add_tail(&psb->list, &sblist);
660 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
661 bcnt = rcnt;
662 rcnt = 0;
663 }
664 } else
665 /* A hole present in the XRI array, need to skip */
666 bcnt = rcnt;
501 667
502 /* Initialize local short-hand pointers. */ 668 if (index == phba->sli4_hba.scsi_xri_cnt - 1)
503 bpl = psb->fcp_bpl; 669 /* End of XRI array for SCSI buffer, complete */
504 pdma_phys_fcp_cmd = psb->dma_handle; 670 bcnt = rcnt;
505 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
506 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
507 sizeof(struct fcp_rsp);
508 671
509 /* 672 /* Continue until we collect up to a nembed page worth of sgls */
510 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg 673 if (bcnt == 0)
511 * list bdes. Initialize the first two and leave the rest for 674 continue;
512 * queuecommand. 675 /* Now, post the SCSI buffer list sgls as a block */
513 */ 676 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
514 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); 677 /* Reset SCSI buffer count for next round of posting */
515 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); 678 bcnt = 0;
516 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); 679 while (!list_empty(&sblist)) {
517 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 680 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
518 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); 681 list);
519 682 if (status) {
520 /* Setup the physical region for the FCP RSP */ 683 /* Put this back on the abort scsi list */
521 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); 684 psb->status = IOSTAT_LOCAL_REJECT;
522 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); 685 psb->result = IOERR_ABORT_REQUESTED;
523 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); 686 rc++;
524 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 687 } else
525 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); 688 psb->status = IOSTAT_SUCCESS;
689 /* Put it back into the SCSI buffer list */
690 lpfc_release_scsi_buf_s4(phba, psb);
691 }
692 }
693 return rc;
694}
526 695
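The repost routine above accumulates SGLs and posts them in blocks of LPFC_NEMBED_MBOX_SGL_CNT, flushing early when it hits a hole in the XRI array and again at the end of the array. A loose userspace sketch of that batch-and-flush control flow, with a hypothetical post_block() standing in for the mailbox call:

#include <stdio.h>

#define BATCH_MAX 4	/* illustrative stand-in for LPFC_NEMBED_MBOX_SGL_CNT */

/* Hypothetical block post; returns 0 on success. */
static int post_block(const int *tags, int n)
{
	printf("posting block of %d (first tag %d)\n", n, tags[0]);
	return 0;
}

/* @xri: array of tags, -1 marking a hole. Returns count that failed. */
static int demo_repost_all(const int *xri, int cnt)
{
	int batch[BATCH_MAX];
	int i, n = 0, failed = 0;

	for (i = 0; i < cnt; i++) {
		int flush = 0;

		if (xri[i] >= 0)
			batch[n++] = xri[i];
		else
			flush = 1;		/* hole: post what we have */
		if (n == BATCH_MAX || i == cnt - 1)
			flush = 1;		/* batch full or end of array */
		if (flush && n) {
			if (post_block(batch, n))
				failed += n;
			n = 0;
		}
	}
	return failed;
}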
527 /* 696/**
528 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 697 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
529 * initialize it with all known data now. 698 * @vport: The virtual port for which this call is being executed.
530 */ 699 * @num_to_alloc: The requested number of buffers to allocate.
531 iocb = &psb->cur_iocbq.iocb; 700 *
532 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 701 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
533 if ((phba->sli_rev == 3) && 702 * the scsi buffer contains all the necessary information needed to initiate
534 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 703 * a SCSI I/O.
535 /* fill in immediate fcp command BDE */ 704 *
536 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 705 * Return codes:
706 * int - number of scsi buffers that were allocated.
707 * 0 = failure, less than num_to_alloc is a partial failure.
708 **/
709static int
710lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
711{
712 struct lpfc_hba *phba = vport->phba;
713 struct lpfc_scsi_buf *psb;
714 struct sli4_sge *sgl;
715 IOCB_t *iocb;
716 dma_addr_t pdma_phys_fcp_cmd;
717 dma_addr_t pdma_phys_fcp_rsp;
718 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
719 uint16_t iotag, last_xritag = NO_XRI;
720 int status = 0, index;
721 int bcnt;
722 int non_sequential_xri = 0;
723 int rc = 0;
724 LIST_HEAD(sblist);
725
726 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
727 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
728 if (!psb)
729 break;
730
731 /*
732 * Get memory from the pci pool to map the virt space to pci bus
733 * space for an I/O. The DMA buffer includes space for the
734 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
735 * necessary to support the sg_tablesize.
736 */
737 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
738 GFP_KERNEL, &psb->dma_handle);
739 if (!psb->data) {
740 kfree(psb);
741 break;
742 }
743
744 /* Initialize virtual ptrs to dma_buf region. */
745 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
746
747 /* Allocate iotag for psb->cur_iocbq. */
748 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
749 if (iotag == 0) {
750 kfree(psb);
751 break;
752 }
753
754 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
755 if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
756 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
757 psb->data, psb->dma_handle);
758 kfree(psb);
759 break;
760 }
761 if (last_xritag != NO_XRI
762 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
763 non_sequential_xri = 1;
764 } else
765 list_add_tail(&psb->list, &sblist);
766 last_xritag = psb->cur_iocbq.sli4_xritag;
767
768 index = phba->sli4_hba.scsi_xri_cnt++;
769 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
770
771 psb->fcp_bpl = psb->data;
772 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
773 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
774 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
775 sizeof(struct fcp_cmnd));
776
777 /* Initialize local short-hand pointers. */
778 sgl = (struct sli4_sge *)psb->fcp_bpl;
779 pdma_phys_bpl = psb->dma_handle;
780 pdma_phys_fcp_cmd =
781 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
782 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
783 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
784
785 /*
786 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
787 * are sg list bdes. Initialize the first two and leave the
788 * rest for queuecommand.
789 */
790 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
791 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
792 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
793 bf_set(lpfc_sli4_sge_last, sgl, 0);
794 sgl->word2 = cpu_to_le32(sgl->word2);
795 sgl->word3 = cpu_to_le32(sgl->word3);
796 sgl++;
797
798 /* Setup the physical region for the FCP RSP */
799 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
800 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
801 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
802 bf_set(lpfc_sli4_sge_last, sgl, 1);
803 sgl->word2 = cpu_to_le32(sgl->word2);
804 sgl->word3 = cpu_to_le32(sgl->word3);
805
806 /*
807 * Since the IOCB for the FCP I/O is built into this
808 * lpfc_scsi_buf, initialize it with all known data now.
809 */
810 iocb = &psb->cur_iocbq.iocb;
811 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
812 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
813 /* Setting the BLP size to 2 * sizeof(BDE) may not be correct.
814 * We are setting the bpl to point to our sgl. An sgl's
815 * entries are 16 bytes, a bpl's entries are 12 bytes.
816 */
537 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 817 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 818 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
539 unsli3.fcp_ext.icd); 819 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
540 iocb->un.fcpi64.bdl.addrHigh = 0;
541 iocb->ulpBdeCount = 0;
542 iocb->ulpLe = 0;
543 /* fill in responce BDE */
544 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546 sizeof(struct fcp_rsp);
547 iocb->unsli3.fcp_ext.rbde.addrLow =
548 putPaddrLow(pdma_phys_fcp_rsp);
549 iocb->unsli3.fcp_ext.rbde.addrHigh =
550 putPaddrHigh(pdma_phys_fcp_rsp);
551 } else {
552 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556 iocb->ulpBdeCount = 1; 820 iocb->ulpBdeCount = 1;
557 iocb->ulpLe = 1; 821 iocb->ulpLe = 1;
822 iocb->ulpClass = CLASS3;
823 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
824 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
825 else
826 pdma_phys_bpl1 = 0;
827 psb->dma_phys_bpl = pdma_phys_bpl;
828 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
829 if (non_sequential_xri) {
830 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
831 pdma_phys_bpl1,
832 psb->cur_iocbq.sli4_xritag);
833 if (status) {
834 /* Put this back on the abort scsi list */
835 psb->status = IOSTAT_LOCAL_REJECT;
836 psb->result = IOERR_ABORT_REQUESTED;
837 rc++;
838 } else
839 psb->status = IOSTAT_SUCCESS;
840 /* Put it back into the SCSI buffer list */
841 lpfc_release_scsi_buf_s4(phba, psb);
842 break;
843 }
844 }
845 if (bcnt) {
846 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
847 /* Reset SCSI buffer count for next round of posting */
848 while (!list_empty(&sblist)) {
849 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
850 list);
851 if (status) {
852 /* Put this back on the abort scsi list */
853 psb->status = IOSTAT_LOCAL_REJECT;
854 psb->result = IOERR_ABORT_REQUESTED;
855 rc++;
856 } else
857 psb->status = IOSTAT_SUCCESS;
858 /* Put it back into the SCSI buffer list */
859 lpfc_release_scsi_buf_s4(phba, psb);
860 }
558 } 861 }
559 iocb->ulpClass = CLASS3;
560 862
561 return psb; 863 return bcnt + non_sequential_xri - rc;
562} 864}
563 865
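Note how the SLI-4 allocator lays out its single DMA buffer the other way around from the SLI-3 path: the SGL starts at offset 0 and the fcp_cmnd/fcp_rsp pair is carved out of the tail of the buffer. A worked sketch of that pointer arithmetic; the sizes are illustrative, not the real structure sizes:

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE	2048	/* illustrative cfg_sg_dma_buf_size */
#define CMND_SIZE	32	/* illustrative sizeof(struct fcp_cmnd) */
#define RSP_SIZE	48	/* illustrative sizeof(struct fcp_rsp) */

int main(void)
{
	uint8_t data[BUF_SIZE];

	/* SLI-4 style: SGL at the front, CMND/RSP packed at the end. */
	uint8_t *sgl  = data;
	uint8_t *cmnd = data + BUF_SIZE - (CMND_SIZE + RSP_SIZE);
	uint8_t *rsp  = cmnd + CMND_SIZE;

	printf("sgl @ %td, cmnd @ %td, rsp @ %td, sgl room %td bytes\n",
	       sgl - data, cmnd - data, rsp - data, cmnd - sgl);
	return 0;
}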
564/** 866/**
565 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba 867 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
566 * @phba: The Hba for which this call is being executed. 868 * @vport: The virtual port for which this call is being executed.
869 * @num_to_alloc: The requested number of buffers to allocate.
870 *
871 * This routine wraps the actual SCSI buffer allocator function pointer from
872 * the lpfc_hba struct.
873 *
874 * Return codes:
875 * int - number of scsi buffers that were allocated.
876 * 0 = failure, less than num_to_alloc is a partial failure.
877 **/
878static inline int
879lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
880{
881 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
882}
883
884/**
885 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
886 * @phba: The HBA for which this call is being executed.
567 * 887 *
568 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 888 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
569 * and returns to caller. 889 * and returns to caller.
@@ -591,7 +911,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
591} 911}
592 912
593/** 913/**
594 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list 914 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
595 * @phba: The Hba for which this call is being executed. 915 * @phba: The Hba for which this call is being executed.
596 * @psb: The scsi buffer which is being released. 916 * @psb: The scsi buffer which is being released.
597 * 917 *
@@ -599,7 +919,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
599 * lpfc_scsi_buf_list list. 919 * lpfc_scsi_buf_list list.
600 **/ 920 **/
601static void 921static void
602lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 922lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
603{ 923{
604 unsigned long iflag = 0; 924 unsigned long iflag = 0;
605 925
@@ -610,21 +930,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
610} 930}
611 931
612/** 932/**
613 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer 933 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
934 * @phba: The Hba for which this call is being executed.
935 * @psb: The scsi buffer which is being released.
936 *
937 * This routine releases @psb scsi buffer by adding it to tail of @phba
938 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
939 * and cannot be reused for at least RA_TOV amount of time if it was
940 * aborted.
941 **/
942static void
943lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
944{
945 unsigned long iflag = 0;
946
947 if (psb->status == IOSTAT_LOCAL_REJECT
948 && psb->result == IOERR_ABORT_REQUESTED) {
949 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
950 iflag);
951 psb->pCmd = NULL;
952 list_add_tail(&psb->list,
953 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
954 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
955 iflag);
956 } else {
957
958 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
959 psb->pCmd = NULL;
960 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
961 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
962 }
963}
964
965/**
966 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
967 * @phba: The Hba for which this call is being executed.
968 * @psb: The scsi buffer which is being released.
969 *
970 * This routine releases @psb scsi buffer by adding it to tail of @phba
971 * lpfc_scsi_buf_list list.
972 **/
973static void
974lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
975{
976
977 phba->lpfc_release_scsi_buf(phba, psb);
978}
979
980/**
981 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
614 * @phba: The Hba for which this call is being executed. 982 * @phba: The Hba for which this call is being executed.
615 * @lpfc_cmd: The scsi buffer which is going to be mapped. 983 * @lpfc_cmd: The scsi buffer which is going to be mapped.
616 * 984 *
617 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 985 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
618 * field of @lpfc_cmd. This routine scans through sg elements and formats the 986 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
619 * bdes. This routine also initializes all IOCB fields which are dependent on 987 * through sg elements and formats the bdes. This routine also initializes all
620 * scsi command request buffer. 988 * IOCB fields which are dependent on scsi command request buffer.
621 * 989 *
622 * Return codes: 990 * Return codes:
623 * 1 - Error 991 * 1 - Error
624 * 0 - Success 992 * 0 - Success
625 **/ 993 **/
626static int 994static int
627lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 995lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
628{ 996{
629 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 997 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630 struct scatterlist *sgel = NULL; 998 struct scatterlist *sgel = NULL;
@@ -827,8 +1195,8 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
827 * @reftag: out: ref tag (reference tag) 1195 * @reftag: out: ref tag (reference tag)
828 * 1196 *
829 * Description: 1197 * Description:
830 * Extract DIF paramters from the command if possible. Otherwise, 1198 * Extract DIF parameters from the command if possible. Otherwise,
831 * use default paratmers. 1199 * use default parameters.
832 * 1200 *
833 **/ 1201 **/
834static inline void 1202static inline void
@@ -1312,10 +1680,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1312 uint32_t bgstat = bgf->bgstat; 1680 uint32_t bgstat = bgf->bgstat;
1313 uint64_t failing_sector = 0; 1681 uint64_t failing_sector = 0;
1314 1682
1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " 1683 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
1316 "bgstat=0x%x bghm=0x%x\n", 1684 "bgstat=0x%x bghm=0x%x\n",
1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1685 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318 cmd->request->nr_sectors, bgstat, bghm); 1686 blk_rq_sectors(cmd->request), bgstat, bghm);
1319 1687
1320 spin_lock(&_dump_buf_lock); 1688 spin_lock(&_dump_buf_lock);
1321 if (!_dump_buf_done) { 1689 if (!_dump_buf_done) {
@@ -1412,6 +1780,133 @@ out:
1412} 1780}
1413 1781
1414/** 1782/**
1783 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1784 * @phba: The Hba for which this call is being executed.
1785 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1786 *
1787 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1788 * field of @lpfc_cmd for device with SLI-4 interface spec.
1789 *
1790 * Return codes:
1791 * 1 - Error
1792 * 0 - Success
1793 **/
1794static int
1795lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1796{
1797 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1798 struct scatterlist *sgel = NULL;
1799 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1800 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1801 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1802 dma_addr_t physaddr;
1803 uint32_t num_bde = 0;
1804 uint32_t dma_len;
1805 uint32_t dma_offset = 0;
1806 int nseg;
1807
1808 /*
1809 * There are three possibilities here - use scatter-gather segment, use
1810 * the single mapping, or neither. Start the lpfc command prep by
1811 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1812 * data bde entry.
1813 */
1814 if (scsi_sg_count(scsi_cmnd)) {
1815 /*
1816 * The driver stores the segment count returned from pci_map_sg
1817 * because this is a count of dma-mappings used to map the use_sg
1818 * pages. They are not guaranteed to be the same for those
1819 * architectures that implement an IOMMU.
1820 */
1821
1822 nseg = scsi_dma_map(scsi_cmnd);
1823 if (unlikely(!nseg))
1824 return 1;
1825 sgl += 1;
1826 /* clear the last flag in the fcp_rsp map entry */
1827 sgl->word2 = le32_to_cpu(sgl->word2);
1828 bf_set(lpfc_sli4_sge_last, sgl, 0);
1829 sgl->word2 = cpu_to_le32(sgl->word2);
1830 sgl += 1;
1831
1832 lpfc_cmd->seg_cnt = nseg;
1833 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1834 printk(KERN_ERR "%s: Too many sg segments from "
1835 "dma_map_sg. Config %d, seg_cnt %d\n",
1836 __func__, phba->cfg_sg_seg_cnt,
1837 lpfc_cmd->seg_cnt);
1838 scsi_dma_unmap(scsi_cmnd);
1839 return 1;
1840 }
1841
1842 /*
1843 * The driver established a maximum scatter-gather segment count
1844 * during probe that limits the number of sg elements in any
1845 * single scsi command. Just run through the seg_cnt and format
1846 * the sge's.
1847 * When using SLI-3 the driver will try to fit all the BDEs into
1848 * the IOCB. If it can't then the BDEs get added to a BPL as it
1849 * does for SLI-2 mode.
1850 */
1851 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1852 physaddr = sg_dma_address(sgel);
1853 dma_len = sg_dma_len(sgel);
1854 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1855 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1856 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1857 if ((num_bde + 1) == nseg)
1858 bf_set(lpfc_sli4_sge_last, sgl, 1);
1859 else
1860 bf_set(lpfc_sli4_sge_last, sgl, 0);
1861 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1862 sgl->word2 = cpu_to_le32(sgl->word2);
1863 sgl->word3 = cpu_to_le32(sgl->word3);
1864 dma_offset += dma_len;
1865 sgl++;
1866 }
1867 } else {
1868 sgl += 1;
1869 /* clear the last flag in the fcp_rsp map entry */
1870 sgl->word2 = le32_to_cpu(sgl->word2);
1871 bf_set(lpfc_sli4_sge_last, sgl, 1);
1872 sgl->word2 = cpu_to_le32(sgl->word2);
1873 }
1874
1875 /*
1876 * Finish initializing those IOCB fields that are dependent on the
1877 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
1878 * explicitly reinitialized since
1879 * all iocb memory resources are reused.
1880 */
1881 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1882
1883 /*
1884 * Due to difference in data length between DIF/non-DIF paths,
1885 * we need to set word 4 of IOCB here
1886 */
1887 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1888 return 0;
1889}
1890
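The scatter-gather loop above writes one SGE per DMA segment, carries a running data offset, and sets the "last" flag only on the final entry. A compact sketch of that walk over a plain array of segments; the demo_sge struct is a simplification, not the real sli4_sge layout:

#include <stdint.h>
#include <stddef.h>

struct seg { uint64_t addr; uint32_t len; };		/* one DMA mapping */
struct demo_sge { uint32_t hi, lo, len, offs, flags; };	/* simplified SGE */
#define DEMO_SGE_LAST 0x1u

static void demo_format_sges(const struct seg *segs, size_t nseg,
			     struct demo_sge *sgl)
{
	uint32_t dma_offset = 0;
	size_t i;

	for (i = 0; i < nseg; i++) {
		sgl[i].hi = (uint32_t)(segs[i].addr >> 32);	/* high dword */
		sgl[i].lo = (uint32_t)segs[i].addr;		/* low dword */
		sgl[i].len = segs[i].len;
		sgl[i].offs = dma_offset;	/* running offset into the I/O */
		/* only the final SGE carries the last flag */
		sgl[i].flags = (i + 1 == nseg) ? DEMO_SGE_LAST : 0;
		dma_offset += segs[i].len;
	}
}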
1891/**
1892 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1893 * @phba: The Hba for which this call is being executed.
1894 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1895 *
1896 * This routine wraps the actual DMA mapping function pointer from the
1897 * lpfc_hba struct.
1898 *
1899 * Return codes:
1900 * 1 - Error
1901 * 0 - Success
1902 **/
1903static inline int
1904lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1905{
1906 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1907}
1908
1909/**
1415 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 1910 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1416 * @phba: Pointer to hba context object. 1911 * @phba: Pointer to hba context object.
1417 * @vport: Pointer to vport object. 1912 * @vport: Pointer to vport object.
@@ -1504,15 +1999,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1504} 1999}
1505 2000
1506/** 2001/**
1507 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather 2002 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
1508 * @phba: The Hba for which this call is being executed. 2003 * @phba: The HBA for which this call is being executed.
1509 * @psb: The scsi buffer which is going to be un-mapped. 2004 * @psb: The scsi buffer which is going to be un-mapped.
1510 * 2005 *
1511 * This routine does DMA un-mapping of scatter gather list of scsi command 2006 * This routine does DMA un-mapping of scatter gather list of scsi command
1512 * field of @lpfc_cmd. 2007 * field of @lpfc_cmd for device with SLI-3 interface spec.
1513 **/ 2008 **/
1514static void 2009static void
1515lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 2010lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1516{ 2011{
1517 /* 2012 /*
1518 * There are only two special cases to consider. (1) the scsi command 2013 * There are only two special cases to consider. (1) the scsi command
@@ -1676,7 +2171,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 2171 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677 * @phba: The Hba for which this call is being executed. 2172 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd. 2173 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd . 2174 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680 * 2175 *
1681 * This routine assigns scsi command result by looking into response IOCB 2176 * This routine assigns scsi command result by looking into response IOCB
1682 * status field appropriately. This routine handles QUEUE FULL condition as 2177 * status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,13 +2452,13 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1957} 2452}
1958 2453
1959/** 2454/**
1960 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit 2455 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
1961 * @vport: The virtual port for which this call is being executed. 2456 * @vport: The virtual port for which this call is being executed.
1962 * @lpfc_cmd: The scsi command which needs to send. 2457 * @lpfc_cmd: The scsi command which needs to send.
1963 * @pnode: Pointer to lpfc_nodelist. 2458 * @pnode: Pointer to lpfc_nodelist.
1964 * 2459 *
1965 * This routine initializes fcp_cmnd and iocb data structure from scsi command 2460 * This routine initializes fcp_cmnd and iocb data structure from scsi command
1966 * to transfer. 2461 * to transfer for device with SLI3 interface spec.
1967 **/ 2462 **/
1968static void 2463static void
1969lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2464lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
@@ -2013,8 +2508,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2013 if (scsi_sg_count(scsi_cmnd)) { 2508 if (scsi_sg_count(scsi_cmnd)) {
2014 if (datadir == DMA_TO_DEVICE) { 2509 if (datadir == DMA_TO_DEVICE) {
2015 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 2510 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016 iocb_cmd->un.fcpi.fcpi_parm = 0; 2511 if (phba->sli_rev < LPFC_SLI_REV4) {
2017 iocb_cmd->ulpPU = 0; 2512 iocb_cmd->un.fcpi.fcpi_parm = 0;
2513 iocb_cmd->ulpPU = 0;
2514 } else
2515 iocb_cmd->ulpPU = PARM_READ_CHECK;
2018 fcp_cmnd->fcpCntl3 = WRITE_DATA; 2516 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019 phba->fc4OutputRequests++; 2517 phba->fc4OutputRequests++;
2020 } else { 2518 } else {
@@ -2051,13 +2549,14 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2051} 2549}
2052 2550
2053/** 2551/**
2054 * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit 2552 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
2055 * @vport: The virtual port for which this call is being executed. 2553 * @vport: The virtual port for which this call is being executed.
2056 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2554 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057 * @lun: Logical unit number. 2555 * @lun: Logical unit number.
2058 * @task_mgmt_cmd: SCSI task management command. 2556 * @task_mgmt_cmd: SCSI task management command.
2059 * 2557 *
2060 * This routine creates FCP information unit corresponding to @task_mgmt_cmd. 2558 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2559 * for device with SLI-3 interface spec.
2061 * 2560 *
2062 * Return codes: 2561 * Return codes:
2063 * 0 - Error 2562 * 0 - Error
@@ -2106,14 +2605,56 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2106 * The driver will provide the timeout mechanism. 2605 * The driver will provide the timeout mechanism.
2107 */ 2606 */
2108 piocb->ulpTimeout = 0; 2607 piocb->ulpTimeout = 0;
2109 } else { 2608 } else
2110 piocb->ulpTimeout = lpfc_cmd->timeout; 2609 piocb->ulpTimeout = lpfc_cmd->timeout;
2111 } 2610
2611 if (vport->phba->sli_rev == LPFC_SLI_REV4)
2612 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
2112 2613
2113 return 1; 2614 return 1;
2114} 2615}
2115 2616
2116/** 2617/**
2618 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2619 * @phba: The hba struct for which this call is being executed.
2620 * @dev_grp: The HBA PCI-Device group number.
2621 *
2622 * This routine sets up the SCSI interface API function jump table in @phba
2623 * struct.
2624 * Returns: 0 - success, -ENODEV - failure.
2625 **/
2626int
2627lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2628{
2629
2630 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2631 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2632 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2633
2634 switch (dev_grp) {
2635 case LPFC_PCI_DEV_LP:
2636 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2637 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2638 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2639 break;
2640 case LPFC_PCI_DEV_OC:
2641 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2642 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2643 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2644 break;
2645 default:
2646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2647 "1418 Invalid HBA PCI-device group: 0x%x\n",
2648 dev_grp);
2649 return -ENODEV;
2650 break;
2651 }
2652 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2653 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2654 return 0;
2655}
2656
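lpfc_scsi_api_table_setup() is a function-pointer jump table: each per-revision implementation is selected once at probe time and the hot path always calls through the pointer, never a branch. A minimal sketch of the pattern with hypothetical names:

#include <stdio.h>

struct demo_hba;
typedef int (*new_buf_fn)(struct demo_hba *, int);

struct demo_hba {
	new_buf_fn new_scsi_buf;	/* filled in once at setup time */
};

static int new_buf_s3(struct demo_hba *h, int n) { (void)h; return n; }
static int new_buf_s4(struct demo_hba *h, int n) { (void)h; return n; }

/* dev_grp picks the implementation family, as in the driver. */
static int demo_api_setup(struct demo_hba *h, int dev_grp)
{
	switch (dev_grp) {
	case 1: h->new_scsi_buf = new_buf_s3; break;	/* SLI-3 parts */
	case 2: h->new_scsi_buf = new_buf_s4; break;	/* SLI-4 parts */
	default: return -1;				/* unknown group */
	}
	return 0;
}

int main(void)
{
	struct demo_hba hba;

	if (demo_api_setup(&hba, 2) == 0)
		printf("allocated %d buffers\n", hba.new_scsi_buf(&hba, 8));
	return 0;
}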
2657/**
2117 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command 2658 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2118 * @phba: The Hba for which this call is being executed. 2659 * @phba: The Hba for which this call is being executed.
2119 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 2660 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
@@ -2135,73 +2676,6 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2135} 2676}
2136 2677
2137/** 2678/**
2138 * lpfc_scsi_tgt_reset - Target reset handler
2139 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
2140 * @vport: The virtual port for which this call is being executed.
2141 * @tgt_id: Target ID.
2142 * @lun: Lun number.
2143 * @rdata: Pointer to lpfc_rport_data.
2144 *
2145 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
2146 *
2147 * Return Code:
2148 * 0x2003 - Error
2149 * 0x2002 - Success.
2150 **/
2151static int
2152lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2153 unsigned tgt_id, unsigned int lun,
2154 struct lpfc_rport_data *rdata)
2155{
2156 struct lpfc_hba *phba = vport->phba;
2157 struct lpfc_iocbq *iocbq;
2158 struct lpfc_iocbq *iocbqrsp;
2159 int ret;
2160 int status;
2161
2162 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
2163 return FAILED;
2164
2165 lpfc_cmd->rdata = rdata;
2166 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2167 FCP_TARGET_RESET);
2168 if (!status)
2169 return FAILED;
2170
2171 iocbq = &lpfc_cmd->cur_iocbq;
2172 iocbqrsp = lpfc_sli_get_iocbq(phba);
2173
2174 if (!iocbqrsp)
2175 return FAILED;
2176
2177 /* Issue Target Reset to TGT <num> */
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2181 status = lpfc_sli_issue_iocb_wait(phba,
2182 &phba->sli.ring[phba->sli.fcp_ring],
2183 iocbq, iocbqrsp, lpfc_cmd->timeout);
2184 if (status != IOCB_SUCCESS) {
2185 if (status == IOCB_TIMEDOUT) {
2186 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2187 ret = TIMEOUT_ERROR;
2188 } else
2189 ret = FAILED;
2190 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2191 } else {
2192 ret = SUCCESS;
2193 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
2194 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
2195 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2196 (lpfc_cmd->result & IOERR_DRVR_MASK))
2197 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2198 }
2199
2200 lpfc_sli_release_iocbq(phba, iocbqrsp);
2201 return ret;
2202}
2203
2204/**
2205 * lpfc_info - Info entry point of scsi_host_template data structure 2679 * lpfc_info - Info entry point of scsi_host_template data structure
2206 * @host: The scsi host for which this call is being executed. 2680 * @host: The scsi host for which this call is being executed.
2207 * 2681 *
@@ -2305,7 +2779,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2305 struct Scsi_Host *shost = cmnd->device->host; 2779 struct Scsi_Host *shost = cmnd->device->host;
2306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2780 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307 struct lpfc_hba *phba = vport->phba; 2781 struct lpfc_hba *phba = vport->phba;
2308 struct lpfc_sli *psli = &phba->sli;
2309 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2782 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310 struct lpfc_nodelist *ndlp = rdata->pnode; 2783 struct lpfc_nodelist *ndlp = rdata->pnode;
2311 struct lpfc_scsi_buf *lpfc_cmd; 2784 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2378,15 +2851,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2378 if (cmnd->cmnd[0] == READ_10) 2851 if (cmnd->cmnd[0] == READ_10)
2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2852 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380 "9035 BLKGRD: READ @ sector %llu, " 2853 "9035 BLKGRD: READ @ sector %llu, "
2381 "count %lu\n", 2854 "count %u\n",
2382 (unsigned long long)scsi_get_lba(cmnd), 2855 (unsigned long long)scsi_get_lba(cmnd),
2383 cmnd->request->nr_sectors); 2856 blk_rq_sectors(cmnd->request));
2384 else if (cmnd->cmnd[0] == WRITE_10) 2857 else if (cmnd->cmnd[0] == WRITE_10)
2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2858 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386 "9036 BLKGRD: WRITE @ sector %llu, " 2859 "9036 BLKGRD: WRITE @ sector %llu, "
2387 "count %lu cmd=%p\n", 2860 "count %u cmd=%p\n",
2388 (unsigned long long)scsi_get_lba(cmnd), 2861 (unsigned long long)scsi_get_lba(cmnd),
2389 cmnd->request->nr_sectors, 2862 blk_rq_sectors(cmnd->request),
2390 cmnd); 2863 cmnd);
2391 2864
2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2865 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +2879,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2406 if (cmnd->cmnd[0] == READ_10) 2879 if (cmnd->cmnd[0] == READ_10)
2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2880 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408 "9040 dbg: READ @ sector %llu, " 2881 "9040 dbg: READ @ sector %llu, "
2409 "count %lu\n", 2882 "count %u\n",
2410 (unsigned long long)scsi_get_lba(cmnd), 2883 (unsigned long long)scsi_get_lba(cmnd),
2411 cmnd->request->nr_sectors); 2884 blk_rq_sectors(cmnd->request));
2412 else if (cmnd->cmnd[0] == WRITE_10) 2885 else if (cmnd->cmnd[0] == WRITE_10)
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2886 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414 "9041 dbg: WRITE @ sector %llu, " 2887 "9041 dbg: WRITE @ sector %llu, "
2415 "count %lu cmd=%p\n", 2888 "count %u cmd=%p\n",
2416 (unsigned long long)scsi_get_lba(cmnd), 2889 (unsigned long long)scsi_get_lba(cmnd),
2417 cmnd->request->nr_sectors, cmnd); 2890 blk_rq_sectors(cmnd->request), cmnd);
2418 else 2891 else
2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2892 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420 "9042 dbg: parser not implemented\n"); 2893 "9042 dbg: parser not implemented\n");
@@ -2427,7 +2900,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2427 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 2900 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2428 2901
2429 atomic_inc(&ndlp->cmd_pending); 2902 atomic_inc(&ndlp->cmd_pending);
2430 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 2903 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
2431 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 2904 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2432 if (err) { 2905 if (err) {
2433 atomic_dec(&ndlp->cmd_pending); 2906 atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +2963,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2490 struct Scsi_Host *shost = cmnd->device->host; 2963 struct Scsi_Host *shost = cmnd->device->host;
2491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2964 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 struct lpfc_hba *phba = vport->phba; 2965 struct lpfc_hba *phba = vport->phba;
2493 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
2494 struct lpfc_iocbq *iocb; 2966 struct lpfc_iocbq *iocb;
2495 struct lpfc_iocbq *abtsiocb; 2967 struct lpfc_iocbq *abtsiocb;
2496 struct lpfc_scsi_buf *lpfc_cmd; 2968 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +3003,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2531 icmd = &abtsiocb->iocb; 3003 icmd = &abtsiocb->iocb;
2532 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 3004 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2533 icmd->un.acxri.abortContextTag = cmd->ulpContext; 3005 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2534 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 3006 if (phba->sli_rev == LPFC_SLI_REV4)
3007 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3008 else
3009 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2535 3010
2536 icmd->ulpLe = 1; 3011 icmd->ulpLe = 1;
2537 icmd->ulpClass = cmd->ulpClass; 3012 icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +3017,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2542 3017
2543 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 3018 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2544 abtsiocb->vport = vport; 3019 abtsiocb->vport = vport;
2545 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 3020 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3021 IOCB_ERROR) {
2546 lpfc_sli_release_iocbq(phba, abtsiocb); 3022 lpfc_sli_release_iocbq(phba, abtsiocb);
2547 ret = FAILED; 3023 ret = FAILED;
2548 goto out; 3024 goto out;
@@ -2579,157 +3055,334 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2579 return ret; 3055 return ret;
2580} 3056}
2581 3057
3058static char *
3059lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
3060{
3061 switch (task_mgmt_cmd) {
3062 case FCP_ABORT_TASK_SET:
3063 return "ABORT_TASK_SET";
3064 case FCP_CLEAR_TASK_SET:
3065 return "FCP_CLEAR_TASK_SET";
3066 case FCP_BUS_RESET:
3067 return "FCP_BUS_RESET";
3068 case FCP_LUN_RESET:
3069 return "FCP_LUN_RESET";
3070 case FCP_TARGET_RESET:
3071 return "FCP_TARGET_RESET";
3072 case FCP_CLEAR_ACA:
3073 return "FCP_CLEAR_ACA";
3074 case FCP_TERMINATE_TASK:
3075 return "FCP_TERMINATE_TASK";
3076 default:
3077 return "unknown";
3078 }
3079}
3080
2582/** 3081/**
2583 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 3082 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
2584 * @cmnd: Pointer to scsi_cmnd data structure. 3083 * @vport: The virtual port for which this call is being executed.
3084 * @rdata: Pointer to remote port local data
3085 * @tgt_id: Target ID of remote device.
3086 * @lun_id: Lun number for the TMF
3087 * @task_mgmt_cmd: type of TMF to send
2585 * 3088 *
2586 * This routine does a device reset by sending a TARGET_RESET task management 3089 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
2587 * command. 3090 * a remote port.
2588 * 3091 *
2589 * Return code : 3092 * Return Code:
2590 * 0x2003 - Error 3093 * 0x2003 - Error
2591 * 0x2002 - Success 3094 * 0x2002 - Success.
2592 **/ 3095 **/
2593static int 3096static int
2594lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 3097lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3098 unsigned tgt_id, unsigned int lun_id,
3099 uint8_t task_mgmt_cmd)
2595{ 3100{
2596 struct Scsi_Host *shost = cmnd->device->host;
2597 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2598 struct lpfc_hba *phba = vport->phba; 3101 struct lpfc_hba *phba = vport->phba;
2599 struct lpfc_scsi_buf *lpfc_cmd; 3102 struct lpfc_scsi_buf *lpfc_cmd;
2600 struct lpfc_iocbq *iocbq, *iocbqrsp; 3103 struct lpfc_iocbq *iocbq;
2601 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3104 struct lpfc_iocbq *iocbqrsp;
2602 struct lpfc_nodelist *pnode = rdata->pnode; 3105 int ret;
2603 unsigned long later;
2604 int ret = SUCCESS;
2605 int status; 3106 int status;
2606 int cnt;
2607 struct lpfc_scsi_event_header scsi_event;
2608 3107
2609 lpfc_block_error_handler(cmnd); 3108 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
2610 /*
2611 * If target is not in a MAPPED state, delay the reset until
2612 * target is rediscovered or devloss timeout expires.
2613 */
2614 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2615 while (time_after(later, jiffies)) {
2616 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2617 return FAILED;
2618 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
2619 break;
2620 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
2621 rdata = cmnd->device->hostdata;
2622 if (!rdata)
2623 break;
2624 pnode = rdata->pnode;
2625 }
2626
2627 scsi_event.event_type = FC_REG_SCSI_EVENT;
2628 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
2629 scsi_event.lun = 0;
2630 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
2631 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
2632
2633 fc_host_post_vendor_event(shost,
2634 fc_get_event_number(),
2635 sizeof(scsi_event),
2636 (char *)&scsi_event,
2637 LPFC_NL_VENDOR_ID);
2638
2639 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
2640 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2641 "0721 LUN Reset rport "
2642 "failure: msec x%x rdata x%p\n",
2643 jiffies_to_msecs(jiffies - later), rdata);
2644 return FAILED; 3109 return FAILED;
2645 } 3110
2646 lpfc_cmd = lpfc_get_scsi_buf(phba); 3111 lpfc_cmd = lpfc_get_scsi_buf(phba);
2647 if (lpfc_cmd == NULL) 3112 if (lpfc_cmd == NULL)
2648 return FAILED; 3113 return FAILED;
2649 lpfc_cmd->timeout = 60; 3114 lpfc_cmd->timeout = 60;
2650 lpfc_cmd->rdata = rdata; 3115 lpfc_cmd->rdata = rdata;
2651 3116
2652 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, 3117 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
2653 cmnd->device->lun, 3118 task_mgmt_cmd);
2654 FCP_TARGET_RESET);
2655 if (!status) { 3119 if (!status) {
2656 lpfc_release_scsi_buf(phba, lpfc_cmd); 3120 lpfc_release_scsi_buf(phba, lpfc_cmd);
2657 return FAILED; 3121 return FAILED;
2658 } 3122 }
2659 iocbq = &lpfc_cmd->cur_iocbq;
2660 3123
2661 /* get a buffer for this IOCB command response */ 3124 iocbq = &lpfc_cmd->cur_iocbq;
2662 iocbqrsp = lpfc_sli_get_iocbq(phba); 3125 iocbqrsp = lpfc_sli_get_iocbq(phba);
2663 if (iocbqrsp == NULL) { 3126 if (iocbqrsp == NULL) {
2664 lpfc_release_scsi_buf(phba, lpfc_cmd); 3127 lpfc_release_scsi_buf(phba, lpfc_cmd);
2665 return FAILED; 3128 return FAILED;
2666 } 3129 }
3130
2667 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3131 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2668 "0703 Issue target reset to TGT %d LUN %d " 3132 "0702 Issue %s to TGT %d LUN %d "
2669 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 3133 "rpi x%x nlp_flag x%x\n",
2670 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 3134 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
2671 status = lpfc_sli_issue_iocb_wait(phba, 3135 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2672 &phba->sli.ring[phba->sli.fcp_ring], 3136
3137 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2673 iocbq, iocbqrsp, lpfc_cmd->timeout); 3138 iocbq, iocbqrsp, lpfc_cmd->timeout);
2674 if (status == IOCB_TIMEDOUT) { 3139 if (status != IOCB_SUCCESS) {
2675 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3140 if (status == IOCB_TIMEDOUT) {
2676 ret = TIMEOUT_ERROR; 3141 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2677 } else { 3142 ret = TIMEOUT_ERROR;
2678 if (status != IOCB_SUCCESS) 3143 } else
2679 ret = FAILED; 3144 ret = FAILED;
2680 lpfc_release_scsi_buf(phba, lpfc_cmd); 3145 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2681 } 3146 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2682 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3147 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
2683 "0713 SCSI layer issued device reset (%d, %d) " 3148 lpfc_taskmgmt_name(task_mgmt_cmd),
2684 "return x%x status x%x result x%x\n", 3149 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
2685 cmnd->device->id, cmnd->device->lun, ret,
2686 iocbqrsp->iocb.ulpStatus,
2687 iocbqrsp->iocb.un.ulpWord[4]); 3150 iocbqrsp->iocb.un.ulpWord[4]);
3151 } else
3152 ret = SUCCESS;
3153
2688 lpfc_sli_release_iocbq(phba, iocbqrsp); 3154 lpfc_sli_release_iocbq(phba, iocbqrsp);
2689 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun, 3155
2690 LPFC_CTX_TGT); 3156 if (ret != TIMEOUT_ERROR)
3157 lpfc_release_scsi_buf(phba, lpfc_cmd);
3158
3159 return ret;
3160}
3161
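One detail worth calling out in lpfc_send_taskmgmt(): on IOCB_TIMEDOUT the buffer is not released, because the firmware may still complete it later; ownership is instead handed to lpfc_tskmgmt_def_cmpl(), which is why the release at the bottom is guarded by ret != TIMEOUT_ERROR. A hedged sketch of that ownership rule with hypothetical helpers:

#include <stdbool.h>
#include <stdlib.h>

struct req {
	void (*on_late_completion)(struct req *);	/* runs if fw answers late */
};

static void default_cmpl(struct req *r) { free(r); }	/* deferred release */

/* Hypothetical bounded-wait submit: true = completed in time. */
static bool issue_and_wait(struct req *r) { (void)r; return true; }

static int demo_send_tmf(void)
{
	struct req *r = calloc(1, sizeof(*r));

	if (!r)
		return -1;
	if (!issue_and_wait(r)) {
		/* Timed out: the request may still be owned by the hardware.
		 * Install a completion that frees it later; do NOT free here. */
		r->on_late_completion = default_cmpl;
		return -2;	/* analogous to TIMEOUT_ERROR */
	}
	free(r);		/* completed in time: safe to release now */
	return 0;
}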
3162/**
3163 * lpfc_chk_tgt_mapped - Check if the scsi target is available and mapped
3164 * @vport: The virtual port to check on
3165 * @cmnd: Pointer to scsi_cmnd data structure.
3166 *
3167 * This routine delays until the scsi target (aka rport) for the
3168 * command exists (is present and logged in) or we declare it non-existent.
3169 *
3170 * Return code :
3171 * 0x2003 - Error
3172 * 0x2002 - Success
3173 **/
3174static int
3175lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3176{
3177 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3178 struct lpfc_nodelist *pnode = rdata->pnode;
3179 unsigned long later;
3180
3181 /*
3182 * If target is not in a MAPPED state, delay until
3183 * target is rediscovered or devloss timeout expires.
3184 */
3185 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3186 while (time_after(later, jiffies)) {
3187 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3188 return FAILED;
3189 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3190 return SUCCESS;
3191 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3192 rdata = cmnd->device->hostdata;
3193 if (!rdata)
3194 return FAILED;
3195 pnode = rdata->pnode;
3196 }
3197 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
3198 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3199 return FAILED;
3200 return SUCCESS;
3201}
3202
3203/**
3204 * lpfc_reset_flush_io_context - Flush outstanding I/O contexts after a reset TMF
3205 * @vport: The virtual port (scsi_host) for the flush context
3206 * @tgt_id: If aborting by Target context - specifies the target id
3207 * @lun_id: If aborting by Lun context - specifies the lun id
3208 * @context: specifies the context level to flush at.
3209 *
3210 * After a reset condition via TMF, we need to flush orphaned i/o
3211 * contexts from the adapter. This routine aborts any contexts
3212 * outstanding, then waits for their completions. The wait is
3213 * bounded by devloss_tmo though.
3214 *
3215 * Return code :
3216 * 0x2003 - Error
3217 * 0x2002 - Success
3218 **/
3219static int
3220lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
3221 uint64_t lun_id, lpfc_ctx_cmd context)
3222{
3223 struct lpfc_hba *phba = vport->phba;
3224 unsigned long later;
3225 int cnt;
3226
3227 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
2691 if (cnt) 3228 if (cnt)
2692 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 3229 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2693 cmnd->device->id, cmnd->device->lun, 3230 tgt_id, lun_id, context);
2694 LPFC_CTX_TGT);
2695 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 3231 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2696 while (time_after(later, jiffies) && cnt) { 3232 while (time_after(later, jiffies) && cnt) {
2697 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 3233 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
2698 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, 3234 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
2699 cmnd->device->lun, LPFC_CTX_TGT);
2700 } 3235 }
2701 if (cnt) { 3236 if (cnt) {
2702 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3237 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2703 "0719 device reset I/O flush failure: " 3238 "0724 I/O flush failure for context %s : cnt x%x\n",
2704 "cnt x%x\n", cnt); 3239 ((context == LPFC_CTX_LUN) ? "LUN" :
2705 ret = FAILED; 3240 ((context == LPFC_CTX_TGT) ? "TGT" :
3241 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
3242 cnt);
3243 return FAILED;
2706 } 3244 }
2707 return ret; 3245 return SUCCESS;
3246}
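
[Editor's note] The flush helper's shape is worth calling out: snapshot the count of outstanding I/Os, issue aborts once, then poll the count back down to zero under the same devloss-derived deadline. A minimal sketch of that drain loop, with a simulated completion counter standing in for lpfc_sli_sum_iocb()/lpfc_sli_abort_iocb():

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static int outstanding = 5;     /* pretend these are in-flight iocbs */

    static int count_outstanding(void) { return outstanding; }

    /* Toy "abort": in the driver the aborts complete asynchronously. */
    static void abort_all(void) { }

    static int flush_io_context(unsigned devloss_tmo_s)
    {
            time_t later = time(NULL) + 2 * devloss_tmo_s;
            int cnt = count_outstanding();

            if (cnt)
                    abort_all();
            while (time(NULL) < later && cnt) {
                    usleep(20 * 1000);      /* 20 ms, as in the driver */
                    outstanding--;          /* simulate a completion */
                    cnt = count_outstanding();
            }
            return cnt ? -1 : 0;            /* FAILED : SUCCESS */
    }

    int main(void)
    {
            printf("flush: %d\n", flush_io_context(2));
            return 0;
    }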
3247
3248/**
3249 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
3250 * @cmnd: Pointer to scsi_cmnd data structure.
3251 *
3252 * This routine does a device reset by sending a LUN_RESET task management
3253 * command.
3254 *
3255 * Return code :
3256 * 0x2003 - Error
3257 * 0x2002 - Success
3258 **/
3259static int
3260lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3261{
3262 struct Scsi_Host *shost = cmnd->device->host;
3263 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3264 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3265 struct lpfc_nodelist *pnode = rdata->pnode;
3266 unsigned tgt_id = cmnd->device->id;
3267 unsigned int lun_id = cmnd->device->lun;
3268 struct lpfc_scsi_event_header scsi_event;
3269 int status;
3270
3271 lpfc_block_error_handler(cmnd);
3272
3273 status = lpfc_chk_tgt_mapped(vport, cmnd);
3274 if (status == FAILED) {
3275 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3276 "0721 Device Reset rport failure: rdata x%p\n", rdata);
3277 return FAILED;
3278 }
3279
3280 scsi_event.event_type = FC_REG_SCSI_EVENT;
3281 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
3282 scsi_event.lun = lun_id;
3283 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3284 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3285
3286 fc_host_post_vendor_event(shost, fc_get_event_number(),
3287 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3288
3289 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3290 FCP_LUN_RESET);
3291
3292 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3293 "0713 SCSI layer issued Device Reset (%d, %d) "
3294 "return x%x\n", tgt_id, lun_id, status);
3295
3296 /*
3297 * We have to clean up i/o as : they may be orphaned by the TMF;
3298 * or if the TMF failed, they may be in an indeterminate state.
3299 * So, continue on.
3300 * We will report success if all the i/o aborts successfully.
3301 */
3302 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3303 LPFC_CTX_LUN);
3304 return status;
3305}
3306
3307/**
3308 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
3309 * @cmnd: Pointer to scsi_cmnd data structure.
3310 *
3311 * This routine does a target reset by sending a TARGET_RESET task management
3312 * command.
3313 *
3314 * Return code :
3315 * 0x2003 - Error
3316 * 0x2002 - Success
3317 **/
3318static int
3319lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3320{
3321 struct Scsi_Host *shost = cmnd->device->host;
3322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3323 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3324 struct lpfc_nodelist *pnode = rdata->pnode;
3325 unsigned tgt_id = cmnd->device->id;
3326 unsigned int lun_id = cmnd->device->lun;
3327 struct lpfc_scsi_event_header scsi_event;
3328 int status;
3329
3330 lpfc_block_error_handler(cmnd);
3331
3332 status = lpfc_chk_tgt_mapped(vport, cmnd);
3333 if (status == FAILED) {
3334 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3335 "0722 Target Reset rport failure: rdata x%p\n", rdata);
3336 return FAILED;
3337 }
3338
3339 scsi_event.event_type = FC_REG_SCSI_EVENT;
3340 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3341 scsi_event.lun = 0;
3342 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3343 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3344
3345 fc_host_post_vendor_event(shost, fc_get_event_number(),
3346 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3347
3348 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3349 FCP_TARGET_RESET);
3350
3351 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3352 "0723 SCSI layer issued Target Reset (%d, %d) "
3353 "return x%x\n", tgt_id, lun_id, status);
3354
3355 /*
3356 * We have to clean up i/o as : they may be orphaned by the TMF;
3357 * or if the TMF failed, they may be in an indeterminate state.
3358 * So, continue on.
3359 * We will report success if all the i/o aborts successfully.
3360 */
3361 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3362 LPFC_CTX_TGT);
3363 return status;
2708} 3364}
2709 3365
2710/** 3366/**
2711 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 3367 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
2712 * @cmnd: Pointer to scsi_cmnd data structure. 3368 * @cmnd: Pointer to scsi_cmnd data structure.
2713 * 3369 *
2714 * This routine does target reset to all target on @cmnd->device->host. 3370 * This routine does target reset to all targets on @cmnd->device->host.
3371 * This emulates Parallel SCSI Bus Reset Semantics.
2715 * 3372 *
2716 * Return Code: 3373 * Return code :
2717 * 0x2003 - Error 3374 * 0x2003 - Error
2718 * 0x2002 - Success 3375 * 0x2002 - Success
2719 **/ 3376 **/
2720static int 3377static int
2721lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 3378lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2722{ 3379{
2723 struct Scsi_Host *shost = cmnd->device->host; 3380 struct Scsi_Host *shost = cmnd->device->host;
2724 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3381 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2725 struct lpfc_hba *phba = vport->phba;
2726 struct lpfc_nodelist *ndlp = NULL; 3382 struct lpfc_nodelist *ndlp = NULL;
2727 int match;
2728 int ret = SUCCESS, status = SUCCESS, i;
2729 int cnt;
2730 struct lpfc_scsi_buf * lpfc_cmd;
2731 unsigned long later;
2732 struct lpfc_scsi_event_header scsi_event; 3383 struct lpfc_scsi_event_header scsi_event;
3384 int match;
3385 int ret = SUCCESS, status, i;
2733 3386
2734 scsi_event.event_type = FC_REG_SCSI_EVENT; 3387 scsi_event.event_type = FC_REG_SCSI_EVENT;
2735 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 3388 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
@@ -2737,13 +3390,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2737 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 3390 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
2738 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 3391 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
2739 3392
2740 fc_host_post_vendor_event(shost, 3393 fc_host_post_vendor_event(shost, fc_get_event_number(),
2741 fc_get_event_number(), 3394 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
2742 sizeof(scsi_event),
2743 (char *)&scsi_event,
2744 LPFC_NL_VENDOR_ID);
2745 3395
2746 lpfc_block_error_handler(cmnd); 3396 lpfc_block_error_handler(cmnd);
3397
2747 /* 3398 /*
2748 * Since the driver manages a single bus device, reset all 3399 * Since the driver manages a single bus device, reset all
2749 * targets known to the driver. Should any target reset 3400 * targets known to the driver. Should any target reset
@@ -2766,16 +3417,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2766 spin_unlock_irq(shost->host_lock); 3417 spin_unlock_irq(shost->host_lock);
2767 if (!match) 3418 if (!match)
2768 continue; 3419 continue;
2769 lpfc_cmd = lpfc_get_scsi_buf(phba); 3420
2770 if (lpfc_cmd) { 3421 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
2771 lpfc_cmd->timeout = 60; 3422 i, 0, FCP_TARGET_RESET);
2772 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, 3423
2773 cmnd->device->lun, 3424 if (status != SUCCESS) {
2774 ndlp->rport->dd_data);
2775 if (status != TIMEOUT_ERROR)
2776 lpfc_release_scsi_buf(phba, lpfc_cmd);
2777 }
2778 if (!lpfc_cmd || status != SUCCESS) {
2779 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3425 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2780 "0700 Bus Reset on target %d failed\n", 3426 "0700 Bus Reset on target %d failed\n",
2781 i); 3427 i);
@@ -2783,25 +3429,16 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2783 } 3429 }
2784 } 3430 }
2785 /* 3431 /*
2786 * All outstanding txcmplq I/Os should have been aborted by 3432 * We have to clean up i/o as : they may be orphaned by the TMFs
2787 * the targets. Unfortunately, some targets do not abide by 3433 * above; or if any of the TMFs failed, they may be in an
2788 * this forcing the driver to double check. 3434 * indeterminate state.
3435 * We will report success if all the i/o aborts successfully.
2789 */ 3436 */
2790 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); 3437
2791 if (cnt) 3438 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
2792 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 3439 if (status != SUCCESS)
2793 0, 0, LPFC_CTX_HOST);
2794 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2795 while (time_after(later, jiffies) && cnt) {
2796 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
2797 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
2798 }
2799 if (cnt) {
2800 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2801 "0715 Bus Reset I/O flush failure: "
2802 "cnt x%x left x%x\n", cnt, i);
2803 ret = FAILED; 3440 ret = FAILED;
2804 } 3441
2805 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3442 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2806 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 3443 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
2807 return ret; 3444 return ret;
@@ -2825,11 +3462,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2825{ 3462{
2826 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3463 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2827 struct lpfc_hba *phba = vport->phba; 3464 struct lpfc_hba *phba = vport->phba;
2828 struct lpfc_scsi_buf *scsi_buf = NULL;
2829 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 3465 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2830 uint32_t total = 0, i; 3466 uint32_t total = 0;
2831 uint32_t num_to_alloc = 0; 3467 uint32_t num_to_alloc = 0;
2832 unsigned long flags; 3468 int num_allocated = 0;
2833 3469
2834 if (!rport || fc_remote_port_chkready(rport)) 3470 if (!rport || fc_remote_port_chkready(rport))
2835 return -ENXIO; 3471 return -ENXIO;
@@ -2863,20 +3499,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2863 (phba->cfg_hba_queue_depth - total)); 3499 (phba->cfg_hba_queue_depth - total));
2864 num_to_alloc = phba->cfg_hba_queue_depth - total; 3500 num_to_alloc = phba->cfg_hba_queue_depth - total;
2865 } 3501 }
2866 3502 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
2867 for (i = 0; i < num_to_alloc; i++) { 3503 if (num_to_alloc != num_allocated) {
2868 scsi_buf = lpfc_new_scsi_buf(vport); 3504 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2869 if (!scsi_buf) { 3505 "0708 Allocation request of %d "
2870 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3506 "command buffers did not succeed. "
2871 "0706 Failed to allocate " 3507 "Allocated %d buffers.\n",
2872 "command buffer\n"); 3508 num_to_alloc, num_allocated);
2873 break;
2874 }
2875
2876 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
2877 phba->total_scsi_bufs++;
2878 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
2879 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
2880 } 3509 }
2881 return 0; 3510 return 0;
2882} 3511}
@@ -2942,7 +3571,8 @@ struct scsi_host_template lpfc_template = {
2942 .info = lpfc_info, 3571 .info = lpfc_info,
2943 .queuecommand = lpfc_queuecommand, 3572 .queuecommand = lpfc_queuecommand,
2944 .eh_abort_handler = lpfc_abort_handler, 3573 .eh_abort_handler = lpfc_abort_handler,
2945 .eh_device_reset_handler= lpfc_device_reset_handler, 3574 .eh_device_reset_handler = lpfc_device_reset_handler,
3575 .eh_target_reset_handler = lpfc_target_reset_handler,
2946 .eh_bus_reset_handler = lpfc_bus_reset_handler, 3576 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2947 .slave_alloc = lpfc_slave_alloc, 3577 .slave_alloc = lpfc_slave_alloc,
2948 .slave_configure = lpfc_slave_configure, 3578 .slave_configure = lpfc_slave_configure,
@@ -2962,7 +3592,8 @@ struct scsi_host_template lpfc_vport_template = {
2962 .info = lpfc_info, 3592 .info = lpfc_info,
2963 .queuecommand = lpfc_queuecommand, 3593 .queuecommand = lpfc_queuecommand,
2964 .eh_abort_handler = lpfc_abort_handler, 3594 .eh_abort_handler = lpfc_abort_handler,
2965 .eh_device_reset_handler= lpfc_device_reset_handler, 3595 .eh_device_reset_handler = lpfc_device_reset_handler,
3596 .eh_target_reset_handler = lpfc_target_reset_handler,
2966 .eh_bus_reset_handler = lpfc_bus_reset_handler, 3597 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2967 .slave_alloc = lpfc_slave_alloc, 3598 .slave_alloc = lpfc_slave_alloc,
2968 .slave_configure = lpfc_slave_configure, 3599 .slave_configure = lpfc_slave_configure,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c7c440d5fa29..65dfc8bd5b49 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
140 struct fcp_rsp *fcp_rsp; 140 struct fcp_rsp *fcp_rsp;
141 struct ulp_bde64 *fcp_bpl; 141 struct ulp_bde64 *fcp_bpl;
142 142
143 dma_addr_t dma_phys_bpl;
144
143 /* cur_iocbq has phys of the dma-able buffer. 145 /* cur_iocbq has phys of the dma-able buffer.
144 * Iotag is in here 146 * Iotag is in here
145 */ 147 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba4..acc43b061ba1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,9 +29,12 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h>
32 33
34#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 35#include "lpfc_hw.h"
34#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 38#include "lpfc_nl.h"
36#include "lpfc_disc.h" 39#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -40,24 +43,7 @@
40#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
41#include "lpfc_compat.h" 44#include "lpfc_compat.h"
42#include "lpfc_debugfs.h" 45#include "lpfc_debugfs.h"
43 46#include "lpfc_vport.h"
44/*
45 * Define macro to log: Mailbox command x%x cannot issue Data
46 * This allows multiple uses of lpfc_msgBlk0311
47 * w/o perturbing log msg utility.
48 */
49#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
50 lpfc_printf_log(phba, \
51 KERN_INFO, \
52 LOG_MBOX | LOG_SLI, \
53 "(%d):0311 Mailbox command x%x cannot " \
54 "issue Data: x%x x%x x%x\n", \
55 pmbox->vport ? pmbox->vport->vpi : 0, \
56 pmbox->mb.mbxCommand, \
57 phba->pport->port_state, \
58 psli->sli_flag, \
59 flag)
60
61 47
62/* There are only four IOCB completion types. */ 48/* There are only four IOCB completion types. */
63typedef enum _lpfc_iocb_type { 49typedef enum _lpfc_iocb_type {
@@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type {
67 LPFC_ABORT_IOCB 53 LPFC_ABORT_IOCB
68} lpfc_iocb_type; 54} lpfc_iocb_type;
69 55
56
57/* Provide function prototypes local to this module. */
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *);
62
63static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{
66 return &iocbq->iocb;
67}
68
69/**
 70 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 71 * @q: The Work Queue to operate on.
 72 * @wqe: The Work Queue Entry to put on the Work Queue.
73 *
74 * This routine will copy the contents of @wqe to the next available entry on
75 * the @q. This function will then ring the Work Queue Doorbell to signal the
76 * HBA to start processing the Work Queue Entry. This function returns 0 if
77 * successful. If no entries are available on @q then this function will return
78 * -ENOMEM.
79 * The caller is expected to hold the hbalock when calling this routine.
80 **/
81static uint32_t
82lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
83{
84 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
85 struct lpfc_register doorbell;
86 uint32_t host_index;
87
88 /* If the host has not yet processed the next entry then we are done */
89 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
90 return -ENOMEM;
91 /* set consumption flag every once in a while */
92 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
93 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
94
95 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
96
97 /* Update the host index before invoking device */
98 host_index = q->host_index;
99 q->host_index = ((q->host_index + 1) % q->entry_count);
100
101 /* Ring Doorbell */
102 doorbell.word0 = 0;
103 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
104 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
105 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
106 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
107 readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
108
109 return 0;
110}
111
112/**
113 * lpfc_sli4_wq_release - Updates internal hba index for WQ
114 * @q: The Work Queue to operate on.
115 * @index: The index to advance the hba index to.
116 *
117 * This routine will update the HBA index of a queue to reflect consumption of
118 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
119 * an entry the host calls this function to update the queue's internal
120 * pointers. This routine returns the number of entries that were consumed by
121 * the HBA.
122 **/
123static uint32_t
124lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
125{
126 uint32_t released = 0;
127
128 if (q->hba_index == index)
129 return 0;
130 do {
131 q->hba_index = ((q->hba_index + 1) % q->entry_count);
132 released++;
133 } while (q->hba_index != index);
134 return released;
135}
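
[Editor's note] A userspace sketch of the index discipline used by the WQ put/release pair may help: host_index is the producer cursor, hba_index the consumer cursor, and the queue reports full one slot early so the full and empty states never alias. The names below (toy_wq and friends) are invented for illustration; doorbell writes are reduced to comments.

    #include <stdio.h>

    #define ENTRY_COUNT 8

    struct toy_wq {
            unsigned host_index;    /* next slot the host will fill   */
            unsigned hba_index;     /* next slot the HBA will consume */
            int      entries[ENTRY_COUNT];
    };

    /* Full when advancing host_index would land on hba_index; like the
     * WQ code, this deliberately leaves one slot unused. */
    static int toy_wq_put(struct toy_wq *q, int val)
    {
            if (((q->host_index + 1) % ENTRY_COUNT) == q->hba_index)
                    return -1;              /* -ENOMEM in the driver */
            q->entries[q->host_index] = val;
            q->host_index = (q->host_index + 1) % ENTRY_COUNT;
            /* the real code rings the WQ doorbell here */
            return 0;
    }

    /* Advance hba_index to the index the hardware reports, counting how
     * many entries that consumed, as lpfc_sli4_wq_release() does. */
    static unsigned toy_wq_release(struct toy_wq *q, unsigned index)
    {
            unsigned released = 0;

            while (q->hba_index != index) {
                    q->hba_index = (q->hba_index + 1) % ENTRY_COUNT;
                    released++;
            }
            return released;
    }

    int main(void)
    {
            struct toy_wq q = { 0 };
            int i;

            for (i = 0; toy_wq_put(&q, i) == 0; i++)
                    ;
            printf("posted %d entries before full\n", i);    /* 7 */
            printf("released %u entries\n", toy_wq_release(&q, 4));
            return 0;
    }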
136
137/**
 138 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 139 * @q: The Mailbox Queue to operate on.
 140 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 141 *
 142 * This routine will copy the contents of @mqe to the next available entry on
 143 * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
 144 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
145 * successful. If no entries are available on @q then this function will return
146 * -ENOMEM.
147 * The caller is expected to hold the hbalock when calling this routine.
148 **/
149static uint32_t
150lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
151{
152 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
153 struct lpfc_register doorbell;
154 uint32_t host_index;
155
156 /* If the host has not yet processed the next entry then we are done */
157 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
158 return -ENOMEM;
159 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
160 /* Save off the mailbox pointer for completion */
161 q->phba->mbox = (MAILBOX_t *)temp_mqe;
162
163 /* Update the host index before invoking device */
164 host_index = q->host_index;
165 q->host_index = ((q->host_index + 1) % q->entry_count);
166
167 /* Ring Doorbell */
168 doorbell.word0 = 0;
169 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
170 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
171 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
172 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
173 return 0;
174}
175
176/**
177 * lpfc_sli4_mq_release - Updates internal hba index for MQ
178 * @q: The Mailbox Queue to operate on.
179 *
180 * This routine will update the HBA index of a queue to reflect consumption of
181 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
182 * an entry the host calls this function to update the queue's internal
183 * pointers. This routine returns the number of entries that were consumed by
184 * the HBA.
185 **/
186static uint32_t
187lpfc_sli4_mq_release(struct lpfc_queue *q)
188{
189 /* Clear the mailbox pointer for completion */
190 q->phba->mbox = NULL;
191 q->hba_index = ((q->hba_index + 1) % q->entry_count);
192 return 1;
193}
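
[Editor's note] One behavioral difference from the WQ is visible here: the MQ put also stashes the in-flight mailbox pointer (phba->mbox) for the completion path, and release clears it while consuming exactly one entry. A simplified sketch of that save/clear pairing, assuming a single outstanding command (a stricter rule than the ring itself enforces, adopted here only to keep the toy small):

    #include <stdio.h>
    #include <stddef.h>

    struct toy_mbox { int cmd; };

    static struct toy_mbox *inflight;       /* like phba->mbox */

    static int toy_mq_put(struct toy_mbox *mbox)
    {
            if (inflight)
                    return -1;      /* previous command not completed yet */
            inflight = mbox;        /* saved for the completion handler */
            /* the real code copies the MQE and rings the MQ doorbell */
            return 0;
    }

    static void toy_mq_release(void)
    {
            inflight = NULL;        /* completion consumed exactly one entry */
    }

    int main(void)
    {
            struct toy_mbox m = { .cmd = 0x88 };

            printf("put: %d\n", toy_mq_put(&m));
            printf("put again (busy): %d\n", toy_mq_put(&m));
            toy_mq_release();
            printf("put after release: %d\n", toy_mq_put(&m));
            return 0;
    }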
194
195/**
 196 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
197 * @q: The Event Queue to get the first valid EQE from
198 *
199 * This routine will get the first valid Event Queue Entry from @q, update
200 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
201 * the Queue (no more work to do), or the Queue is full of EQEs that have been
202 * processed, but not popped back to the HBA then this routine will return NULL.
203 **/
204static struct lpfc_eqe *
205lpfc_sli4_eq_get(struct lpfc_queue *q)
206{
207 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
208
209 /* If the next EQE is not valid then we are done */
210 if (!bf_get(lpfc_eqe_valid, eqe))
211 return NULL;
212 /* If the host has not yet processed the next entry then we are done */
213 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
214 return NULL;
215
216 q->hba_index = ((q->hba_index + 1) % q->entry_count);
217 return eqe;
218}
219
220/**
221 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
222 * @q: The Event Queue that the host has completed processing for.
 223 * @arm: Indicates whether the host wants to arm this EQ.
224 *
225 * This routine will mark all Event Queue Entries on @q, from the last
226 * known completed entry to the last entry that was processed, as completed
227 * by clearing the valid bit for each completion queue entry. Then it will
228 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
229 * The internal host index in the @q will be updated by this routine to indicate
230 * that the host has finished processing the entries. The @arm parameter
231 * indicates that the queue should be rearmed when ringing the doorbell.
232 *
233 * This function will return the number of EQEs that were popped.
234 **/
235uint32_t
236lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
237{
238 uint32_t released = 0;
239 struct lpfc_eqe *temp_eqe;
240 struct lpfc_register doorbell;
241
242 /* while there are valid entries */
243 while (q->hba_index != q->host_index) {
244 temp_eqe = q->qe[q->host_index].eqe;
245 bf_set(lpfc_eqe_valid, temp_eqe, 0);
246 released++;
247 q->host_index = ((q->host_index + 1) % q->entry_count);
248 }
249 if (unlikely(released == 0 && !arm))
250 return 0;
251
252 /* ring doorbell for number popped */
253 doorbell.word0 = 0;
254 if (arm) {
255 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
256 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
257 }
258 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
262 return released;
263}
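
[Editor's note] The EQ get/release pair implements a valid-bit handshake: hardware sets a valid bit as it posts each entry, the host consumes entries only while the bit is set, and release clears the bits again before handing the slots back via the doorbell. A self-contained toy of that handshake (the arm flag is carried through but, unlike the real doorbell write, does nothing here):

    #include <stdbool.h>
    #include <stdio.h>

    #define ENTRY_COUNT 4

    struct toy_eqe { bool valid; int event; };

    struct toy_eq {
            unsigned host_index;    /* first entry not yet released  */
            unsigned hba_index;     /* next entry the host will read */
            struct toy_eqe qe[ENTRY_COUNT];
    };

    /* Consume the next entry only if the hardware marked it valid,
     * mirroring lpfc_sli4_eq_get(). */
    static struct toy_eqe *toy_eq_get(struct toy_eq *q)
    {
            struct toy_eqe *eqe = &q->qe[q->hba_index];

            if (!eqe->valid)
                    return NULL;
            if (((q->hba_index + 1) % ENTRY_COUNT) == q->host_index)
                    return NULL;    /* all processed, none released yet */
            q->hba_index = (q->hba_index + 1) % ENTRY_COUNT;
            return eqe;
    }

    /* Clear the valid bits of everything processed so far and count the
     * slots handed back, as lpfc_sli4_eq_release() does before ringing
     * the doorbell (with or without the re-arm bit). */
    static unsigned toy_eq_release(struct toy_eq *q, bool arm)
    {
            unsigned released = 0;

            while (q->hba_index != q->host_index) {
                    q->qe[q->host_index].valid = false;
                    q->host_index = (q->host_index + 1) % ENTRY_COUNT;
                    released++;
            }
            (void)arm;      /* a real release folds arm into the doorbell */
            return released;
    }

    int main(void)
    {
            struct toy_eq q = { 0 };
            unsigned n = 0;

            q.qe[0].valid = q.qe[1].valid = true;   /* HBA posted two events */
            while (toy_eq_get(&q))
                    n++;
            printf("got %u events, released %u\n", n, toy_eq_release(&q, true));
            return 0;
    }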
264
265/**
266 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
267 * @q: The Completion Queue to get the first valid CQE from
268 *
269 * This routine will get the first valid Completion Queue Entry from @q, update
270 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
271 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 272 * processed but not popped back to the HBA, then this routine will return NULL.
273 **/
274static struct lpfc_cqe *
275lpfc_sli4_cq_get(struct lpfc_queue *q)
276{
277 struct lpfc_cqe *cqe;
278
279 /* If the next CQE is not valid then we are done */
280 if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
281 return NULL;
282 /* If the host has not yet processed the next entry then we are done */
283 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
284 return NULL;
285
286 cqe = q->qe[q->hba_index].cqe;
287 q->hba_index = ((q->hba_index + 1) % q->entry_count);
288 return cqe;
289}
290
291/**
292 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
293 * @q: The Completion Queue that the host has completed processing for.
 294 * @arm: Indicates whether the host wants to arm this CQ.
295 *
296 * This routine will mark all Completion queue entries on @q, from the last
297 * known completed entry to the last entry that was processed, as completed
298 * by clearing the valid bit for each completion queue entry. Then it will
299 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
300 * The internal host index in the @q will be updated by this routine to indicate
301 * that the host has finished processing the entries. The @arm parameter
302 * indicates that the queue should be rearmed when ringing the doorbell.
303 *
304 * This function will return the number of CQEs that were released.
305 **/
306uint32_t
307lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
308{
309 uint32_t released = 0;
310 struct lpfc_cqe *temp_qe;
311 struct lpfc_register doorbell;
312
313 /* while there are valid entries */
314 while (q->hba_index != q->host_index) {
315 temp_qe = q->qe[q->host_index].cqe;
316 bf_set(lpfc_cqe_valid, temp_qe, 0);
317 released++;
318 q->host_index = ((q->host_index + 1) % q->entry_count);
319 }
320 if (unlikely(released == 0 && !arm))
321 return 0;
322
323 /* ring doorbell for number popped */
324 doorbell.word0 = 0;
325 if (arm)
326 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
327 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
328 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
329 bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
330 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
331 return released;
332}
333
334/**
335 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 336 * @hq: The Header Receive Queue to operate on.
 337 * @dq: The Data Receive Queue to operate on.
 338 * @hrqe: The header Receive Queue Entry; @drqe is the matching data entry.
 339 *
 340 * This routine will copy the contents of @hrqe and @drqe to the next available
 341 * entries on @hq and @dq, then ring the Receive Queue Doorbell to signal the
 342 * HBA to start processing the Receive Queue Entries. This function returns the
 343 * index that the entries were copied to if successful, or -EBUSY if full.
344 * The caller is expected to hold the hbalock when calling this routine.
345 **/
346static int
347lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
348 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
349{
350 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
351 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
352 struct lpfc_register doorbell;
353 int put_index = hq->host_index;
354
355 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
356 return -EINVAL;
357 if (hq->host_index != dq->host_index)
358 return -EINVAL;
359 /* If the host has not yet processed the next entry then we are done */
360 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
361 return -EBUSY;
362 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
363 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
364
365 /* Update the host index to point to the next slot */
366 hq->host_index = ((hq->host_index + 1) % hq->entry_count);
367 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
368
369 /* Ring The Header Receive Queue Doorbell */
370 if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
371 doorbell.word0 = 0;
372 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
373 LPFC_RQ_POST_BATCH);
374 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
375 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
376 }
377 return put_index;
378}
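
[Editor's note] Two details of the RQ put deserve emphasis: the header and data queues must advance in lockstep, and the doorbell is rung once per LPFC_RQ_POST_BATCH entries rather than per entry. The toy below models both; the doorbells variable is just a counter standing in for the register write.

    #include <stdio.h>

    #define ENTRY_COUNT   16
    #define RQ_POST_BATCH 4

    struct toy_rq {
            unsigned host_index;
            unsigned hba_index;
    };

    static unsigned doorbells;      /* how many times we "rang" */

    /* Header and data queues advance together; the doorbell only fires
     * on every RQ_POST_BATCH-th post, as in lpfc_sli4_rq_put(). */
    static int toy_rq_put(struct toy_rq *hq, struct toy_rq *dq)
    {
            int put_index = hq->host_index;

            if (hq->host_index != dq->host_index)
                    return -1;              /* -EINVAL: out of step */
            if (((hq->host_index + 1) % ENTRY_COUNT) == hq->hba_index)
                    return -2;              /* -EBUSY: queue full */

            hq->host_index = (hq->host_index + 1) % ENTRY_COUNT;
            dq->host_index = (dq->host_index + 1) % ENTRY_COUNT;

            if (!(hq->host_index % RQ_POST_BATCH))
                    doorbells++;            /* batch notification */
            return put_index;
    }

    int main(void)
    {
            struct toy_rq hq = { 0 }, dq = { 0 };
            int i;

            for (i = 0; i < 12; i++)
                    toy_rq_put(&hq, &dq);
            printf("12 puts, %u doorbells\n", doorbells);   /* 3 */
            return 0;
    }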
379
380/**
381 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 382 * @hq: The Header Receive Queue to operate on, with its paired Data Queue @dq.
383 *
384 * This routine will update the HBA index of a queue to reflect consumption of
385 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
386 * consumed an entry the host calls this function to update the queue's
387 * internal pointers. This routine returns the number of entries that were
388 * consumed by the HBA.
389 **/
390static uint32_t
391lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
392{
393 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
394 return 0;
395 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
396 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
397 return 1;
398}
399
70/** 400/**
71 * lpfc_cmd_iocb - Get next command iocb entry in the ring 401 * lpfc_cmd_iocb - Get next command iocb entry in the ring
72 * @phba: Pointer to HBA context object. 402 * @phba: Pointer to HBA context object.
@@ -121,6 +451,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
121} 451}
122 452
123/** 453/**
454 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
455 * @phba: Pointer to HBA context object.
456 * @xritag: XRI value.
457 *
 458 * This function clears the sglq pointer from the array of active
459 * sglq's. The xritag that is passed in is used to index into the
460 * array. Before the xritag can be used it needs to be adjusted
461 * by subtracting the xribase.
462 *
 463 * Returns sglq pointer = success, NULL = Failure.
464 **/
465static struct lpfc_sglq *
466__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
467{
468 uint16_t adj_xri;
469 struct lpfc_sglq *sglq;
470 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
471 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
472 return NULL;
473 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
474 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
475 return sglq;
476}
477
478/**
479 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
480 * @phba: Pointer to HBA context object.
481 * @xritag: XRI value.
482 *
 483 * This function returns the sglq pointer from the array of active
484 * sglq's. The xritag that is passed in is used to index into the
485 * array. Before the xritag can be used it needs to be adjusted
486 * by subtracting the xribase.
487 *
 488 * Returns sglq pointer = success, NULL = Failure.
489 **/
490static struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{
493 uint16_t adj_xri;
494 struct lpfc_sglq *sglq;
495 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
496 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
497 return NULL;
498 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
499 return sglq;
500}
501
502/**
503 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
504 * @phba: Pointer to HBA context object.
505 *
 506 * This function is called with hbalock held. It gets a new driver
 507 * sglq object from the sglq list and marks it active. If the
 508 * list is not empty it returns a pointer to the newly
 509 * allocated sglq object, else it returns NULL.
510 **/
511static struct lpfc_sglq *
512__lpfc_sli_get_sglq(struct lpfc_hba *phba)
513{
514 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
515 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
520 return sglq;
521}
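
[Editor's note] The common step in all three sglq helpers is the tag translation: hardware reports absolute XRI values, and the driver subtracts the configured xri_base before indexing the active array. A standalone sketch of the get-and-clear variant, with XRI_BASE and MAX_XRI as invented configuration values:

    #include <stdio.h>
    #include <stddef.h>

    #define XRI_BASE 100            /* hypothetical xri_base from the HBA */
    #define MAX_XRI   16

    struct toy_sglq { unsigned xritag; };

    static struct toy_sglq *active[MAX_XRI + 1];

    /* Translate an absolute XRI tag to an array slot, then return and
     * clear the entry, as __lpfc_clear_active_sglq() does. */
    static struct toy_sglq *clear_active(unsigned xritag)
    {
            unsigned adj = xritag - XRI_BASE;
            struct toy_sglq *sglq;

            if (adj > MAX_XRI)
                    return NULL;
            sglq = active[adj];
            active[adj] = NULL;     /* the get-and-clear variant */
            return sglq;
    }

    int main(void)
    {
            struct toy_sglq s = { .xritag = 103 };

            active[s.xritag - XRI_BASE] = &s;
            printf("first clear:  %p\n", (void *)clear_active(103));
            printf("second clear: %p\n", (void *)clear_active(103)); /* NULL */
            return 0;
    }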
522
523/**
124 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 524 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
125 * @phba: Pointer to HBA context object. 525 * @phba: Pointer to HBA context object.
126 * 526 *
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
142} 542}
143 543
144/** 544/**
145 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 545 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
146 * @phba: Pointer to HBA context object. 546 * @phba: Pointer to HBA context object.
147 * @iocbq: Pointer to driver iocb object. 547 * @iocbq: Pointer to driver iocb object.
148 * 548 *
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
150 * iocb object to the iocb pool. The iotag in the iocb object 550 * iocb object to the iocb pool. The iotag in the iocb object
151 * does not change for each use of the iocb object. This function 551 * does not change for each use of the iocb object. This function
152 * clears all other fields of the iocb object when it is freed. 552 * clears all other fields of the iocb object when it is freed.
 553 * The sglq structure that holds the xritag and the phys and virtual
 554 * mappings for the scatter gather list is retrieved from the
 555 * active array of sglq. The get of the sglq pointer also clears
 556 * the entry in the array. If the status of the IO indicates that
 557 * this IO was aborted then the sglq entry is put on the
 558 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 559 * IO has good status or fails for any other reason then the sglq
 560 * entry is added to the free list (lpfc_sgl_list).
153 **/ 561 **/
154static void 562static void
155__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 563__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
564{
565 struct lpfc_sglq *sglq;
566 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
567 unsigned long iflag;
568
569 if (iocbq->sli4_xritag == NO_XRI)
570 sglq = NULL;
571 else
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
576 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag);
580 list_add(&sglq->list,
581 &phba->sli4_hba.lpfc_abts_els_sgl_list);
582 spin_unlock_irqrestore(
583 &phba->sli4_hba.abts_sgl_list_lock, iflag);
584 } else
585 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
586 }
587
588
589 /*
590 * Clean all volatile data fields, preserve iotag and node struct.
591 */
592 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
593 iocbq->sli4_xritag = NO_XRI;
594 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
595}
596
597/**
598 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
599 * @phba: Pointer to HBA context object.
600 * @iocbq: Pointer to driver iocb object.
601 *
602 * This function is called with hbalock held to release driver
603 * iocb object to the iocb pool. The iotag in the iocb object
604 * does not change for each use of the iocb object. This function
605 * clears all other fields of the iocb object when it is freed.
606 **/
607static void
608__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
156{ 609{
157 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 610 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
158 611
@@ -160,10 +613,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
160 * Clean all volatile data fields, preserve iotag and node struct. 613 * Clean all volatile data fields, preserve iotag and node struct.
161 */ 614 */
162 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 615 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
616 iocbq->sli4_xritag = NO_XRI;
163 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 617 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
164} 618}
165 619
166/** 620/**
621 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
622 * @phba: Pointer to HBA context object.
623 * @iocbq: Pointer to driver iocb object.
624 *
625 * This function is called with hbalock held to release driver
626 * iocb object to the iocb pool. The iotag in the iocb object
627 * does not change for each use of the iocb object. This function
628 * clears all other fields of the iocb object when it is freed.
629 **/
630static void
631__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
632{
633 phba->__lpfc_sli_release_iocbq(phba, iocbq);
634}
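
[Editor's note] This wrapper is the first of several places where the patch routes a hot-path call through a function pointer chosen once per SLI revision (here phba->__lpfc_sli_release_iocbq, pointed at the _s3 or _s4 variant elsewhere in the series). The pattern in miniature, with hypothetical names:

    #include <stdio.h>

    struct toy_hba;
    typedef void (*release_fn)(struct toy_hba *);

    struct toy_hba {
            int sli_rev;
            release_fn release_iocbq;       /* chosen once at setup */
    };

    static void release_s3(struct toy_hba *hba) { (void)hba; puts("SLI-3 release"); }
    static void release_s4(struct toy_hba *hba) { (void)hba; puts("SLI-4 release"); }

    static void toy_hba_setup(struct toy_hba *hba, int rev)
    {
            hba->sli_rev = rev;
            hba->release_iocbq = (rev == 4) ? release_s4 : release_s3;
    }

    int main(void)
    {
            struct toy_hba hba;

            toy_hba_setup(&hba, 4);
            hba.release_iocbq(&hba);        /* dispatches to release_s4 */
            return 0;
    }

The payoff is that callers such as lpfc_sli_release_iocbq() stay revision-agnostic; only the setup code needs to know which silicon is present.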
635
636/**
167 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 637 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
168 * @phba: Pointer to HBA context object. 638 * @phba: Pointer to HBA context object.
169 * @iocbq: Pointer to driver iocb object. 639 * @iocbq: Pointer to driver iocb object.
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
281 case CMD_GEN_REQUEST64_CR: 751 case CMD_GEN_REQUEST64_CR:
282 case CMD_GEN_REQUEST64_CX: 752 case CMD_GEN_REQUEST64_CX:
283 case CMD_XMIT_ELS_RSP64_CX: 753 case CMD_XMIT_ELS_RSP64_CX:
754 case DSSCMD_IWRITE64_CR:
755 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
284 type = LPFC_SOL_IOCB; 762 type = LPFC_SOL_IOCB;
285 break; 763 break;
286 case CMD_ABORT_XRI_CN: 764 case CMD_ABORT_XRI_CN:
@@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
348 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 826 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
349 if (!pmb) 827 if (!pmb)
350 return -ENOMEM; 828 return -ENOMEM;
351 pmbox = &pmb->mb; 829 pmbox = &pmb->u.mb;
352 phba->link_state = LPFC_INIT_MBX_CMDS; 830 phba->link_state = LPFC_INIT_MBX_CMDS;
353 for (i = 0; i < psli->num_rings; i++) { 831 for (i = 0; i < psli->num_rings; i++) {
354 lpfc_config_ring(phba, i, pmb); 832 lpfc_config_ring(phba, i, pmb);
@@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
779 phba->hbqs[i].buffer_count = 0; 1257 phba->hbqs[i].buffer_count = 0;
780 } 1258 }
 781 /* Return all HBQ buffers that are in-flight */ 1259 /* Return all HBQ buffers that are in-flight */
782 list_for_each_entry_safe(dmabuf, next_dmabuf, 1260 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
783 &phba->hbqbuf_in_list, list) { 1261 list) {
784 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1262 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
785 list_del(&hbq_buf->dbuf.list); 1263 list_del(&hbq_buf->dbuf.list);
786 if (hbq_buf->tag == -1) { 1264 if (hbq_buf->tag == -1) {
@@ -814,10 +1292,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
814 * pointer to the hbq entry if it successfully post the buffer 1292 * pointer to the hbq entry if it successfully post the buffer
815 * else it will return NULL. 1293 * else it will return NULL.
816 **/ 1294 **/
817static struct lpfc_hbq_entry * 1295static int
818lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1296lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
819 struct hbq_dmabuf *hbq_buf) 1297 struct hbq_dmabuf *hbq_buf)
820{ 1298{
1299 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1300}
1301
1302/**
1303 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1304 * @phba: Pointer to HBA context object.
1305 * @hbqno: HBQ number.
1306 * @hbq_buf: Pointer to HBQ buffer.
1307 *
1308 * This function is called with the hbalock held to post a hbq buffer to the
1309 * firmware. If the function finds an empty slot in the HBQ, it will post the
1310 * buffer and place it on the hbq_buffer_list. The function will return zero if
1311 * it successfully post the buffer else it will return an error.
1312 **/
1313static int
1314lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1315 struct hbq_dmabuf *hbq_buf)
1316{
821 struct lpfc_hbq_entry *hbqe; 1317 struct lpfc_hbq_entry *hbqe;
822 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1318 dma_addr_t physaddr = hbq_buf->dbuf.phys;
823 1319
@@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
838 /* flush */ 1334 /* flush */
839 readl(phba->hbq_put + hbqno); 1335 readl(phba->hbq_put + hbqno);
840 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1336 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
841 } 1337 return 0;
842 return hbqe; 1338 } else
1339 return -ENOMEM;
1340}
1341
1342/**
1343 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1344 * @phba: Pointer to HBA context object.
1345 * @hbqno: HBQ number.
1346 * @hbq_buf: Pointer to HBQ buffer.
1347 *
1348 * This function is called with the hbalock held to post an RQE to the SLI4
1349 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1350 * the hbq_buffer_list and return zero, otherwise it will return an error.
1351 **/
1352static int
1353lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1354 struct hbq_dmabuf *hbq_buf)
1355{
1356 int rc;
1357 struct lpfc_rqe hrqe;
1358 struct lpfc_rqe drqe;
1359
1360 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1361 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1362 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1363 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1364 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1365 &hrqe, &drqe);
1366 if (rc < 0)
1367 return rc;
1368 hbq_buf->tag = rc;
1369 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1370 return 0;
843} 1371}
844 1372
845/* HBQ for ELS and CT traffic. */ 1373/* HBQ for ELS and CT traffic. */
@@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
914 dbuf.list); 1442 dbuf.list);
915 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1443 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
916 (hbqno << 16)); 1444 (hbqno << 16));
917 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1445 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
918 phba->hbqs[hbqno].buffer_count++; 1446 phba->hbqs[hbqno].buffer_count++;
919 posted++; 1447 posted++;
920 } else 1448 } else
@@ -965,6 +1493,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
965} 1493}
966 1494
967/** 1495/**
1496 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1497 * @phba: Pointer to HBA context object.
1498 * @hbqno: HBQ number.
1499 *
1500 * This function removes the first hbq buffer on an hbq list and returns a
1501 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1502 **/
1503static struct hbq_dmabuf *
1504lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1505{
1506 struct lpfc_dmabuf *d_buf;
1507
1508 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1509 if (!d_buf)
1510 return NULL;
1511 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1512}
1513
1514/**
968 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1515 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
969 * @phba: Pointer to HBA context object. 1516 * @phba: Pointer to HBA context object.
970 * @tag: Tag of the hbq buffer. 1517 * @tag: Tag of the hbq buffer.
@@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
985 if (hbqno >= LPFC_MAX_HBQS) 1532 if (hbqno >= LPFC_MAX_HBQS)
986 return NULL; 1533 return NULL;
987 1534
1535 spin_lock_irq(&phba->hbalock);
988 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1536 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
989 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1537 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
990 if (hbq_buf->tag == tag) { 1538 if (hbq_buf->tag == tag) {
1539 spin_unlock_irq(&phba->hbalock);
991 return hbq_buf; 1540 return hbq_buf;
992 } 1541 }
993 } 1542 }
1543 spin_unlock_irq(&phba->hbalock);
994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
995 "1803 Bad hbq tag. Data: x%x x%x\n", 1545 "1803 Bad hbq tag. Data: x%x x%x\n",
996 tag, phba->hbqs[tag >> 16].buffer_count); 1546 tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1013 1563
1014 if (hbq_buffer) { 1564 if (hbq_buffer) {
1015 hbqno = hbq_buffer->tag >> 16; 1565 hbqno = hbq_buffer->tag >> 16;
1016 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1566 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1017 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1567 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1018 }
1019 } 1568 }
1020} 1569}
1021 1570
@@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1086 case MBX_HEARTBEAT: 1635 case MBX_HEARTBEAT:
1087 case MBX_PORT_CAPABILITIES: 1636 case MBX_PORT_CAPABILITIES:
1088 case MBX_PORT_IOV_CONTROL: 1637 case MBX_PORT_IOV_CONTROL:
1638 case MBX_SLI4_CONFIG:
1639 case MBX_SLI4_REQ_FTRS:
1640 case MBX_REG_FCFI:
1641 case MBX_UNREG_FCFI:
1642 case MBX_REG_VFI:
1643 case MBX_UNREG_VFI:
1644 case MBX_INIT_VPI:
1645 case MBX_INIT_VFI:
1646 case MBX_RESUME_RPI:
1089 ret = mbxCommand; 1647 ret = mbxCommand;
1090 break; 1648 break;
1091 default: 1649 default:
@@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1106 * will wake up thread waiting on the wait queue pointed by context1 1664 * will wake up thread waiting on the wait queue pointed by context1
1107 * of the mailbox. 1665 * of the mailbox.
1108 **/ 1666 **/
1109static void 1667void
1110lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1668lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1111{ 1669{
1112 wait_queue_head_t *pdone_q; 1670 wait_queue_head_t *pdone_q;
@@ -1140,7 +1698,7 @@ void
1140lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1698lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1141{ 1699{
1142 struct lpfc_dmabuf *mp; 1700 struct lpfc_dmabuf *mp;
1143 uint16_t rpi; 1701 uint16_t rpi, vpi;
1144 int rc; 1702 int rc;
1145 1703
1146 mp = (struct lpfc_dmabuf *) (pmb->context1); 1704 mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1150 kfree(mp); 1708 kfree(mp);
1151 } 1709 }
1152 1710
1711 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1712 (phba->sli_rev == LPFC_SLI_REV4))
1713 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1714
1153 /* 1715 /*
1154 * If a REG_LOGIN succeeded after node is destroyed or node 1716 * If a REG_LOGIN succeeded after node is destroyed or node
1155 * is in re-discovery driver need to cleanup the RPI. 1717 * is in re-discovery driver need to cleanup the RPI.
1156 */ 1718 */
1157 if (!(phba->pport->load_flag & FC_UNLOADING) && 1719 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1158 pmb->mb.mbxCommand == MBX_REG_LOGIN64 && 1720 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1159 !pmb->mb.mbxStatus) { 1721 !pmb->u.mb.mbxStatus) {
1160 1722 rpi = pmb->u.mb.un.varWords[0];
1161 rpi = pmb->mb.un.varWords[0]; 1723 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1162 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); 1724 lpfc_unreg_login(phba, vpi, rpi, pmb);
1163 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1725 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1164 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1726 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1165 if (rc != MBX_NOT_FINISHED) 1727 if (rc != MBX_NOT_FINISHED)
1166 return; 1728 return;
1167 } 1729 }
1168 1730
1169 mempool_free(pmb, phba->mbox_mem_pool); 1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1170 return; 1732 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else
1734 mempool_free(pmb, phba->mbox_mem_pool);
1171} 1735}
1172 1736
1173/** 1737/**
@@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1204 if (pmb == NULL) 1768 if (pmb == NULL)
1205 break; 1769 break;
1206 1770
1207 pmbox = &pmb->mb; 1771 pmbox = &pmb->u.mb;
1208 1772
1209 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 1773 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1210 if (pmb->vport) { 1774 if (pmb->vport) {
@@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 1233 /* Unknown mailbox command compl */ 1797 /* Unknown mailbox command compl */
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1235 "(%d):0323 Unknown Mailbox command " 1799 "(%d):0323 Unknown Mailbox command "
1236 "%x Cmpl\n", 1800 "x%x (x%x) Cmpl\n",
1237 pmb->vport ? pmb->vport->vpi : 0, 1801 pmb->vport ? pmb->vport->vpi : 0,
1238 pmbox->mbxCommand); 1802 pmbox->mbxCommand,
1803 lpfc_sli4_mbox_opcode_get(phba, pmb));
1239 phba->link_state = LPFC_HBA_ERROR; 1804 phba->link_state = LPFC_HBA_ERROR;
1240 phba->work_hs = HS_FFER3; 1805 phba->work_hs = HS_FFER3;
1241 lpfc_handle_eratt(phba); 1806 lpfc_handle_eratt(phba);
@@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1250 LOG_MBOX | LOG_SLI, 1815 LOG_MBOX | LOG_SLI,
1251 "(%d):0305 Mbox cmd cmpl " 1816 "(%d):0305 Mbox cmd cmpl "
1252 "error - RETRYing Data: x%x " 1817 "error - RETRYing Data: x%x "
1253 "x%x x%x x%x\n", 1818 "(x%x) x%x x%x x%x\n",
1254 pmb->vport ? pmb->vport->vpi :0, 1819 pmb->vport ? pmb->vport->vpi :0,
1255 pmbox->mbxCommand, 1820 pmbox->mbxCommand,
1821 lpfc_sli4_mbox_opcode_get(phba,
1822 pmb),
1256 pmbox->mbxStatus, 1823 pmbox->mbxStatus,
1257 pmbox->un.varWords[0], 1824 pmbox->un.varWords[0],
1258 pmb->vport->port_state); 1825 pmb->vport->port_state);
1259 pmbox->mbxStatus = 0; 1826 pmbox->mbxStatus = 0;
1260 pmbox->mbxOwner = OWN_HOST; 1827 pmbox->mbxOwner = OWN_HOST;
1261 spin_lock_irq(&phba->hbalock);
1262 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1263 spin_unlock_irq(&phba->hbalock);
1264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1828 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1265 if (rc == MBX_SUCCESS) 1829 if (rc != MBX_NOT_FINISHED)
1266 continue; 1830 continue;
1267 } 1831 }
1268 } 1832 }
1269 1833
1270 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 1834 /* Mailbox cmd <cmd> Cmpl <cmpl> */
1271 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 1835 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1272 "(%d):0307 Mailbox cmd x%x Cmpl x%p " 1836 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1273 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 1837 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1274 pmb->vport ? pmb->vport->vpi : 0, 1838 pmb->vport ? pmb->vport->vpi : 0,
1275 pmbox->mbxCommand, 1839 pmbox->mbxCommand,
1840 lpfc_sli4_mbox_opcode_get(phba, pmb),
1276 pmb->mbox_cmpl, 1841 pmb->mbox_cmpl,
1277 *((uint32_t *) pmbox), 1842 *((uint32_t *) pmbox),
1278 pmbox->un.varWords[0], 1843 pmbox->un.varWords[0],
@@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
1317 return &hbq_entry->dbuf; 1882 return &hbq_entry->dbuf;
1318} 1883}
1319 1884
1885/**
1886 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1887 * @phba: Pointer to HBA context object.
1888 * @pring: Pointer to driver SLI ring object.
1889 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1890 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1891 * @fch_type: the type for the first frame of the sequence.
1892 *
1893 * This function is called with no lock held. This function uses the r_ctl and
1894 * type of the received sequence to find the correct callback function to call
1895 * to process the sequence.
1896 **/
1897static int
1898lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1899 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1900 uint32_t fch_type)
1901{
1902 int i;
1903
1904 /* unSolicited Responses */
1905 if (pring->prt[0].profile) {
1906 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1907 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1908 saveq);
1909 return 1;
1910 }
1911 /* We must search, based on rctl / type
1912 for the right routine */
1913 for (i = 0; i < pring->num_mask; i++) {
1914 if ((pring->prt[i].rctl == fch_r_ctl) &&
1915 (pring->prt[i].type == fch_type)) {
1916 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1917 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1918 (phba, pring, saveq);
1919 return 1;
1920 }
1921 }
1922 return 0;
1923}
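
[Editor's note] The dispatch rule encoded here: a ring with a profile set sends every unsolicited sequence to prt[0], otherwise the (rctl, type) pair from the first frame is matched against the ring's mask table. A userspace sketch of the table scan; the mask values in main() are invented, not real FC header constants:

    #include <stdio.h>

    struct toy_mask {
            unsigned rctl, type;
            void (*handler)(void);
    };

    static void els_handler(void) { puts("ELS sequence"); }
    static void ct_handler(void)  { puts("CT sequence");  }

    /* Scan the ring's mask table for a (rctl, type) match, as
     * lpfc_complete_unsol_iocb() does when no profile is set. */
    static int dispatch_unsol(const struct toy_mask *prt, int num_mask,
                              unsigned rctl, unsigned type)
    {
            int i;

            for (i = 0; i < num_mask; i++) {
                    if (prt[i].rctl == rctl && prt[i].type == type) {
                            if (prt[i].handler)
                                    prt[i].handler();
                            return 1;
                    }
            }
            return 0;       /* unexpected rctl/type: caller logs 0313 */
    }

    int main(void)
    {
            const struct toy_mask prt[] = {
                    { 0x22, 0x01, els_handler },    /* hypothetical values */
                    { 0x02, 0x20, ct_handler  },
            };

            dispatch_unsol(prt, 2, 0x02, 0x20);
            printf("match: %d\n", dispatch_unsol(prt, 2, 0x99, 0x99));
            return 0;
    }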
1320 1924
1321/** 1925/**
1322 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1926 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 IOCB_t * irsp; 1943 IOCB_t * irsp;
1340 WORD5 * w5p; 1944 WORD5 * w5p;
1341 uint32_t Rctl, Type; 1945 uint32_t Rctl, Type;
1342 uint32_t match, i; 1946 uint32_t match;
1343 struct lpfc_iocbq *iocbq; 1947 struct lpfc_iocbq *iocbq;
1344 struct lpfc_dmabuf *dmzbuf; 1948 struct lpfc_dmabuf *dmzbuf;
1345 1949
@@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1482 } 2086 }
1483 } 2087 }
1484 2088
1485 /* unSolicited Responses */ 2089 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
1486 if (pring->prt[0].profile) {
1487 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1488 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1489 saveq);
1490 match = 1;
1491 } else {
1492 /* We must search, based on rctl / type
1493 for the right routine */
1494 for (i = 0; i < pring->num_mask; i++) {
1495 if ((pring->prt[i].rctl == Rctl)
1496 && (pring->prt[i].type == Type)) {
1497 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1498 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1499 (phba, pring, saveq);
1500 match = 1;
1501 break;
1502 }
1503 }
1504 }
1505 if (match == 0) {
1506 /* Unexpected Rctl / Type received */
1507 /* Ring <ringno> handler: unexpected
1508 Rctl <Rctl> Type <Type> received */
1509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1510 "0313 Ring %d handler: unexpected Rctl x%x " 2091 "0313 Ring %d handler: unexpected Rctl x%x "
1511 "Type x%x received\n", 2092 "Type x%x received\n",
1512 pring->ringno, Rctl, Type); 2093 pring->ringno, Rctl, Type);
1513 } 2094
1514 return 1; 2095 return 1;
1515} 2096}
1516 2097
@@ -1552,6 +2133,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1552} 2133}
1553 2134
1554/** 2135/**
2136 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2137 * @phba: Pointer to HBA context object.
2138 * @pring: Pointer to driver SLI ring object.
2139 * @iotag: IOCB tag.
2140 *
2141 * This function looks up the iocb_lookup table to get the command iocb
2142 * corresponding to the given iotag. This function is called with the
2143 * hbalock held.
2144 * This function returns the command iocb object if it finds the command
2145 * iocb else returns NULL.
2146 **/
2147static struct lpfc_iocbq *
2148lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2149 struct lpfc_sli_ring *pring, uint16_t iotag)
2150{
2151 struct lpfc_iocbq *cmd_iocb;
2152
2153 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2154 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2155 list_del_init(&cmd_iocb->list);
2156 pring->txcmplq_cnt--;
2157 return cmd_iocb;
2158 }
2159
2160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2161 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2162 iotag, phba->sli.last_iotag);
2163 return NULL;
2164}
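
[Editor's note] The lookup is a plain array index guarded by the 0 < iotag <= last_iotag range check, with the out-of-range case logged (message 0372). A toy version, minus the txcmplq bookkeeping:

    #include <stdio.h>
    #include <stddef.h>

    #define LAST_IOTAG 32

    struct toy_iocb { int busy; };

    static struct toy_iocb *lookup[LAST_IOTAG + 1];

    /* iotag 0 is reserved, so a valid tag indexes the table directly;
     * anything past last_iotag is rejected and logged, mirroring
     * lpfc_sli_iocbq_lookup_by_tag(). */
    static struct toy_iocb *lookup_by_tag(unsigned iotag)
    {
            if (iotag != 0 && iotag <= LAST_IOTAG)
                    return lookup[iotag];
            fprintf(stderr, "iotag %u out of range (max %u)\n",
                    iotag, LAST_IOTAG);
            return NULL;
    }

    int main(void)
    {
            struct toy_iocb cmd = { .busy = 1 };

            lookup[7] = &cmd;
            printf("tag 7:  %p\n", (void *)lookup_by_tag(7));
            printf("tag 99: %p\n", (void *)lookup_by_tag(99));
            return 0;
    }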
2165
2166/**
1555 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2167 * lpfc_sli_process_sol_iocb - process solicited iocb completion
1556 * @phba: Pointer to HBA context object. 2168 * @phba: Pointer to HBA context object.
1557 * @pring: Pointer to driver SLI ring object. 2169 * @pring: Pointer to driver SLI ring object.
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1954 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2566 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1955 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2567 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1956 spin_unlock_irqrestore(&phba->hbalock, iflag); 2568 spin_unlock_irqrestore(&phba->hbalock, iflag);
1957 lpfc_rampdown_queue_depth(phba); 2569 phba->lpfc_rampdown_queue_depth(phba);
1958 spin_lock_irqsave(&phba->hbalock, iflag); 2570 spin_lock_irqsave(&phba->hbalock, iflag);
1959 } 2571 }
1960 2572
@@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2068} 2680}
2069 2681
2070/** 2682/**
2071 * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings 2683 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2684 * @phba: Pointer to HBA context object.
2685 * @pring: Pointer to driver SLI ring object.
2686 * @rspiocbp: Pointer to driver response IOCB object.
2687 *
2688 * This function is called from the worker thread when there is a slow-path
2689 * response IOCB to process. This function chains all the response iocbs until
2690 * seeing the iocb with the LE bit set. The function will call
2691 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2692 * completion of a command iocb. The function will call the
2693 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2694 * The function frees the resources or calls the completion handler if this
2695 * iocb is an abort completion. The function returns NULL when the response
2696 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2697 * this function shall chain the iocb on to the iocb_continueq and return the
2698 * response iocb passed in.
2699 **/
2700static struct lpfc_iocbq *
2701lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2702 struct lpfc_iocbq *rspiocbp)
2703{
2704 struct lpfc_iocbq *saveq;
2705 struct lpfc_iocbq *cmdiocbp;
2706 struct lpfc_iocbq *next_iocb;
2707 IOCB_t *irsp = NULL;
2708 uint32_t free_saveq;
2709 uint8_t iocb_cmd_type;
2710 lpfc_iocb_type type;
2711 unsigned long iflag;
2712 int rc;
2713
2714 spin_lock_irqsave(&phba->hbalock, iflag);
2715 /* First add the response iocb to the continueq list */
2716 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2717 pring->iocb_continueq_cnt++;
2718
2719 /* Now, determine whether the list is completed for processing */
2720 irsp = &rspiocbp->iocb;
2721 if (irsp->ulpLe) {
2722 /*
2723 * By default, the driver expects to free all resources
2724 * associated with this iocb completion.
2725 */
2726 free_saveq = 1;
2727 saveq = list_get_first(&pring->iocb_continueq,
2728 struct lpfc_iocbq, list);
2729 irsp = &(saveq->iocb);
2730 list_del_init(&pring->iocb_continueq);
2731 pring->iocb_continueq_cnt = 0;
2732
2733 pring->stats.iocb_rsp++;
2734
2735 /*
2736 * If resource errors reported from HBA, reduce
2737 * queuedepths of the SCSI device.
2738 */
2739 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2740 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2741 spin_unlock_irqrestore(&phba->hbalock, iflag);
2742 phba->lpfc_rampdown_queue_depth(phba);
2743 spin_lock_irqsave(&phba->hbalock, iflag);
2744 }
2745
2746 if (irsp->ulpStatus) {
2747 /* Rsp ring <ringno> error: IOCB */
2748 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2749 "0328 Rsp Ring %d error: "
2750 "IOCB Data: "
2751 "x%x x%x x%x x%x "
2752 "x%x x%x x%x x%x "
2753 "x%x x%x x%x x%x "
2754 "x%x x%x x%x x%x\n",
2755 pring->ringno,
2756 irsp->un.ulpWord[0],
2757 irsp->un.ulpWord[1],
2758 irsp->un.ulpWord[2],
2759 irsp->un.ulpWord[3],
2760 irsp->un.ulpWord[4],
2761 irsp->un.ulpWord[5],
2762 *(((uint32_t *) irsp) + 6),
2763 *(((uint32_t *) irsp) + 7),
2764 *(((uint32_t *) irsp) + 8),
2765 *(((uint32_t *) irsp) + 9),
2766 *(((uint32_t *) irsp) + 10),
2767 *(((uint32_t *) irsp) + 11),
2768 *(((uint32_t *) irsp) + 12),
2769 *(((uint32_t *) irsp) + 13),
2770 *(((uint32_t *) irsp) + 14),
2771 *(((uint32_t *) irsp) + 15));
2772 }
2773
2774 /*
2775 * Fetch the IOCB command type and call the correct completion
2776 * routine. Solicited and Unsolicited IOCBs on the ELS ring
2777 * get freed back to the lpfc_iocb_list by the discovery
2778 * kernel thread.
2779 */
2780 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2781 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2782 switch (type) {
2783 case LPFC_SOL_IOCB:
2784 spin_unlock_irqrestore(&phba->hbalock, iflag);
2785 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2786 spin_lock_irqsave(&phba->hbalock, iflag);
2787 break;
2788
2789 case LPFC_UNSOL_IOCB:
2790 spin_unlock_irqrestore(&phba->hbalock, iflag);
2791 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2792 spin_lock_irqsave(&phba->hbalock, iflag);
2793 if (!rc)
2794 free_saveq = 0;
2795 break;
2796
2797 case LPFC_ABORT_IOCB:
2798 cmdiocbp = NULL;
2799 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2800 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2801 saveq);
2802 if (cmdiocbp) {
2803 /* Call the specified completion routine */
2804 if (cmdiocbp->iocb_cmpl) {
2805 spin_unlock_irqrestore(&phba->hbalock,
2806 iflag);
2807 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2808 saveq);
2809 spin_lock_irqsave(&phba->hbalock,
2810 iflag);
2811 } else
2812 __lpfc_sli_release_iocbq(phba,
2813 cmdiocbp);
2814 }
2815 break;
2816
2817 case LPFC_UNKNOWN_IOCB:
2818 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2819 char adaptermsg[LPFC_MAX_ADPTMSG];
2820 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2821 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2822 MAX_MSG_DATA);
2823 dev_warn(&((phba->pcidev)->dev),
2824 "lpfc%d: %s\n",
2825 phba->brd_no, adaptermsg);
2826 } else {
2827 /* Unknown IOCB command */
2828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2829 "0335 Unknown IOCB "
2830 "command Data: x%x "
2831 "x%x x%x x%x\n",
2832 irsp->ulpCommand,
2833 irsp->ulpStatus,
2834 irsp->ulpIoTag,
2835 irsp->ulpContext);
2836 }
2837 break;
2838 }
2839
2840 if (free_saveq) {
2841 list_for_each_entry_safe(rspiocbp, next_iocb,
2842 &saveq->list, list) {
2843 list_del(&rspiocbp->list);
2844 __lpfc_sli_release_iocbq(phba, rspiocbp);
2845 }
2846 __lpfc_sli_release_iocbq(phba, saveq);
2847 }
2848 rspiocbp = NULL;
2849 }
2850 spin_unlock_irqrestore(&phba->hbalock, iflag);
2851 return rspiocbp;
2852}
2853
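The continuation-queue handling above accumulates response iocbs until one arrives with the LE (last entry) bit set, then detaches and processes the whole chain. A stripped-down user-space sketch of that accumulate-until-last pattern, with invented types (not driver code):

#include <stdio.h>

/* Toy response entry: 'le' marks the last entry of a multi-entry reply. */
struct rsp {
        int data;
        int le;
        struct rsp *next;
};

static struct rsp *continueq;           /* chain built up across calls */
static struct rsp **tailp = &continueq;

/* Returns the completed chain when the LE entry arrives, else NULL. */
static struct rsp *handle_rsp(struct rsp *r)
{
        struct rsp *chain = NULL;

        r->next = NULL;
        *tailp = r;                     /* append to continuation queue */
        tailp = &r->next;
        if (r->le) {                    /* chain complete: hand it off */
                chain = continueq;
                continueq = NULL;
                tailp = &continueq;
        }
        return chain;
}

int main(void)
{
        struct rsp a = { 1, 0 }, b = { 2, 0 }, c = { 3, 1 };
        struct rsp *chain, *p;

        handle_rsp(&a);
        handle_rsp(&b);
        chain = handle_rsp(&c);
        for (p = chain; p; p = p->next)
                printf("%d%s", p->data, p->le ? " (LE)\n" : " -> ");
        return 0;
}
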
2854/**
2855 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2072 * @phba: Pointer to HBA context object. 2856 * @phba: Pointer to HBA context object.
2073 * @pring: Pointer to driver SLI ring object. 2857 * @pring: Pointer to driver SLI ring object.
2074 * @mask: Host attention register mask for this ring. 2858 * @mask: Host attention register mask for this ring.
2075 * 2859 *
2076 * This function is called from the worker thread when there is a ring 2860 * This routine wraps the actual slow_ring event process routine via
2077 * event for non-fcp rings. The caller does not hold any lock. 2861 * the API jump table function pointer in the lpfc_hba struct.
2078 * The function processes each response iocb in the response ring until it
2079 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2080 * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
2081 * response iocb indicates a completion of a command iocb. The function
2082 * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
2083 * iocb. The function frees the resources or calls the completion handler if
2084 * this iocb is an abort completion. The function returns 0 when the allocated
2085 * iocbs are not freed, otherwise returns 1.
2086 **/ 2862 **/
2087int 2863void
2088lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2864lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2089 struct lpfc_sli_ring *pring, uint32_t mask) 2865 struct lpfc_sli_ring *pring, uint32_t mask)
2090{ 2866{
2867 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2868}
2869
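The wrapper above illustrates the API jump table this commit introduces: the SLI-3 or SLI-4 implementation is bound to a function pointer once at setup time, and the exported entry points simply dispatch through it. A hedged user-space sketch of the pattern, all names invented:

#include <stdio.h>

struct hba;

/* Per-revision ops, bound once at setup time. */
struct hba_ops {
        void (*handle_slow_ring_event)(struct hba *h);
};

struct hba {
        int sli_rev;
        struct hba_ops ops;
};

static void slow_ring_event_s3(struct hba *h) { (void)h; puts("SLI-3 path"); }
static void slow_ring_event_s4(struct hba *h) { (void)h; puts("SLI-4 path"); }

/* Bind the right implementation once; callers stay revision-agnostic. */
static void hba_api_setup(struct hba *h)
{
        h->ops.handle_slow_ring_event =
                (h->sli_rev >= 4) ? slow_ring_event_s4 : slow_ring_event_s3;
}

/* Exported wrapper: just bounce through the jump table. */
static void hba_handle_slow_ring_event(struct hba *h)
{
        h->ops.handle_slow_ring_event(h);
}

int main(void)
{
        struct hba h = { .sli_rev = 4 };

        hba_api_setup(&h);
        hba_handle_slow_ring_event(&h);
        return 0;
}
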
2870/**
2871 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2872 * @phba: Pointer to HBA context object.
2873 * @pring: Pointer to driver SLI ring object.
2874 * @mask: Host attention register mask for this ring.
2875 *
2876 * This function is called from the worker thread when there is a ring event
2877 * for non-fcp rings. The caller does not hold any lock. The function
2878 * removes each response iocb from the response ring and calls the handle
2879 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2880 **/
2881static void
2882lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2883 struct lpfc_sli_ring *pring, uint32_t mask)
2884{
2091 struct lpfc_pgp *pgp; 2885 struct lpfc_pgp *pgp;
2092 IOCB_t *entry; 2886 IOCB_t *entry;
2093 IOCB_t *irsp = NULL; 2887 IOCB_t *irsp = NULL;
2094 struct lpfc_iocbq *rspiocbp = NULL; 2888 struct lpfc_iocbq *rspiocbp = NULL;
2095 struct lpfc_iocbq *next_iocb;
2096 struct lpfc_iocbq *cmdiocbp;
2097 struct lpfc_iocbq *saveq;
2098 uint8_t iocb_cmd_type;
2099 lpfc_iocb_type type;
2100 uint32_t status, free_saveq;
2101 uint32_t portRspPut, portRspMax; 2889 uint32_t portRspPut, portRspMax;
2102 int rc = 1;
2103 unsigned long iflag; 2890 unsigned long iflag;
2891 uint32_t status;
2104 2892
2105 pgp = &phba->port_gp[pring->ringno]; 2893 pgp = &phba->port_gp[pring->ringno];
2106 spin_lock_irqsave(&phba->hbalock, iflag); 2894 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2128 phba->work_hs = HS_FFER3; 2916 phba->work_hs = HS_FFER3;
2129 lpfc_handle_eratt(phba); 2917 lpfc_handle_eratt(phba);
2130 2918
2131 return 1; 2919 return;
2132 } 2920 }
2133 2921
2134 rmb(); 2922 rmb();
@@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2173 2961
2174 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2962 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2175 2963
2176 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2964 spin_unlock_irqrestore(&phba->hbalock, iflag);
2177 2965 /* Handle the response IOCB */
2178 pring->iocb_continueq_cnt++; 2966 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2179 if (irsp->ulpLe) { 2967 spin_lock_irqsave(&phba->hbalock, iflag);
2180 /*
2181 * By default, the driver expects to free all resources
2182 * associated with this iocb completion.
2183 */
2184 free_saveq = 1;
2185 saveq = list_get_first(&pring->iocb_continueq,
2186 struct lpfc_iocbq, list);
2187 irsp = &(saveq->iocb);
2188 list_del_init(&pring->iocb_continueq);
2189 pring->iocb_continueq_cnt = 0;
2190
2191 pring->stats.iocb_rsp++;
2192
2193 /*
2194 * If resource errors reported from HBA, reduce
2195 * queuedepths of the SCSI device.
2196 */
2197 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2198 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2199 spin_unlock_irqrestore(&phba->hbalock, iflag);
2200 lpfc_rampdown_queue_depth(phba);
2201 spin_lock_irqsave(&phba->hbalock, iflag);
2202 }
2203
2204 if (irsp->ulpStatus) {
2205 /* Rsp ring <ringno> error: IOCB */
2206 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2207 "0328 Rsp Ring %d error: "
2208 "IOCB Data: "
2209 "x%x x%x x%x x%x "
2210 "x%x x%x x%x x%x "
2211 "x%x x%x x%x x%x "
2212 "x%x x%x x%x x%x\n",
2213 pring->ringno,
2214 irsp->un.ulpWord[0],
2215 irsp->un.ulpWord[1],
2216 irsp->un.ulpWord[2],
2217 irsp->un.ulpWord[3],
2218 irsp->un.ulpWord[4],
2219 irsp->un.ulpWord[5],
2220 *(((uint32_t *) irsp) + 6),
2221 *(((uint32_t *) irsp) + 7),
2222 *(((uint32_t *) irsp) + 8),
2223 *(((uint32_t *) irsp) + 9),
2224 *(((uint32_t *) irsp) + 10),
2225 *(((uint32_t *) irsp) + 11),
2226 *(((uint32_t *) irsp) + 12),
2227 *(((uint32_t *) irsp) + 13),
2228 *(((uint32_t *) irsp) + 14),
2229 *(((uint32_t *) irsp) + 15));
2230 }
2231
2232 /*
2233 * Fetch the IOCB command type and call the correct
2234 * completion routine. Solicited and Unsolicited
2235 * IOCBs on the ELS ring get freed back to the
2236 * lpfc_iocb_list by the discovery kernel thread.
2237 */
2238 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2239 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2240 if (type == LPFC_SOL_IOCB) {
2241 spin_unlock_irqrestore(&phba->hbalock, iflag);
2242 rc = lpfc_sli_process_sol_iocb(phba, pring,
2243 saveq);
2244 spin_lock_irqsave(&phba->hbalock, iflag);
2245 } else if (type == LPFC_UNSOL_IOCB) {
2246 spin_unlock_irqrestore(&phba->hbalock, iflag);
2247 rc = lpfc_sli_process_unsol_iocb(phba, pring,
2248 saveq);
2249 spin_lock_irqsave(&phba->hbalock, iflag);
2250 if (!rc)
2251 free_saveq = 0;
2252 } else if (type == LPFC_ABORT_IOCB) {
2253 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
2254 ((cmdiocbp =
2255 lpfc_sli_iocbq_lookup(phba, pring,
2256 saveq)))) {
2257 /* Call the specified completion
2258 routine */
2259 if (cmdiocbp->iocb_cmpl) {
2260 spin_unlock_irqrestore(
2261 &phba->hbalock,
2262 iflag);
2263 (cmdiocbp->iocb_cmpl) (phba,
2264 cmdiocbp, saveq);
2265 spin_lock_irqsave(
2266 &phba->hbalock,
2267 iflag);
2268 } else
2269 __lpfc_sli_release_iocbq(phba,
2270 cmdiocbp);
2271 }
2272 } else if (type == LPFC_UNKNOWN_IOCB) {
2273 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2274
2275 char adaptermsg[LPFC_MAX_ADPTMSG];
2276
2277 memset(adaptermsg, 0,
2278 LPFC_MAX_ADPTMSG);
2279 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2280 MAX_MSG_DATA);
2281 dev_warn(&((phba->pcidev)->dev),
2282 "lpfc%d: %s\n",
2283 phba->brd_no, adaptermsg);
2284 } else {
2285 /* Unknown IOCB command */
2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2287 "0335 Unknown IOCB "
2288 "command Data: x%x "
2289 "x%x x%x x%x\n",
2290 irsp->ulpCommand,
2291 irsp->ulpStatus,
2292 irsp->ulpIoTag,
2293 irsp->ulpContext);
2294 }
2295 }
2296
2297 if (free_saveq) {
2298 list_for_each_entry_safe(rspiocbp, next_iocb,
2299 &saveq->list, list) {
2300 list_del(&rspiocbp->list);
2301 __lpfc_sli_release_iocbq(phba,
2302 rspiocbp);
2303 }
2304 __lpfc_sli_release_iocbq(phba, saveq);
2305 }
2306 rspiocbp = NULL;
2307 }
2308 2968
2309 /* 2969 /*
2310 * If the port response put pointer has not been updated, sync 2970 * If the port response put pointer has not been updated, sync
@@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2338 } 2998 }
2339 2999
2340 spin_unlock_irqrestore(&phba->hbalock, iflag); 3000 spin_unlock_irqrestore(&phba->hbalock, iflag);
2341 return rc; 3001 return;
3002}
3003
3004/**
3005 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3006 * @phba: Pointer to HBA context object.
3007 * @pring: Pointer to driver SLI ring object.
3008 * @mask: Host attention register mask for this ring.
3009 *
3010 * This function is called from the worker thread when there is a pending
3011 * ELS response iocb on the driver internal slow-path response iocb worker
3012 * queue. The caller does not hold any lock. The function removes each
3013 * response iocb from the response worker queue and calls the handle
3014 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3015 **/
3016static void
3017lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask)
3019{
3020 struct lpfc_iocbq *irspiocbq;
3021 unsigned long iflag;
3022
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
3024 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
3027 irspiocbq, struct lpfc_iocbq, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
3031 }
2342} 3032}
2343 3033
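The SLI4 handler above uses a common drain pattern: detach one item from the shared queue while holding the lock, then process it with the lock dropped so the processing step is free to block or take other locks. A small pthread-based sketch of the same pattern (illustrative only; link with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct work {
        int id;
        struct work *next;
};

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static struct work *head;

/* Drain pattern: pop one item under the lock, process it unlocked. */
static void drain(void (*process)(struct work *))
{
        struct work *w;

        for (;;) {
                pthread_mutex_lock(&qlock);
                w = head;
                if (w)
                        head = w->next; /* remove head while protected */
                pthread_mutex_unlock(&qlock);
                if (!w)
                        break;
                process(w);             /* may block or take other locks safely */
        }
}

static void show(struct work *w) { printf("work %d\n", w->id); }

int main(void)
{
        struct work b = { 2, NULL }, a = { 1, &b };

        head = &a;
        drain(show);
        return 0;
}
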
2344/** 3034/**
@@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2420} 3110}
2421 3111
2422/** 3112/**
2423 * lpfc_sli_brdready - Check for host status bits 3113 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
2424 * @phba: Pointer to HBA context object. 3114 * @phba: Pointer to HBA context object.
2425 * @mask: Bit mask to be checked. 3115 * @mask: Bit mask to be checked.
2426 * 3116 *
@@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2432 * function returns 1 when the HBA fails to restart, otherwise returns 3122 * function returns 1 when the HBA fails to restart, otherwise returns
2433 * zero. 3123 * zero.
2434 **/ 3124 **/
2435int 3125static int
2436lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3126lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2437{ 3127{
2438 uint32_t status; 3128 uint32_t status;
2439 int i = 0; 3129 int i = 0;
@@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
2477 return retval; 3167 return retval;
2478} 3168}
2479 3169
3170/**
3171 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3172 * @phba: Pointer to HBA context object.
3173 * @mask: Bit mask to be checked.
3174 *
3175 * This function checks the host status register to see if the HBA is
3176 * ready, waiting in a loop for it to become ready. If the HBA is not
3177 * ready, the function resets the HBA PCI function and checks again.
3178 * The function returns 1 when the HBA fails to become ready, otherwise
3179 * it returns zero.
3180 **/
3181static int
3182lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3183{
3184 uint32_t status;
3185 int retval = 0;
3186
3187 /* Read the HBA Host Status Register */
3188 status = lpfc_sli4_post_status_check(phba);
3189
3190 if (status) {
3191 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3192 lpfc_sli_brdrestart(phba);
3193 status = lpfc_sli4_post_status_check(phba);
3194 }
3195
3196 /* Check to see if any errors occurred during init */
3197 if (status) {
3198 phba->link_state = LPFC_HBA_ERROR;
3199 retval = 1;
3200 } else
3201 phba->sli4_hba.intr_enable = 0;
3202
3203 return retval;
3204}
3205
3206/**
3207 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3208 * @phba: Pointer to HBA context object.
3209 * @mask: Bit mask to be checked.
3210 *
3211 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3212 * via the API jump table function pointer in the lpfc_hba struct.
3213 **/
3214int
3215lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3216{
3217 return phba->lpfc_sli_brdready(phba, mask);
3218}
3219
2480#define BARRIER_TEST_PATTERN (0xdeadbeef) 3220#define BARRIER_TEST_PATTERN (0xdeadbeef)
2481 3221
2482/** 3222/**
@@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
2532 mdelay(1); 3272 mdelay(1);
2533 3273
2534 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3274 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
2535 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 3275 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2536 phba->pport->stopped) 3276 phba->pport->stopped)
2537 goto restore_hc; 3277 goto restore_hc;
2538 else 3278 else
@@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2613 return 1; 3353 return 1;
2614 } 3354 }
2615 3355
2616 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 3356 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock);
2617 3359
2618 mempool_free(pmb, phba->mbox_mem_pool); 3360 mempool_free(pmb, phba->mbox_mem_pool);
2619 3361
@@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2636 } 3378 }
2637 spin_lock_irq(&phba->hbalock); 3379 spin_lock_irq(&phba->hbalock);
2638 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3380 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3381 psli->mbox_active = NULL;
2639 phba->link_flag &= ~LS_IGNORE_ERATT; 3382 phba->link_flag &= ~LS_IGNORE_ERATT;
2640 spin_unlock_irq(&phba->hbalock); 3383 spin_unlock_irq(&phba->hbalock);
2641 3384
2642 psli->mbox_active = NULL;
2643 lpfc_hba_down_post(phba); 3385 lpfc_hba_down_post(phba);
2644 phba->link_state = LPFC_HBA_ERROR; 3386 phba->link_state = LPFC_HBA_ERROR;
2645 3387
@@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2647} 3389}
2648 3390
2649/** 3391/**
2650 * lpfc_sli_brdreset - Reset the HBA 3392 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
2651 * @phba: Pointer to HBA context object. 3393 * @phba: Pointer to HBA context object.
2652 * 3394 *
2653 * This function resets the HBA by writing HC_INITFF to the control 3395 * This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2683 (cfg_value & 3425 (cfg_value &
2684 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3426 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2685 3427
2686 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 3428 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3429
2687 /* Now toggle INITFF bit in the Host Control Register */ 3430 /* Now toggle INITFF bit in the Host Control Register */
2688 writel(HC_INITFF, phba->HCregaddr); 3431 writel(HC_INITFF, phba->HCregaddr);
2689 mdelay(1); 3432 mdelay(1);
@@ -2710,7 +3453,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2710} 3453}
2711 3454
2712/** 3455/**
2713 * lpfc_sli_brdrestart - Restart the HBA 3456 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3457 * @phba: Pointer to HBA context object.
3458 *
3459 * This function resets a SLI4 HBA. It disables PCI layer parity
3460 * checking while it resets the device. The caller is not required to hold
3461 * any locks.
3462 *
3463 * This function returns 0 always.
3464 **/
3465int
3466lpfc_sli4_brdreset(struct lpfc_hba *phba)
3467{
3468 struct lpfc_sli *psli = &phba->sli;
3469 uint16_t cfg_value;
3470 uint8_t qindx;
3471
3472 /* Reset HBA */
3473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3474 "0295 Reset HBA Data: x%x x%x\n",
3475 phba->pport->port_state, psli->sli_flag);
3476
3477 /* perform board reset */
3478 phba->fc_eventTag = 0;
3479 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0;
3481
3482 /* Turn off parity checking and serr during the physical reset */
3483 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3484 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3485 (cfg_value &
3486 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3487
3488 spin_lock_irq(&phba->hbalock);
3489 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3490 phba->fcf.fcf_flag = 0;
3491 /* Clean up the child queue list for the CQs */
3492 list_del_init(&phba->sli4_hba.mbx_wq->list);
3493 list_del_init(&phba->sli4_hba.els_wq->list);
3494 list_del_init(&phba->sli4_hba.hdr_rq->list);
3495 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3502 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3503 spin_unlock_irq(&phba->hbalock);
3504
3505 /* Now physically reset the device */
3506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3507 "0389 Performing PCI function reset!\n");
3508 /* Perform FCoE PCI function reset */
3509 lpfc_pci_function_reset(phba);
3510
3511 return 0;
3512}
3513
3514/**
3515 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
2714 * @phba: Pointer to HBA context object. 3516 * @phba: Pointer to HBA context object.
2715 * 3517 *
2716 * This function is called in the SLI initialization code path to 3518 * This function is called in the SLI initialization code path to
@@ -2722,8 +3524,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2722 * The function does not guarantee completion of MBX_RESTART mailbox 3524 * The function does not guarantee completion of MBX_RESTART mailbox
2723 * command before the return of this function. 3525 * command before the return of this function.
2724 **/ 3526 **/
2725int 3527static int
2726lpfc_sli_brdrestart(struct lpfc_hba *phba) 3528lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
2727{ 3529{
2728 MAILBOX_t *mb; 3530 MAILBOX_t *mb;
2729 struct lpfc_sli *psli; 3531 struct lpfc_sli *psli;
@@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2762 lpfc_sli_brdreset(phba); 3564 lpfc_sli_brdreset(phba);
2763 phba->pport->stopped = 0; 3565 phba->pport->stopped = 0;
2764 phba->link_state = LPFC_INIT_START; 3566 phba->link_state = LPFC_INIT_START;
2765 3567 phba->hba_flag = 0;
2766 spin_unlock_irq(&phba->hbalock); 3568 spin_unlock_irq(&phba->hbalock);
2767 3569
2768 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3570 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2777,6 +3579,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2777} 3579}
2778 3580
2779/** 3581/**
3582 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3583 * @phba: Pointer to HBA context object.
3584 *
3585 * This function is called in the SLI initialization code path to restart
3586 * a SLI4 HBA. The caller is not required to hold any lock.
3587 * At the end of the function, it calls lpfc_hba_down_post function to
3588 * free any pending commands.
3589 **/
3590static int
3591lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3592{
3593 struct lpfc_sli *psli = &phba->sli;
3594
3595
3596 /* Restart HBA */
3597 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3598 "0296 Restart HBA Data: x%x x%x\n",
3599 phba->pport->port_state, psli->sli_flag);
3600
3601 lpfc_sli4_brdreset(phba);
3602
3603 spin_lock_irq(&phba->hbalock);
3604 phba->pport->stopped = 0;
3605 phba->link_state = LPFC_INIT_START;
3606 phba->hba_flag = 0;
3607 spin_unlock_irq(&phba->hbalock);
3608
3609 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3610 psli->stats_start = get_seconds();
3611
3612 lpfc_hba_down_post(phba);
3613
3614 return 0;
3615}
3616
3617/**
3618 * lpfc_sli_brdrestart - Wrapper func for restarting hba
3619 * @phba: Pointer to HBA context object.
3620 *
3621 * This routine wraps the actual SLI3 or SLI4 hba restart routine via the
3622 * API jump table function pointer in the lpfc_hba struct.
3623 **/
3624int
3625lpfc_sli_brdrestart(struct lpfc_hba *phba)
3626{
3627 return phba->lpfc_sli_brdrestart(phba);
3628}
3629
3630/**
2780 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 3631 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
2781 * @phba: Pointer to HBA context object. 3632 * @phba: Pointer to HBA context object.
2782 * 3633 *
@@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2940 if (!pmb) 3791 if (!pmb)
2941 return -ENOMEM; 3792 return -ENOMEM;
2942 3793
2943 pmbox = &pmb->mb; 3794 pmbox = &pmb->u.mb;
2944 3795
2945 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 3796 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2946 phba->link_state = LPFC_INIT_MBX_CMDS; 3797 phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -2984,6 +3835,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2984} 3835}
2985 3836
2986/** 3837/**
3838 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3839 * @phba: Pointer to HBA context object.
3840 *
3841 * This function is called during the SLI initialization to configure
3842 * all the HBQs and post buffers to the HBQ. The caller is not
3843 * required to hold any locks. This function returns zero if successful,
3844 * otherwise a negative error code.
3845 **/
3846static int
3847lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3848{
3849 phba->hbq_in_use = 1;
3850 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3851 phba->hbq_count = 1;
3852 /* Initially populate or replenish the HBQs */
3853 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3854 return 0;
3855}
3856
3857/**
2987 * lpfc_sli_config_port - Issue config port mailbox command 3858 * lpfc_sli_config_port - Issue config port mailbox command
2988 * @phba: Pointer to HBA context object. 3859 * @phba: Pointer to HBA context object.
2989 * @sli_mode: sli mode - 2/3 3860 * @sli_mode: sli mode - 2/3
@@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3048 "0442 Adapter failed to init, mbxCmd x%x " 3919 "0442 Adapter failed to init, mbxCmd x%x "
3049 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 3920 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3050 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); 3921 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
3051 spin_lock_irq(&phba->hbalock); 3922 spin_lock_irq(&phba->hbalock);
3052 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 3923 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
3053 spin_unlock_irq(&phba->hbalock); 3924 spin_unlock_irq(&phba->hbalock);
3054 rc = -ENXIO; 3925 rc = -ENXIO;
3055 } else 3926 } else {
3927 /* Allow asynchronous mailbox command to go through */
3928 spin_lock_irq(&phba->hbalock);
3929 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3930 spin_unlock_irq(&phba->hbalock);
3056 done = 1; 3931 done = 1;
3932 }
3057 } 3933 }
3058 if (!done) { 3934 if (!done) {
3059 rc = -EINVAL; 3935 rc = -EINVAL;
3060 goto do_prep_failed; 3936 goto do_prep_failed;
3061 } 3937 }
3062 if (pmb->mb.un.varCfgPort.sli_mode == 3) { 3938 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3063 if (!pmb->mb.un.varCfgPort.cMA) { 3939 if (!pmb->u.mb.un.varCfgPort.cMA) {
3064 rc = -ENXIO; 3940 rc = -ENXIO;
3065 goto do_prep_failed; 3941 goto do_prep_failed;
3066 } 3942 }
3067 if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { 3943 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
3068 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3944 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3069 phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; 3945 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3946 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3947 phba->max_vpi : phba->max_vports;
3948
3070 } else 3949 } else
3071 phba->max_vpi = 0; 3950 phba->max_vpi = 0;
3072 if (pmb->mb.un.varCfgPort.gerbm) 3951 if (pmb->u.mb.un.varCfgPort.gdss)
3952 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3953 if (pmb->u.mb.un.varCfgPort.gerbm)
3073 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3954 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3074 if (pmb->mb.un.varCfgPort.gcrp) 3955 if (pmb->u.mb.un.varCfgPort.gcrp)
3075 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3956 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3076 if (pmb->mb.un.varCfgPort.ginb) { 3957 if (pmb->u.mb.un.varCfgPort.ginb) {
3077 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3958 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3078 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; 3959 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3079 phba->port_gp = phba->mbox->us.s3_inb_pgp.port; 3960 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3089 } 3970 }
3090 3971
3091 if (phba->cfg_enable_bg) { 3972 if (phba->cfg_enable_bg) {
3092 if (pmb->mb.un.varCfgPort.gbg) 3973 if (pmb->u.mb.un.varCfgPort.gbg)
3093 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 3974 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3094 else 3975 else
3095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
3184 if (rc) 4065 if (rc)
3185 goto lpfc_sli_hba_setup_error; 4066 goto lpfc_sli_hba_setup_error;
3186 } 4067 }
3187 4068 spin_lock_irq(&phba->hbalock);
3188 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4069 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4070 spin_unlock_irq(&phba->hbalock);
3189 4071
3190 rc = lpfc_config_port_post(phba); 4072 rc = lpfc_config_port_post(phba);
3191 if (rc) 4073 if (rc)
@@ -3200,6 +4082,493 @@ lpfc_sli_hba_setup_error:
3200 return rc; 4082 return rc;
3201} 4083}
3202 4084
4085/**
4086 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4087 * @phba: Pointer to HBA context object.
4088 * @mboxq: mailbox pointer.
4089 * This function issues a dump mailbox command to read config region
4090 * 23, parses the records in the region, and populates the driver
4091 * data structures.
4092 **/
4093static int
4094lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4095 LPFC_MBOXQ_t *mboxq)
4096{
4097 struct lpfc_dmabuf *mp;
4098 struct lpfc_mqe *mqe;
4099 uint32_t data_length;
4100 int rc;
4101
4102 /* Program the default value of vlan_id and fc_map */
4103 phba->valid_vlan = 0;
4104 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4105 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4106 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4107
4108 mqe = &mboxq->u.mqe;
4109 if (lpfc_dump_fcoe_param(phba, mboxq))
4110 return -ENOMEM;
4111
4112 mp = (struct lpfc_dmabuf *) mboxq->context1;
4113 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4114
4115 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4116 "(%d):2571 Mailbox cmd x%x Status x%x "
4117 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4118 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4119 "CQ: x%x x%x x%x x%x\n",
4120 mboxq->vport ? mboxq->vport->vpi : 0,
4121 bf_get(lpfc_mqe_command, mqe),
4122 bf_get(lpfc_mqe_status, mqe),
4123 mqe->un.mb_words[0], mqe->un.mb_words[1],
4124 mqe->un.mb_words[2], mqe->un.mb_words[3],
4125 mqe->un.mb_words[4], mqe->un.mb_words[5],
4126 mqe->un.mb_words[6], mqe->un.mb_words[7],
4127 mqe->un.mb_words[8], mqe->un.mb_words[9],
4128 mqe->un.mb_words[10], mqe->un.mb_words[11],
4129 mqe->un.mb_words[12], mqe->un.mb_words[13],
4130 mqe->un.mb_words[14], mqe->un.mb_words[15],
4131 mqe->un.mb_words[16], mqe->un.mb_words[50],
4132 mboxq->mcqe.word0,
4133 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4134 mboxq->mcqe.trailer);
4135
4136 if (rc) {
4137 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4138 kfree(mp);
4139 return -EIO;
4140 }
4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
4143 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4144 kfree(mp);
4145 return -EIO;
4146 }
4147
4148 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4149 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4150 kfree(mp);
4151 return 0;
4152}
4153
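The routine above reads config region 23 into a DMA buffer and then walks the records it contains. As a rough illustration of walking length-prefixed records in a bounded buffer, here is a toy sketch; the <type><len><payload> layout is invented and is not the real region 23 format:

#include <stdio.h>

/* Toy record walker: each record is <type><len><len bytes of payload>. */
static void parse_region(const unsigned char *buf, unsigned int len)
{
        unsigned int off = 0;

        while (off + 2 <= len) {
                unsigned char type = buf[off];
                unsigned char rlen = buf[off + 1];

                if (off + 2 + rlen > len)
                        break;          /* truncated record: stop cleanly */
                printf("record type 0x%02x, %u byte(s)\n", type, rlen);
                off += 2 + rlen;
        }
}

int main(void)
{
        const unsigned char region[] = { 0x01, 0x02, 0xaa, 0xbb,
                                         0x02, 0x01, 0xcc };

        parse_region(region, sizeof(region));
        return 0;
}
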
4154/**
4155 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4156 * @phba: pointer to lpfc hba data structure.
4157 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4158 * @vpd: pointer to the memory to hold resulting port vpd data.
4159 * @vpd_size: On input, the number of bytes allocated to @vpd.
4160 * On output, the number of data bytes in @vpd.
4161 *
4162 * This routine executes a READ_REV SLI4 mailbox command. In
4163 * addition, this routine gets the port vpd data.
4164 *
4165 * Return codes
4166 * 0 - successful
4167 * ENOMEM - could not allocate memory.
4168 **/
4169static int
4170lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4171 uint8_t *vpd, uint32_t *vpd_size)
4172{
4173 int rc = 0;
4174 uint32_t dma_size;
4175 struct lpfc_dmabuf *dmabuf;
4176 struct lpfc_mqe *mqe;
4177
4178 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4179 if (!dmabuf)
4180 return -ENOMEM;
4181
4182 /*
4183 * Get a DMA buffer for the vpd data resulting from the READ_REV
4184 * mailbox command.
4185 */
4186 dma_size = *vpd_size;
4187 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4188 dma_size,
4189 &dmabuf->phys,
4190 GFP_KERNEL);
4191 if (!dmabuf->virt) {
4192 kfree(dmabuf);
4193 return -ENOMEM;
4194 }
4195 memset(dmabuf->virt, 0, dma_size);
4196
4197 /*
4198 * The SLI4 implementation of READ_REV conflicts at word1,
4199 * bits 31:16 and SLI4 adds vpd functionality not present
4200 * in SLI3. This code corrects the conflicts.
4201 */
4202 lpfc_read_rev(phba, mboxq);
4203 mqe = &mboxq->u.mqe;
4204 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4205 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4206 mqe->un.read_rev.word1 &= 0x0000FFFF;
4207 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4208 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4209
4210 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4211 if (rc) {
4212 dma_free_coherent(&phba->pcidev->dev, dma_size,
4213 dmabuf->virt, dmabuf->phys);
4214 return -EIO;
4215 }
4216
4217 /*
4218 * The available vpd length cannot be bigger than the
4219 * DMA buffer passed to the port. Catch the less than
4220 * case and update the caller's size.
4221 */
4222 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4223 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4224
4225 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4226 dma_free_coherent(&phba->pcidev->dev, dma_size,
4227 dmabuf->virt, dmabuf->phys);
4228 kfree(dmabuf);
4229 return 0;
4230}
4231
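Note the in/out contract on @vpd_size above: the caller passes the buffer capacity in, and the routine writes back the number of bytes actually valid when the port returns less. A tiny user-space sketch of that contract, with an invented payload:

#include <stdio.h>
#include <string.h>

/*
 * In/out size contract: caller passes buffer capacity in *size, callee
 * copies at most that much and writes back how many bytes are valid.
 */
static int read_vpd(unsigned char *buf, unsigned int *size)
{
        static const unsigned char port_vpd[] = "VPD: example payload";
        unsigned int avail = sizeof(port_vpd);

        if (avail < *size)
                *size = avail;          /* shrink to what the port returned */
        memcpy(buf, port_vpd, *size);
        return 0;
}

int main(void)
{
        unsigned char vpd[128];
        unsigned int vpd_size = sizeof(vpd);

        if (!read_vpd(vpd, &vpd_size))
                printf("got %u bytes: %s\n", vpd_size, vpd);
        return 0;
}
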
4232/**
4233 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4234 * @phba: pointer to lpfc hba data structure.
4235 *
4236 * This routine is called to explicitly arm the SLI4 device's completion and
4237 * event queues.
4238 **/
4239static void
4240lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4241{
4242 uint8_t fcp_eqidx;
4243
4244 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4245 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4246 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4247 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4248 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4249 LPFC_QUEUE_REARM);
4250 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4251 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4252 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4253 LPFC_QUEUE_REARM);
4254}
4255
4256/**
4257 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4258 * @phba: Pointer to HBA context object.
4259 *
4260 * This function is the main SLI4 device initialization PCI function. This
4261 * function is called by the HBA initialization code, HBA reset code and
4262 * HBA error attention handler code. Caller is not required to hold any
4263 * locks.
4264 **/
4265int
4266lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4267{
4268 int rc;
4269 LPFC_MBOXQ_t *mboxq;
4270 struct lpfc_mqe *mqe;
4271 uint8_t *vpd;
4272 uint32_t vpd_size;
4273 uint32_t ftr_rsp = 0;
4274 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4275 struct lpfc_vport *vport = phba->pport;
4276 struct lpfc_dmabuf *mp;
4277
4278 /* Perform a PCI function reset to start from clean */
4279 rc = lpfc_pci_function_reset(phba);
4280 if (unlikely(rc))
4281 return -ENODEV;
4282
4283 /* Check the HBA Host Status Register for readiness */
4284 rc = lpfc_sli4_post_status_check(phba);
4285 if (unlikely(rc))
4286 return -ENODEV;
4287 else {
4288 spin_lock_irq(&phba->hbalock);
4289 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4290 spin_unlock_irq(&phba->hbalock);
4291 }
4292
4293 /*
4294 * Allocate a single mailbox container for initializing the
4295 * port.
4296 */
4297 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4298 if (!mboxq)
4299 return -ENOMEM;
4300
4301 /*
4302 * Continue initialization with default values even if driver failed
4303 * to read FCoE param config regions
4304 */
4305 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4306 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4307 "2570 Failed to read FCoE parameters \n");
4308
4309 /* Issue READ_REV to collect vpd and FW information. */
4310 vpd_size = PAGE_SIZE;
4311 vpd = kzalloc(vpd_size, GFP_KERNEL);
4312 if (!vpd) {
4313 rc = -ENOMEM;
4314 goto out_free_mbox;
4315 }
4316
4317 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4318 if (unlikely(rc))
4319 goto out_free_vpd;
4320
4321 mqe = &mboxq->u.mqe;
4322 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4323 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4324 phba->hba_flag |= HBA_FCOE_SUPPORT;
4325 if (phba->sli_rev != LPFC_SLI_REV4 ||
4326 !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4327 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4328 "0376 READ_REV Error. SLI Level %d "
4329 "FCoE enabled %d\n",
4330 phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
4331 rc = -EIO;
4332 goto out_free_vpd;
4333 }
4334 /*
4335 * Evaluate the read rev and vpd data. Populate the driver
4336 * state with the results. If this routine fails, the failure
4337 * is not fatal as the driver will use generic values.
4338 */
4339 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4340 if (unlikely(!rc)) {
4341 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4342 "0377 Error %d parsing vpd. "
4343 "Using defaults.\n", rc);
4344 rc = 0;
4345 }
4346
4347 /* Save information as VPD data */
4348 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
4349 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
4350 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
4351 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
4352 &mqe->un.read_rev);
4353 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
4354 &mqe->un.read_rev);
4355 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
4356 &mqe->un.read_rev);
4357 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
4358 &mqe->un.read_rev);
4359 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
4360 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
4361 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
4362 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
4363 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
4364 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
4365 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4366 "(%d):0380 READ_REV Status x%x "
4367 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
4368 mboxq->vport ? mboxq->vport->vpi : 0,
4369 bf_get(lpfc_mqe_status, mqe),
4370 phba->vpd.rev.opFwName,
4371 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
4372 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
4373
4374 /*
4375 * Discover the port's supported feature set and match it against the
4376 * hosts requests.
4377 */
4378 lpfc_request_features(phba, mboxq);
4379 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4380 if (unlikely(rc)) {
4381 rc = -EIO;
4382 goto out_free_vpd;
4383 }
4384
4385 /*
4386 * The port must support FCP initiator mode as this is the
4387 * only mode running in the host.
4388 */
4389 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4390 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4391 "0378 No support for fcpi mode.\n");
4392 ftr_rsp++;
4393 }
4394
4395 /*
4396 * If the port cannot support the host's requested features
4397 * then turn off the global config parameters to disable the
4398 * feature in the driver. This is not a fatal error.
4399 */
4400 if ((phba->cfg_enable_bg) &&
4401 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4402 ftr_rsp++;
4403
4404 if (phba->max_vpi && phba->cfg_enable_npiv &&
4405 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4406 ftr_rsp++;
4407
4408 if (ftr_rsp) {
4409 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4410 "0379 Feature Mismatch Data: x%08x %08x "
4411 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4412 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4413 phba->cfg_enable_npiv, phba->max_vpi);
4414 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4415 phba->cfg_enable_bg = 0;
4416 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4417 phba->cfg_enable_npiv = 0;
4418 }
4419
4420 /* These SLI3 features are assumed in SLI4 */
4421 spin_lock_irq(&phba->hbalock);
4422 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4423 spin_unlock_irq(&phba->hbalock);
4424
4425 /* Read the port's service parameters. */
4426 lpfc_read_sparam(phba, mboxq, vport->vpi);
4427 mboxq->vport = vport;
4428 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4429 mp = (struct lpfc_dmabuf *) mboxq->context1;
4430 if (rc == MBX_SUCCESS) {
4431 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4432 rc = 0;
4433 }
4434
4435 /*
4436 * This memory was allocated by the lpfc_read_sparam routine. Release
4437 * it to the mbuf pool.
4438 */
4439 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4440 kfree(mp);
4441 mboxq->context1 = NULL;
4442 if (unlikely(rc)) {
4443 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4444 "0382 READ_SPARAM command failed "
4445 "status %d, mbxStatus x%x\n",
4446 rc, bf_get(lpfc_mqe_status, mqe));
4447 phba->link_state = LPFC_HBA_ERROR;
4448 rc = -EIO;
4449 goto out_free_vpd;
4450 }
4451
4452 if (phba->cfg_soft_wwnn)
4453 u64_to_wwn(phba->cfg_soft_wwnn,
4454 vport->fc_sparam.nodeName.u.wwn);
4455 if (phba->cfg_soft_wwpn)
4456 u64_to_wwn(phba->cfg_soft_wwpn,
4457 vport->fc_sparam.portName.u.wwn);
4458 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4459 sizeof(struct lpfc_name));
4460 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4461 sizeof(struct lpfc_name));
4462
4463 /* Update the fc_host data structures with new wwn. */
4464 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4465 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4466
4467 /* Register SGL pool to the device using non-embedded mailbox command */
4468 rc = lpfc_sli4_post_sgl_list(phba);
4469 if (unlikely(rc)) {
4470 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4471 "0582 Error %d during sgl post operation", rc);
4472 rc = -ENODEV;
4473 goto out_free_vpd;
4474 }
4475
4476 /* Register SCSI SGL pool to the device */
4477 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4478 if (unlikely(rc)) {
4479 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4480 "0383 Error %d during scsi sgl post opeation",
4481 rc);
4482 /* Some Scsi buffers were moved to the abort scsi list */
4483 /* A pci function reset will repost them */
4484 rc = -ENODEV;
4485 goto out_free_vpd;
4486 }
4487
4488 /* Post the rpi header region to the device. */
4489 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4490 if (unlikely(rc)) {
4491 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4492 "0393 Error %d during rpi post operation\n",
4493 rc);
4494 rc = -ENODEV;
4495 goto out_free_vpd;
4496 }
4497 if (phba->cfg_enable_fip)
4498 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
4499 else
4500 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4501
4502 /* Set up all the queues to the device */
4503 rc = lpfc_sli4_queue_setup(phba);
4504 if (unlikely(rc)) {
4505 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4506 "0381 Error %d during queue setup.\n ", rc);
4507 goto out_stop_timers;
4508 }
4509
4510 /* Arm the CQs and then EQs on device */
4511 lpfc_sli4_arm_cqeq_intr(phba);
4512
4513 /* Indicate device interrupt mode */
4514 phba->sli4_hba.intr_enable = 1;
4515
4516 /* Allow asynchronous mailbox command to go through */
4517 spin_lock_irq(&phba->hbalock);
4518 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4519 spin_unlock_irq(&phba->hbalock);
4520
4521 /* Post receive buffers to the device */
4522 lpfc_sli4_rb_setup(phba);
4523
4524 /* Start the ELS watchdog timer */
4525 /*
4526 * The driver for SLI4 is not yet ready to process timeouts
4527 * or interrupts. Once it is, the comment bars can be removed.
4528 */
4529 /* mod_timer(&vport->els_tmofunc,
4530 * jiffies + HZ * (phba->fc_ratov*2)); */
4531
4532 /* Start heart beat timer */
4533 mod_timer(&phba->hb_tmofunc,
4534 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4535 phba->hb_outstanding = 0;
4536 phba->last_completion_time = jiffies;
4537
4538 /* Start error attention (ERATT) polling timer */
4539 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4540
4541 /*
4542 * The port is ready, set the host's link state to LINK_DOWN
4543 * in preparation for link interrupts.
4544 */
4545 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4546 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4547 lpfc_set_loopback_flag(phba);
4548 /* Change driver state to LPFC_LINK_DOWN right before init link */
4549 spin_lock_irq(&phba->hbalock);
4550 phba->link_state = LPFC_LINK_DOWN;
4551 spin_unlock_irq(&phba->hbalock);
4552 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4553 if (unlikely(rc != MBX_NOT_FINISHED)) {
4554 kfree(vpd);
4555 return 0;
4556 } else
4557 rc = -EIO;
4558
4559 /* Unset all the queues set up in this routine when error out */
4560 if (rc)
4561 lpfc_sli4_queue_unset(phba);
4562
4563out_stop_timers:
4564 if (rc)
4565 lpfc_stop_hba_timers(phba);
4566out_free_vpd:
4567 kfree(vpd);
4568out_free_mbox:
4569 mempool_free(mboxq, phba->mbox_mem_pool);
4570 return rc;
4571}
3203 4572
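The tail of lpfc_sli4_hba_setup is the classic kernel goto-unwind ladder: each failure jumps to the label that releases everything acquired up to that point, in reverse order of acquisition. A compact user-space sketch of the idiom, reduced to two allocations:

#include <stdio.h>
#include <stdlib.h>

/*
 * Goto-based unwind: each failure jumps to the label that frees what
 * was acquired so far; the success path falls through the same cleanup.
 */
static int setup(void)
{
        char *mbox = NULL, *vpd = NULL;
        int rc;

        mbox = malloc(256);
        if (!mbox)
                return -1;

        vpd = malloc(4096);
        if (!vpd) {
                rc = -1;
                goto out_free_mbox;
        }

        rc = 0;                 /* imagine more steps, each with a label */
        printf("setup complete\n");

        free(vpd);
out_free_mbox:
        free(mbox);
        return rc;
}

int main(void)
{
        return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}
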
3204/** 4573/**
3205 * lpfc_mbox_timeout - Timeout call back function for mbox timer 4574 * lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3244,7 +4613,7 @@ void
3244lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 4613lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3245{ 4614{
3246 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 4615 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
3247 MAILBOX_t *mb = &pmbox->mb; 4616 MAILBOX_t *mb = &pmbox->u.mb;
3248 struct lpfc_sli *psli = &phba->sli; 4617 struct lpfc_sli *psli = &phba->sli;
3249 struct lpfc_sli_ring *pring; 4618 struct lpfc_sli_ring *pring;
3250 4619
@@ -3281,7 +4650,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3281 spin_unlock_irq(&phba->pport->work_port_lock); 4650 spin_unlock_irq(&phba->pport->work_port_lock);
3282 spin_lock_irq(&phba->hbalock); 4651 spin_lock_irq(&phba->hbalock);
3283 phba->link_state = LPFC_LINK_UNKNOWN; 4652 phba->link_state = LPFC_LINK_UNKNOWN;
3284 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 4653 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3285 spin_unlock_irq(&phba->hbalock); 4654 spin_unlock_irq(&phba->hbalock);
3286 4655
3287 pring = &psli->ring[psli->fcp_ring]; 4656 pring = &psli->ring[psli->fcp_ring];
@@ -3289,32 +4658,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3289 4658
3290 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4659 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3291 "0345 Resetting board due to mailbox timeout\n"); 4660 "0345 Resetting board due to mailbox timeout\n");
3292 /* 4661
3293 * lpfc_offline calls lpfc_sli_hba_down which will clean up 4662 /* Reset the HBA device */
3294 * on oustanding mailbox commands. 4663 lpfc_reset_hba(phba);
3295 */
3296 /* If resets are disabled then set error state and return. */
3297 if (!phba->cfg_enable_hba_reset) {
3298 phba->link_state = LPFC_HBA_ERROR;
3299 return;
3300 }
3301 lpfc_offline_prep(phba);
3302 lpfc_offline(phba);
3303 lpfc_sli_brdrestart(phba);
3304 lpfc_online(phba);
3305 lpfc_unblock_mgmt_io(phba);
3306 return;
3307} 4664}
3308 4665
3309/** 4666/**
3310 * lpfc_sli_issue_mbox - Issue a mailbox command to firmware 4667 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
3311 * @phba: Pointer to HBA context object. 4668 * @phba: Pointer to HBA context object.
3312 * @pmbox: Pointer to mailbox object. 4669 * @pmbox: Pointer to mailbox object.
3313 * @flag: Flag indicating how the mailbox need to be processed. 4670 * @flag: Flag indicating how the mailbox need to be processed.
3314 * 4671 *
3315 * This function is called by discovery code and HBA management code 4672 * This function is called by discovery code and HBA management code
3316 * to submit a mailbox command to firmware. This function gets the 4673 * to submit a mailbox command to firmware using the SLI-3 interface spec. This
3317 * hbalock to protect the data structures. 4674 * function gets the hbalock to protect the data structures.
3318 * The mailbox command can be submitted in polling mode, in which case 4675 * The mailbox command can be submitted in polling mode, in which case
3319 * this function will wait in a polling loop for the completion of the 4676 * this function will wait in a polling loop for the completion of the
3320 * mailbox. 4677 * mailbox.
@@ -3332,8 +4689,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3332 * return codes the caller owns the mailbox command after the return of 4689 * return codes the caller owns the mailbox command after the return of
3333 * the function. 4690 * the function.
3334 **/ 4691 **/
3335int 4692static int
3336lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 4693lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4694 uint32_t flag)
3337{ 4695{
3338 MAILBOX_t *mb; 4696 MAILBOX_t *mb;
3339 struct lpfc_sli *psli = &phba->sli; 4697 struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +4707,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3349 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4707 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3350 if (!pmbox) { 4708 if (!pmbox) {
3351 /* processing mbox queue from intr_handler */ 4709 /* processing mbox queue from intr_handler */
4710 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4711 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4712 return MBX_SUCCESS;
4713 }
3352 processing_queue = 1; 4714 processing_queue = 1;
3353 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4715 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3354 pmbox = lpfc_mbox_get(phba); 4716 pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +4727,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3365 lpfc_printf_log(phba, KERN_ERR, 4727 lpfc_printf_log(phba, KERN_ERR,
3366 LOG_MBOX | LOG_VPORT, 4728 LOG_MBOX | LOG_VPORT,
3367 "1806 Mbox x%x failed. No vport\n", 4729 "1806 Mbox x%x failed. No vport\n",
3368 pmbox->mb.mbxCommand); 4730 pmbox->u.mb.mbxCommand);
3369 dump_stack(); 4731 dump_stack();
3370 goto out_not_finished; 4732 goto out_not_finished;
3371 } 4733 }
@@ -3385,21 +4747,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3385 4747
3386 psli = &phba->sli; 4748 psli = &phba->sli;
3387 4749
3388 mb = &pmbox->mb; 4750 mb = &pmbox->u.mb;
3389 status = MBX_SUCCESS; 4751 status = MBX_SUCCESS;
3390 4752
3391 if (phba->link_state == LPFC_HBA_ERROR) { 4753 if (phba->link_state == LPFC_HBA_ERROR) {
3392 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4754 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3393 4755
3394 /* Mbox command <mbxCommand> cannot issue */ 4756 /* Mbox command <mbxCommand> cannot issue */
3395 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4757 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4758 "(%d):0311 Mailbox command x%x cannot "
4759 "issue Data: x%x x%x\n",
4760 pmbox->vport ? pmbox->vport->vpi : 0,
4761 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3396 goto out_not_finished; 4762 goto out_not_finished;
3397 } 4763 }
3398 4764
3399 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 4765 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
3400 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 4766 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
3401 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4767 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3402 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4768 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4769 "(%d):2528 Mailbox command x%x cannot "
4770 "issue Data: x%x x%x\n",
4771 pmbox->vport ? pmbox->vport->vpi : 0,
4772 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3403 goto out_not_finished; 4773 goto out_not_finished;
3404 } 4774 }
3405 4775
@@ -3413,14 +4783,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3413 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4783 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3414 4784
3415 /* Mbox command <mbxCommand> cannot issue */ 4785 /* Mbox command <mbxCommand> cannot issue */
3416 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4786 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4787 "(%d):2529 Mailbox command x%x "
4788 "cannot issue Data: x%x x%x\n",
4789 pmbox->vport ? pmbox->vport->vpi : 0,
4790 pmbox->u.mb.mbxCommand,
4791 psli->sli_flag, flag);
3417 goto out_not_finished; 4792 goto out_not_finished;
3418 } 4793 }
3419 4794
3420 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 4795 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
3421 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4796 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3422 /* Mbox command <mbxCommand> cannot issue */ 4797 /* Mbox command <mbxCommand> cannot issue */
3423 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4799 "(%d):2530 Mailbox command x%x "
4800 "cannot issue Data: x%x x%x\n",
4801 pmbox->vport ? pmbox->vport->vpi : 0,
4802 pmbox->u.mb.mbxCommand,
4803 psli->sli_flag, flag);
3424 goto out_not_finished; 4804 goto out_not_finished;
3425 } 4805 }
3426 4806
@@ -3462,12 +4842,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3462 4842
3463 /* If we are not polling, we MUST be in SLI2 mode */ 4843 /* If we are not polling, we MUST be in SLI2 mode */
3464 if (flag != MBX_POLL) { 4844 if (flag != MBX_POLL) {
3465 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 4845 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
3466 (mb->mbxCommand != MBX_KILL_BOARD)) { 4846 (mb->mbxCommand != MBX_KILL_BOARD)) {
3467 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4847 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3468 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4848 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3469 /* Mbox command <mbxCommand> cannot issue */ 4849 /* Mbox command <mbxCommand> cannot issue */
3470 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4850 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4851 "(%d):2531 Mailbox command x%x "
4852 "cannot issue Data: x%x x%x\n",
4853 pmbox->vport ? pmbox->vport->vpi : 0,
4854 pmbox->u.mb.mbxCommand,
4855 psli->sli_flag, flag);
3471 goto out_not_finished; 4856 goto out_not_finished;
3472 } 4857 }
3473 /* timeout active mbox command */ 4858 /* timeout active mbox command */
@@ -3506,7 +4891,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3506 /* next set own bit for the adapter and copy over command word */ 4891 /* next set own bit for the adapter and copy over command word */
3507 mb->mbxOwner = OWN_CHIP; 4892 mb->mbxOwner = OWN_CHIP;
3508 4893
3509 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4894 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3510 /* First copy command data to host SLIM area */ 4895 /* First copy command data to host SLIM area */
3511 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4896 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
3512 } else { 4897 } else {
@@ -3529,7 +4914,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3529 4914
3530 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4915 if (mb->mbxCommand == MBX_CONFIG_PORT) {
3531 /* switch over to host mailbox */ 4916 /* switch over to host mailbox */
3532 psli->sli_flag |= LPFC_SLI2_ACTIVE; 4917 psli->sli_flag |= LPFC_SLI_ACTIVE;
3533 } 4918 }
3534 } 4919 }
3535 4920
@@ -3552,7 +4937,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3552 writel(CA_MBATT, phba->CAregaddr); 4937 writel(CA_MBATT, phba->CAregaddr);
3553 readl(phba->CAregaddr); /* flush */ 4938 readl(phba->CAregaddr); /* flush */
3554 4939
3555 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4940 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3556 /* First read mbox status word */ 4941 /* First read mbox status word */
3557 word0 = *((uint32_t *)phba->mbox); 4942 word0 = *((uint32_t *)phba->mbox);
3558 word0 = le32_to_cpu(word0); 4943 word0 = le32_to_cpu(word0);
@@ -3591,7 +4976,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3591 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4976 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3592 } 4977 }
3593 4978
3594 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4979 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3595 /* First copy command data */ 4980 /* First copy command data */
3596 word0 = *((uint32_t *)phba->mbox); 4981 word0 = *((uint32_t *)phba->mbox);
3597 word0 = le32_to_cpu(word0); 4982 word0 = le32_to_cpu(word0);
@@ -3604,7 +4989,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3604 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 4989 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
3605 && slimmb->mbxStatus) { 4990 && slimmb->mbxStatus) {
3606 psli->sli_flag &= 4991 psli->sli_flag &=
3607 ~LPFC_SLI2_ACTIVE; 4992 ~LPFC_SLI_ACTIVE;
3608 word0 = slimword0; 4993 word0 = slimword0;
3609 } 4994 }
3610 } 4995 }
@@ -3616,7 +5001,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3616 ha_copy = readl(phba->HAregaddr); 5001 ha_copy = readl(phba->HAregaddr);
3617 } 5002 }
3618 5003
3619 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 5004 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3620 /* copy results back to user */ 5005 /* copy results back to user */
3621 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5006 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
3622 } else { 5007 } else {
@@ -3643,13 +5028,527 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3643 5028
3644out_not_finished: 5029out_not_finished:
3645 if (processing_queue) { 5030 if (processing_queue) {
3646 pmbox->mb.mbxStatus = MBX_NOT_FINISHED; 5031 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
3647 lpfc_mbox_cmpl_put(phba, pmbox); 5032 lpfc_mbox_cmpl_put(phba, pmbox);
3648 } 5033 }
3649 return MBX_NOT_FINISHED; 5034 return MBX_NOT_FINISHED;
3650} 5035}
3651 5036
3652/** 5037/**
5038 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
5039 * @phba: Pointer to HBA context object.
5040 *
5041 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 5042 * the driver internal pending mailbox queue. It will then try to wait out any
 5043 * outstanding mailbox command before returning.
5044 *
5045 * Returns:
 5046 * 0 - the outstanding mailbox command completed
 5047 * 1 - the wait for the outstanding mailbox command timed out
5048 **/
5049static int
5050lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
5051{
5052 struct lpfc_sli *psli = &phba->sli;
5053 uint8_t actcmd = MBX_HEARTBEAT;
5054 int rc = 0;
5055 unsigned long timeout;
5056
5057 /* Mark the asynchronous mailbox command posting as blocked */
5058 spin_lock_irq(&phba->hbalock);
5059 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5060 if (phba->sli.mbox_active)
5061 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5062 spin_unlock_irq(&phba->hbalock);
5063 /* Determine how long we might wait for the active mailbox
5064 * command to be gracefully completed by firmware.
5065 */
5066 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
5067 jiffies;
 5068 /* Wait for the outstanding mailbox command to complete */
5069 while (phba->sli.mbox_active) {
5070 /* Check active mailbox complete status every 2ms */
5071 msleep(2);
5072 if (time_after(jiffies, timeout)) {
 5073 /* Timeout, mark the outstanding cmd not complete */
5074 rc = 1;
5075 break;
5076 }
5077 }
5078
 5079 /* Cannot cleanly block async mailbox command, fail it */
5080 if (rc) {
5081 spin_lock_irq(&phba->hbalock);
5082 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5083 spin_unlock_irq(&phba->hbalock);
5084 }
5085 return rc;
5086}
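
The routine above is a classic block-and-drain: raise a "blocked" flag under the lock, poll the in-flight slot until it empties or a deadline passes, and undo the flag on timeout. A minimal self-contained sketch of the same pattern follows; all names below are hypothetical userspace stand-ins, not lpfc API.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool posting_blocked;	/* stands in for LPFC_SLI_ASYNC_MBX_BLK */
static bool cmd_active;		/* stands in for phba->sli.mbox_active */

/* Returns 0 if the active command drained, 1 on timeout (flag undone). */
static int block_and_drain(int timeout_ms)
{
	const struct timespec tick = { 0, 2 * 1000 * 1000 };	/* 2 ms */
	bool active;

	pthread_mutex_lock(&lock);
	posting_blocked = true;
	pthread_mutex_unlock(&lock);

	for (; timeout_ms > 0; timeout_ms -= 2) {
		pthread_mutex_lock(&lock);
		active = cmd_active;
		pthread_mutex_unlock(&lock);
		if (!active)
			return 0;
		nanosleep(&tick, NULL);		/* re-check every 2 ms */
	}
	pthread_mutex_lock(&lock);
	posting_blocked = false;		/* could not drain, undo */
	pthread_mutex_unlock(&lock);
	return 1;
}
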
5087
5088/**
 5089 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
5090 * @phba: Pointer to HBA context object.
5091 *
 5092 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
5093 * commands from the driver internal pending mailbox queue. It makes sure
5094 * that there is no outstanding mailbox command before resuming posting
 5095 * asynchronous mailbox commands. If, for any reason, there is an outstanding
5096 * mailbox command, it will try to wait it out before resuming asynchronous
5097 * mailbox command posting.
5098 **/
5099static void
5100lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
5101{
5102 struct lpfc_sli *psli = &phba->sli;
5103
5104 spin_lock_irq(&phba->hbalock);
5105 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5106 /* Asynchronous mailbox posting is not blocked, do nothing */
5107 spin_unlock_irq(&phba->hbalock);
5108 return;
5109 }
5110
 5111 /* The outstanding synchronous mailbox command is guaranteed to be done,
 5112 * whether it succeeded or timed out; after a timeout, the outstanding
 5113 * mailbox command is always removed, so just unblock posting of async
 5114 * mailbox commands and resume
5115 */
5116 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5117 spin_unlock_irq(&phba->hbalock);
5118
 5119 /* wake up worker thread to post asynchronous mailbox command */
5120 lpfc_worker_wake_up(phba);
5121}
5122
5123/**
5124 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5125 * @phba: Pointer to HBA context object.
5126 * @mboxq: Pointer to mailbox object.
5127 *
5128 * The function posts a mailbox to the port. The mailbox is expected
 5129 * to be completely filled in and ready for the port to operate on it.
5130 * This routine executes a synchronous completion operation on the
5131 * mailbox by polling for its completion.
5132 *
5133 * The caller must not be holding any locks when calling this routine.
5134 *
5135 * Returns:
5136 * MBX_SUCCESS - mailbox posted successfully
5137 * Any of the MBX error values.
5138 **/
5139static int
5140lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5141{
5142 int rc = MBX_SUCCESS;
5143 unsigned long iflag;
5144 uint32_t db_ready;
5145 uint32_t mcqe_status;
5146 uint32_t mbx_cmnd;
5147 unsigned long timeout;
5148 struct lpfc_sli *psli = &phba->sli;
5149 struct lpfc_mqe *mb = &mboxq->u.mqe;
5150 struct lpfc_bmbx_create *mbox_rgn;
5151 struct dma_address *dma_address;
5152 struct lpfc_register bmbx_reg;
5153
5154 /*
5155 * Only one mailbox can be active to the bootstrap mailbox region
5156 * at a time and there is no queueing provided.
5157 */
5158 spin_lock_irqsave(&phba->hbalock, iflag);
5159 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5160 spin_unlock_irqrestore(&phba->hbalock, iflag);
5161 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5162 "(%d):2532 Mailbox command x%x (x%x) "
5163 "cannot issue Data: x%x x%x\n",
5164 mboxq->vport ? mboxq->vport->vpi : 0,
5165 mboxq->u.mb.mbxCommand,
5166 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5167 psli->sli_flag, MBX_POLL);
5168 return MBXERR_ERROR;
5169 }
5170 /* The server grabs the token and owns it until release */
5171 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5172 phba->sli.mbox_active = mboxq;
5173 spin_unlock_irqrestore(&phba->hbalock, iflag);
5174
5175 /*
5176 * Initialize the bootstrap memory region to avoid stale data areas
5177 * in the mailbox post. Then copy the caller's mailbox contents to
5178 * the bmbx mailbox region.
5179 */
5180 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5181 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5182 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5183 sizeof(struct lpfc_mqe));
5184
5185 /* Post the high mailbox dma address to the port and wait for ready. */
5186 dma_address = &phba->sli4_hba.bmbx.dma_address;
5187 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5188
5189 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5190 * 1000) + jiffies;
5191 do {
5192 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5193 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5194 if (!db_ready)
5195 msleep(2);
5196
5197 if (time_after(jiffies, timeout)) {
5198 rc = MBXERR_ERROR;
5199 goto exit;
5200 }
5201 } while (!db_ready);
5202
5203 /* Post the low mailbox dma address to the port. */
5204 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5205 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5206 * 1000) + jiffies;
5207 do {
5208 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5209 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5210 if (!db_ready)
5211 msleep(2);
5212
5213 if (time_after(jiffies, timeout)) {
5214 rc = MBXERR_ERROR;
5215 goto exit;
5216 }
5217 } while (!db_ready);
5218
5219 /*
5220 * Read the CQ to ensure the mailbox has completed.
5221 * If so, update the mailbox status so that the upper layers
5222 * can complete the request normally.
5223 */
5224 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5225 sizeof(struct lpfc_mqe));
5226 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5227 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5228 sizeof(struct lpfc_mcqe));
5229 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5230
5231 /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5232 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5233 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5234 rc = MBXERR_ERROR;
5235 }
5236
5237 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5238 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5239 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5240 " x%x x%x CQ: x%x x%x x%x x%x\n",
5241 mboxq->vport ? mboxq->vport->vpi : 0,
5242 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5243 bf_get(lpfc_mqe_status, mb),
5244 mb->un.mb_words[0], mb->un.mb_words[1],
5245 mb->un.mb_words[2], mb->un.mb_words[3],
5246 mb->un.mb_words[4], mb->un.mb_words[5],
5247 mb->un.mb_words[6], mb->un.mb_words[7],
5248 mb->un.mb_words[8], mb->un.mb_words[9],
5249 mb->un.mb_words[10], mb->un.mb_words[11],
5250 mb->un.mb_words[12], mboxq->mcqe.word0,
5251 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5252 mboxq->mcqe.trailer);
5253exit:
 5254 /* We are holding the token, no lock needed on release */
5255 spin_lock_irqsave(&phba->hbalock, iflag);
5256 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5257 phba->sli.mbox_active = NULL;
5258 spin_unlock_irqrestore(&phba->hbalock, iflag);
5259 return rc;
5260}
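
The two identical doorbell ready-poll loops above (one after posting the high half of the DMA address, one after the low half) could be factored into a single helper. A sketch, assuming only the register names and accessors that appear in this diff; the helper name itself is hypothetical:

/* Hypothetical helper: poll the bootstrap mailbox doorbell until the
 * ready bit is set or the jiffies deadline passes.
 */
static int lpfc_bmbx_wait_ready(struct lpfc_hba *phba, unsigned long deadline)
{
	struct lpfc_register bmbx_reg;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		if (bf_get(lpfc_bmbx_rdy, &bmbx_reg))
			return 0;
		msleep(2);
	} while (!time_after(jiffies, deadline));

	return -ETIMEDOUT;
}
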
5261
5262/**
5263 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5264 * @phba: Pointer to HBA context object.
 5265 * @mboxq: Pointer to mailbox object.
 5266 * @flag: Flag indicating how the mailbox needs to be processed.
5267 *
5268 * This function is called by discovery code and HBA management code to submit
5269 * a mailbox command to firmware with SLI-4 interface spec.
5270 *
 5271 * Returns MBX_* status codes; the caller owns the mailbox command after the
 5272 * function returns.
5273 **/
5274static int
5275lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5276 uint32_t flag)
5277{
5278 struct lpfc_sli *psli = &phba->sli;
5279 unsigned long iflags;
5280 int rc;
5281
5282 /* Detect polling mode and jump to a handler */
5283 if (!phba->sli4_hba.intr_enable) {
5284 if (flag == MBX_POLL)
5285 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5286 else
5287 rc = -EIO;
5288 if (rc != MBX_SUCCESS)
5289 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5290 "(%d):2541 Mailbox command x%x "
5291 "(x%x) cannot issue Data: x%x x%x\n",
5292 mboxq->vport ? mboxq->vport->vpi : 0,
5293 mboxq->u.mb.mbxCommand,
5294 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5295 psli->sli_flag, flag);
5296 return rc;
5297 } else if (flag == MBX_POLL) {
5298 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5299 "(%d):2542 Try to issue mailbox command "
5300 "x%x (x%x) synchronously ahead of async"
5301 "mailbox command queue: x%x x%x\n",
5302 mboxq->vport ? mboxq->vport->vpi : 0,
5303 mboxq->u.mb.mbxCommand,
5304 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5305 psli->sli_flag, flag);
5306 /* Try to block the asynchronous mailbox posting */
5307 rc = lpfc_sli4_async_mbox_block(phba);
5308 if (!rc) {
5309 /* Successfully blocked, now issue sync mbox cmd */
5310 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5311 if (rc != MBX_SUCCESS)
5312 lpfc_printf_log(phba, KERN_ERR,
5313 LOG_MBOX | LOG_SLI,
5314 "(%d):2597 Mailbox command "
5315 "x%x (x%x) cannot issue "
5316 "Data: x%x x%x\n",
5317 mboxq->vport ?
5318 mboxq->vport->vpi : 0,
5319 mboxq->u.mb.mbxCommand,
5320 lpfc_sli4_mbox_opcode_get(phba,
5321 mboxq),
5322 psli->sli_flag, flag);
5323 /* Unblock the async mailbox posting afterward */
5324 lpfc_sli4_async_mbox_unblock(phba);
5325 }
5326 return rc;
5327 }
5328
 5329 /* Now, interrupt mode asynchronous mailbox command */
5330 rc = lpfc_mbox_cmd_check(phba, mboxq);
5331 if (rc) {
5332 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5333 "(%d):2543 Mailbox command x%x (x%x) "
5334 "cannot issue Data: x%x x%x\n",
5335 mboxq->vport ? mboxq->vport->vpi : 0,
5336 mboxq->u.mb.mbxCommand,
5337 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5338 psli->sli_flag, flag);
5339 goto out_not_finished;
5340 }
5341 rc = lpfc_mbox_dev_check(phba);
5342 if (unlikely(rc)) {
5343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5344 "(%d):2544 Mailbox command x%x (x%x) "
5345 "cannot issue Data: x%x x%x\n",
5346 mboxq->vport ? mboxq->vport->vpi : 0,
5347 mboxq->u.mb.mbxCommand,
5348 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5349 psli->sli_flag, flag);
5350 goto out_not_finished;
5351 }
5352
5353 /* Put the mailbox command to the driver internal FIFO */
5354 psli->slistat.mbox_busy++;
5355 spin_lock_irqsave(&phba->hbalock, iflags);
5356 lpfc_mbox_put(phba, mboxq);
5357 spin_unlock_irqrestore(&phba->hbalock, iflags);
5358 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5359 "(%d):0354 Mbox cmd issue - Enqueue Data: "
5360 "x%x (x%x) x%x x%x x%x\n",
5361 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5362 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5363 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5364 phba->pport->port_state,
5365 psli->sli_flag, MBX_NOWAIT);
5366 /* Wake up worker thread to transport mailbox command from head */
5367 lpfc_worker_wake_up(phba);
5368
5369 return MBX_BUSY;
5370
5371out_not_finished:
5372 return MBX_NOT_FINISHED;
5373}
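
To make the paths above concrete, a hedged caller sketch. The allocation pool, the command builder, and the completion hook named here are assumptions for illustration only, not taken from this diff:

/* Hypothetical caller (error handling trimmed). */
LPFC_MBOXQ_t *mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
int rc;

if (!mboxq)
	return -ENOMEM;
build_mbox_cmd(phba, mboxq);		/* hypothetical command builder */
mboxq->mbox_cmpl = my_mbox_cmpl;	/* hypothetical completion hook */

rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_BUSY)
	return 0;	/* queued; worker posts it, mbox_cmpl cleans up */
if (rc != MBX_SUCCESS)
	mempool_free(mboxq, phba->mbox_mem_pool);
return 0;
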
5374
5375/**
5376 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5377 * @phba: Pointer to HBA context object.
5378 *
 5379 * This function is called by the worker thread to send a mailbox command to
5380 * SLI4 HBA firmware.
5381 *
5382 **/
5383int
5384lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5385{
5386 struct lpfc_sli *psli = &phba->sli;
5387 LPFC_MBOXQ_t *mboxq;
5388 int rc = MBX_SUCCESS;
5389 unsigned long iflags;
5390 struct lpfc_mqe *mqe;
5391 uint32_t mbx_cmnd;
5392
 5393 /* Check interrupt mode before posting async mailbox command */
5394 if (unlikely(!phba->sli4_hba.intr_enable))
5395 return MBX_NOT_FINISHED;
5396
5397 /* Check for mailbox command service token */
5398 spin_lock_irqsave(&phba->hbalock, iflags);
5399 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5400 spin_unlock_irqrestore(&phba->hbalock, iflags);
5401 return MBX_NOT_FINISHED;
5402 }
5403 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5404 spin_unlock_irqrestore(&phba->hbalock, iflags);
5405 return MBX_NOT_FINISHED;
5406 }
5407 if (unlikely(phba->sli.mbox_active)) {
5408 spin_unlock_irqrestore(&phba->hbalock, iflags);
5409 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5410 "0384 There is pending active mailbox cmd\n");
5411 return MBX_NOT_FINISHED;
5412 }
5413 /* Take the mailbox command service token */
5414 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5415
5416 /* Get the next mailbox command from head of queue */
5417 mboxq = lpfc_mbox_get(phba);
5418
 5419 /* If no more mailbox commands are waiting to be posted, we're done */
5420 if (!mboxq) {
5421 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5422 spin_unlock_irqrestore(&phba->hbalock, iflags);
5423 return MBX_SUCCESS;
5424 }
5425 phba->sli.mbox_active = mboxq;
5426 spin_unlock_irqrestore(&phba->hbalock, iflags);
5427
5428 /* Check device readiness for posting mailbox command */
5429 rc = lpfc_mbox_dev_check(phba);
5430 if (unlikely(rc))
5431 /* Driver clean routine will clean up pending mailbox */
5432 goto out_not_finished;
5433
5434 /* Prepare the mbox command to be posted */
5435 mqe = &mboxq->u.mqe;
5436 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5437
5438 /* Start timer for the mbox_tmo and log some mailbox post messages */
5439 mod_timer(&psli->mbox_tmo, (jiffies +
5440 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5441
5442 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5443 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5444 "x%x x%x\n",
5445 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5446 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5447 phba->pport->port_state, psli->sli_flag);
5448
5449 if (mbx_cmnd != MBX_HEARTBEAT) {
5450 if (mboxq->vport) {
5451 lpfc_debugfs_disc_trc(mboxq->vport,
5452 LPFC_DISC_TRC_MBOX_VPORT,
5453 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5454 mbx_cmnd, mqe->un.mb_words[0],
5455 mqe->un.mb_words[1]);
5456 } else {
5457 lpfc_debugfs_disc_trc(phba->pport,
5458 LPFC_DISC_TRC_MBOX,
5459 "MBOX Send: cmd:x%x mb:x%x x%x",
5460 mbx_cmnd, mqe->un.mb_words[0],
5461 mqe->un.mb_words[1]);
5462 }
5463 }
5464 psli->slistat.mbox_cmd++;
5465
5466 /* Post the mailbox command to the port */
5467 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5468 if (rc != MBX_SUCCESS) {
5469 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5470 "(%d):2533 Mailbox command x%x (x%x) "
5471 "cannot issue Data: x%x x%x\n",
5472 mboxq->vport ? mboxq->vport->vpi : 0,
5473 mboxq->u.mb.mbxCommand,
5474 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5475 psli->sli_flag, MBX_NOWAIT);
5476 goto out_not_finished;
5477 }
5478
5479 return rc;
5480
5481out_not_finished:
5482 spin_lock_irqsave(&phba->hbalock, iflags);
5483 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5484 __lpfc_mbox_cmpl_put(phba, mboxq);
5485 /* Release the token */
5486 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5487 phba->sli.mbox_active = NULL;
5488 spin_unlock_irqrestore(&phba->hbalock, iflags);
5489
5490 return MBX_NOT_FINISHED;
5491}
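
One small note on the timer arming above: "jiffies + (HZ * lpfc_mbox_tmo_val(...))" relies on the timeout value being in seconds. An equivalent, more explicit spelling, matching the msecs_to_jiffies() form already used elsewhere in this diff, would be:

mod_timer(&psli->mbox_tmo, jiffies +
	  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mbx_cmnd)));
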
5492
5493/**
5494 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5495 * @phba: Pointer to HBA context object.
5496 * @pmbox: Pointer to mailbox object.
 5497 * @flag: Flag indicating how the mailbox needs to be processed.
5498 *
 5499 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
 5500 * the API jump table function pointer in the lpfc_hba struct.
5501 *
 5502 * Returns MBX_* status codes; the caller owns the mailbox command after the
 5503 * function returns.
5504 **/
5505int
5506lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5507{
5508 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5509}
5510
5511/**
 5512 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
5513 * @phba: The hba struct for which this call is being executed.
5514 * @dev_grp: The HBA PCI-Device group number.
5515 *
5516 * This routine sets up the mbox interface API function jump table in @phba
5517 * struct.
5518 * Returns: 0 - success, -ENODEV - failure.
5519 **/
5520int
5521lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5522{
5523
5524 switch (dev_grp) {
5525 case LPFC_PCI_DEV_LP:
5526 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5527 phba->lpfc_sli_handle_slow_ring_event =
5528 lpfc_sli_handle_slow_ring_event_s3;
5529 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5530 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5531 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5532 break;
5533 case LPFC_PCI_DEV_OC:
5534 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5535 phba->lpfc_sli_handle_slow_ring_event =
5536 lpfc_sli_handle_slow_ring_event_s4;
5537 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5538 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5539 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5540 break;
5541 default:
5542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5543 "1420 Invalid HBA PCI-device group: 0x%x\n",
5544 dev_grp);
5545 return -ENODEV;
5546 break;
5547 }
5548 return 0;
5549}
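
The setup routine above is a plain function-pointer jump table: pick the per-revision implementations once at probe time, and every later call site dispatches through the pointer with no revision checks. A self-contained sketch of the pattern, with hypothetical names:

#include <stdio.h>

struct hba;
typedef int (*issue_mbox_fn)(struct hba *hba);

struct hba {
	issue_mbox_fn issue_mbox;	/* filled in once at setup time */
};

static int issue_mbox_s3(struct hba *hba) { puts("SLI-3 path"); return 0; }
static int issue_mbox_s4(struct hba *hba) { puts("SLI-4 path"); return 0; }

static int api_setup(struct hba *hba, int dev_grp)
{
	switch (dev_grp) {
	case 1: hba->issue_mbox = issue_mbox_s3; break;	/* e.g. DEV_LP */
	case 2: hba->issue_mbox = issue_mbox_s4; break;	/* e.g. DEV_OC */
	default: return -1;			/* unknown device group */
	}
	return 0;
}

Callers then go through hba->issue_mbox(hba) and never test the SLI revision again.
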
5550
5551/**
3653 * __lpfc_sli_ringtx_put - Add an iocb to the txq 5552 * __lpfc_sli_ringtx_put - Add an iocb to the txq
3654 * @phba: Pointer to HBA context object. 5553 * @phba: Pointer to HBA context object.
3655 * @pring: Pointer to driver SLI ring object. 5554 * @pring: Pointer to driver SLI ring object.
@@ -3701,35 +5600,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3701} 5600}
3702 5601
3703/** 5602/**
3704 * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb 5603 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
3705 * @phba: Pointer to HBA context object. 5604 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object. 5605 * @ring_number: SLI ring number to issue iocb on.
3707 * @piocb: Pointer to command iocb. 5606 * @piocb: Pointer to command iocb.
3708 * @flag: Flag indicating if this command can be put into txq. 5607 * @flag: Flag indicating if this command can be put into txq.
3709 * 5608 *
3710 * __lpfc_sli_issue_iocb is used by other functions in the driver 5609 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
3711 * to issue an iocb command to the HBA. If the PCI slot is recovering 5610 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
3712 * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT 5611 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
3713 * flag is turned on, the function returns IOCB_ERROR. 5612 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
3714 * When the link is down, this function allows only iocbs for 5613 * this function allows only iocbs for posting buffers. This function finds
3715 * posting buffers. 5614 * next available slot in the command ring and posts the command to the
3716 * This function finds next available slot in the command ring and 5615 * available slot and writes the port attention register to request HBA start
3717 * posts the command to the available slot and writes the port 5616 * processing new iocb. If there is no slot available in the ring and
3718 * attention register to request HBA start processing new iocb. 5617 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
3719 * If there is no slot available in the ring and 5618 * the function returns IOCB_BUSY.
3720 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the 5619 *
3721 * txq, otherwise the function returns IOCB_BUSY. 5620 * This function is called with hbalock held. The function will return success
3722 * 5621 * after it successfully submit the iocb to firmware or after adding to the
3723 * This function is called with hbalock held. 5622 * after it successfully submits the iocb to firmware or after adding to the
3724 * The function will return success after it successfully submits the 5623 * txq.
3725 * iocb to firmware or after adding to the txq.
3726 **/ 5623 **/
3727static int 5624static int
3728__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5625__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
3729 struct lpfc_iocbq *piocb, uint32_t flag) 5626 struct lpfc_iocbq *piocb, uint32_t flag)
3730{ 5627{
3731 struct lpfc_iocbq *nextiocb; 5628 struct lpfc_iocbq *nextiocb;
3732 IOCB_t *iocb; 5629 IOCB_t *iocb;
5630 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
3733 5631
3734 if (piocb->iocb_cmpl && (!piocb->vport) && 5632 if (piocb->iocb_cmpl && (!piocb->vport) &&
3735 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 5633 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +5731,493 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3833 return IOCB_BUSY; 5731 return IOCB_BUSY;
3834} 5732}
3835 5733
5734/**
5735 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
5736 * @phba: Pointer to HBA context object.
5737 * @piocb: Pointer to command iocb.
5738 * @sglq: Pointer to the scatter gather queue object.
5739 *
5740 * This routine converts the bpl or bde that is in the IOCB
5741 * to a sgl list for the sli4 hardware. The physical address
5742 * of the bpl/bde is converted back to a virtual address.
5743 * If the IOCB contains a BPL then the list of BDE's is
5744 * converted to sli4_sge's. If the IOCB contains a single
5745 * BDE then it is converted to a single sli_sge.
 5746 * The IOCB is still in cpu endianness so the contents of
5747 * the bpl can be used without byte swapping.
5748 *
5749 * Returns valid XRI = Success, NO_XRI = Failure.
5750**/
5751static uint16_t
5752lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5753 struct lpfc_sglq *sglq)
5754{
5755 uint16_t xritag = NO_XRI;
5756 struct ulp_bde64 *bpl = NULL;
5757 struct ulp_bde64 bde;
5758 struct sli4_sge *sgl = NULL;
5759 IOCB_t *icmd;
5760 int numBdes = 0;
5761 int i = 0;
5762
5763 if (!piocbq || !sglq)
5764 return xritag;
5765
5766 sgl = (struct sli4_sge *)sglq->sgl;
5767 icmd = &piocbq->iocb;
5768 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5769 numBdes = icmd->un.genreq64.bdl.bdeSize /
5770 sizeof(struct ulp_bde64);
5771 /* The addrHigh and addrLow fields within the IOCB
5772 * have not been byteswapped yet so there is no
5773 * need to swap them back.
5774 */
5775 bpl = (struct ulp_bde64 *)
5776 ((struct lpfc_dmabuf *)piocbq->context3)->virt;
5777
5778 if (!bpl)
5779 return xritag;
5780
5781 for (i = 0; i < numBdes; i++) {
5782 /* Should already be byte swapped. */
5783 sgl->addr_hi = bpl->addrHigh;
5784 sgl->addr_lo = bpl->addrLow;
5785 /* swap the size field back to the cpu so we
5786 * can assign it to the sgl.
5787 */
5788 bde.tus.w = le32_to_cpu(bpl->tus.w);
5789 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5790 if ((i+1) == numBdes)
5791 bf_set(lpfc_sli4_sge_last, sgl, 1);
5792 else
5793 bf_set(lpfc_sli4_sge_last, sgl, 0);
5794 sgl->word2 = cpu_to_le32(sgl->word2);
5795 sgl->word3 = cpu_to_le32(sgl->word3);
5796 bpl++;
5797 sgl++;
5798 }
5799 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
5800 /* The addrHigh and addrLow fields of the BDE have not
5801 * been byteswapped yet so they need to be swapped
5802 * before putting them in the sgl.
5803 */
5804 sgl->addr_hi =
5805 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5806 sgl->addr_lo =
5807 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5808 bf_set(lpfc_sli4_sge_len, sgl,
5809 icmd->un.genreq64.bdl.bdeSize);
5810 bf_set(lpfc_sli4_sge_last, sgl, 1);
5811 sgl->word2 = cpu_to_le32(sgl->word2);
5812 sgl->word3 = cpu_to_le32(sgl->word3);
5813 }
5814 return sglq->sli4_xritag;
5815}
5816
5817/**
5818 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5819 * @phba: Pointer to HBA context object.
5820 * @piocb: Pointer to command iocb.
5821 *
 5822 * This routine distributes SCSI commands across the SLI4 FCP work queues
 5823 * in round-robin fashion.
5824 *
5825 * Return: index into SLI4 fast-path FCP queue index.
5826 **/
5827static uint32_t
5828lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
5829{
5830 static uint32_t fcp_qidx;
5831
5832 return fcp_qidx++ % phba->cfg_fcp_wq_count;
5833}
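
Note that the static counter above is incremented without any lock, so two CPUs can race on the read-modify-write; that is harmless here because any index in range spreads the load acceptably. A stricter variant (hypothetical, not in this diff) would use an atomic counter:

static atomic_t fcp_qidx_atomic = ATOMIC_INIT(-1);

static uint32_t scmd_to_wqidx_atomic(struct lpfc_hba *phba)
{
	/* atomic_inc_return() avoids the torn read-modify-write on SMP;
	 * the cast keeps the modulo well-defined after the counter wraps.
	 */
	return ((uint32_t)atomic_inc_return(&fcp_qidx_atomic)) %
	       phba->cfg_fcp_wq_count;
}
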
5834
5835/**
5836 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
5837 * @phba: Pointer to HBA context object.
5838 * @piocb: Pointer to command iocb.
5839 * @wqe: Pointer to the work queue entry.
5840 *
5841 * This routine converts the iocb command to its Work Queue Entry
5842 * equivalent. The wqe pointer should not have any fields set when
5843 * this routine is called because it will memcpy over them.
5844 * This routine does not set the CQ_ID or the WQEC bits in the
5845 * wqe.
5846 *
5847 * Returns: 0 = Success, IOCB_ERROR = Failure.
5848 **/
5849static int
5850lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5851 union lpfc_wqe *wqe)
5852{
5853 uint32_t payload_len = 0;
5854 uint8_t ct = 0;
5855 uint32_t fip;
5856 uint32_t abort_tag;
5857 uint8_t command_type = ELS_COMMAND_NON_FIP;
5858 uint8_t cmnd;
5859 uint16_t xritag;
5860 struct ulp_bde64 *bpl = NULL;
5861
5862 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5863 /* The fcp commands will set command type */
5864 if (iocbq->iocb_flag & LPFC_IO_FCP)
5865 command_type = FCP_COMMAND;
5866 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
5867 command_type = ELS_COMMAND_FIP;
5868 else
5869 command_type = ELS_COMMAND_NON_FIP;
5870
5871 /* Some of the fields are in the right position already */
5872 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5873 abort_tag = (uint32_t) iocbq->iotag;
5874 xritag = iocbq->sli4_xritag;
5875 wqe->words[7] = 0; /* The ct field has moved so reset */
5876 /* words0-2 bpl convert bde */
5877 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5878 bpl = (struct ulp_bde64 *)
5879 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5880 if (!bpl)
5881 return IOCB_ERROR;
5882
5883 /* Should already be byte swapped. */
5884 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5885 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5886 /* swap the size field back to the cpu so we
5887 * can assign it to the sgl.
5888 */
5889 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5890 payload_len = wqe->generic.bde.tus.f.bdeSize;
5891 } else
5892 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5893
5894 iocbq->iocb.ulpIoTag = iocbq->iotag;
5895 cmnd = iocbq->iocb.ulpCommand;
5896
5897 switch (iocbq->iocb.ulpCommand) {
5898 case CMD_ELS_REQUEST64_CR:
5899 if (!iocbq->iocb.ulpLe) {
5900 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5901 "2007 Only Limited Edition cmd Format"
5902 " supported 0x%x\n",
5903 iocbq->iocb.ulpCommand);
5904 return IOCB_ERROR;
5905 }
5906 wqe->els_req.payload_len = payload_len;
 5907 /* Els_request64 has a TMO */
5908 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5909 iocbq->iocb.ulpTimeout);
5910 /* Need a VF for word 4 set the vf bit*/
5911 bf_set(els_req64_vf, &wqe->els_req, 0);
5912 /* And a VFID for word 12 */
5913 bf_set(els_req64_vfid, &wqe->els_req, 0);
5914 /*
5915 * Set ct field to 3, indicates that the context_tag field
5916 * contains the FCFI and remote N_Port_ID is
5917 * in word 5.
5918 */
5919
5920 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5921 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5922 iocbq->iocb.ulpContext);
5923
5924 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5925 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5926 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5927 break;
5928 case CMD_XMIT_SEQUENCE64_CR:
5929 /* word3 iocb=io_tag32 wqe=payload_offset */
 5930 /* payload offset used for multiple outstanding
5931 * sequences on the same exchange
5932 */
5933 wqe->words[3] = 0;
5934 /* word4 relative_offset memcpy */
5935 /* word5 r_ctl/df_ctl memcpy */
5936 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5937 wqe->xmit_sequence.xmit_len = payload_len;
5938 break;
5939 case CMD_XMIT_BCAST64_CN:
5940 /* word3 iocb=iotag32 wqe=payload_len */
5941 wqe->words[3] = 0; /* no definition for this in wqe */
5942 /* word4 iocb=rsvd wqe=rsvd */
5943 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
5944 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
5945 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5946 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5947 break;
5948 case CMD_FCP_IWRITE64_CR:
5949 command_type = FCP_COMMAND_DATA_OUT;
5950 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
5951 * confusing.
5952 * word3 is payload_len: byte offset to the sgl entry for the
5953 * fcp_command.
5954 * word4 is total xfer len, same as the IOCB->ulpParameter.
5955 * word5 is initial xfer len 0 = wait for xfer-ready
5956 */
5957
5958 /* Always wait for xfer-ready before sending data */
5959 wqe->fcp_iwrite.initial_xfer_len = 0;
5960 /* word 4 (xfer length) should have been set on the memcpy */
5961
5962 /* allow write to fall through to read */
5963 case CMD_FCP_IREAD64_CR:
5964 /* FCP_CMD is always the 1st sgl entry */
5965 wqe->fcp_iread.payload_len =
5966 payload_len + sizeof(struct fcp_rsp);
5967
5968 /* word 4 (xfer length) should have been set on the memcpy */
5969
5970 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5971 iocbq->iocb.ulpFCP2Rcvy);
5972 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5973 /* The XC bit and the XS bit are similar. The driver never
 5974 * tracked whether or not the exchange was previously open.
5975 * XC = Exchange create, 0 is create. 1 is already open.
5976 * XS = link cmd: 1 do not close the exchange after command.
5977 * XS = 0 close exchange when command completes.
5978 * The only time we would not set the XC bit is when the XS bit
5979 * is set and we are sending our 2nd or greater command on
5980 * this exchange.
5981 */
5982 /* Always open the exchange */
5983 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5984
5985 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5986 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5987 break;
5988 case CMD_FCP_ICMND64_CR:
5989 /* Always open the exchange */
5990 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5991
5992 wqe->words[4] = 0;
5993 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5994 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5995 break;
5996 case CMD_GEN_REQUEST64_CR:
5997 /* word3 command length is described as byte offset to the
5998 * rsp_data. Would always be 16, sizeof(struct sli4_sge)
5999 * sgl[0] = cmnd
6000 * sgl[1] = rsp.
6001 *
6002 */
6003 wqe->gen_req.command_len = payload_len;
6004 /* Word4 parameter copied in the memcpy */
6005 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
6006 /* word6 context tag copied in memcpy */
6007 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
6008 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
6009 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6010 "2015 Invalid CT %x command 0x%x\n",
6011 ct, iocbq->iocb.ulpCommand);
6012 return IOCB_ERROR;
6013 }
6014 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
6015 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
6016 iocbq->iocb.ulpTimeout);
6017
6018 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
6019 command_type = OTHER_COMMAND;
6020 break;
6021 case CMD_XMIT_ELS_RSP64_CX:
6022 /* words0-2 BDE memcpy */
6023 /* word3 iocb=iotag32 wqe=rsvd */
6024 wqe->words[3] = 0;
 6025 /* word4 iocb=did wqe=rsvd. */
6026 wqe->words[4] = 0;
 6027 /* word5 iocb=rsvd wqe=did */
6028 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
6029 iocbq->iocb.un.elsreq64.remoteID);
6030
6031 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
6032 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6033
6034 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
6035 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
6036 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
6037 bf_set(lpfc_wqe_gen_context, &wqe->generic,
6038 iocbq->vport->vpi + phba->vpi_base);
6039 command_type = OTHER_COMMAND;
6040 break;
6041 case CMD_CLOSE_XRI_CN:
6042 case CMD_ABORT_XRI_CN:
6043 case CMD_ABORT_XRI_CX:
 6044 /* words 0-2 memcpy should be 0 (reserved) */
6045 /* port will send abts */
6046 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6047 /*
6048 * The link is down so the fw does not need to send abts
6049 * on the wire.
6050 */
6051 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
6052 else
6053 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
6054 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
6055 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6056 wqe->words[5] = 0;
6057 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
6058 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6059 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6060 wqe->generic.abort_tag = abort_tag;
6061 /*
6062 * The abort handler will send us CMD_ABORT_XRI_CN or
6063 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
6064 */
6065 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
6066 cmnd = CMD_ABORT_XRI_CX;
6067 command_type = OTHER_COMMAND;
6068 xritag = 0;
6069 break;
6070 case CMD_XRI_ABORTED_CX:
6071 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
6072 /* words0-2 are all 0's no bde */
 6073 /* word3 and word4 are rsvd */
6074 wqe->words[3] = 0;
6075 wqe->words[4] = 0;
 6076 /* word5 iocb=rsvd wqe=did */
6077 /* There is no remote port id in the IOCB? */
6078 /* Let this fall through and fail */
6079 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
6080 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
6081 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
6082 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
6083 default:
6084 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6085 "2014 Invalid command 0x%x\n",
6086 iocbq->iocb.ulpCommand);
6087 return IOCB_ERROR;
6088 break;
6089
6090 }
6091 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
6092 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
6093 wqe->generic.abort_tag = abort_tag;
6094 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
6095 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
6096 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
6097 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
6098
6099 return 0;
6100}
6101
6102/**
6103 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
6104 * @phba: Pointer to HBA context object.
6105 * @ring_number: SLI ring number to issue iocb on.
6106 * @piocb: Pointer to command iocb.
6107 * @flag: Flag indicating if this command can be put into txq.
6108 *
6109 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6110 * an iocb command to an HBA with SLI-4 interface spec.
6111 *
6112 * This function is called with hbalock held. The function will return success
 6113 * after it successfully submits the iocb to firmware or after adding to the
6114 * txq.
6115 **/
6116static int
6117__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6118 struct lpfc_iocbq *piocb, uint32_t flag)
6119{
6120 struct lpfc_sglq *sglq;
6121 uint16_t xritag;
6122 union lpfc_wqe wqe;
6123 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6124 uint32_t fcp_wqidx;
6125
6126 if (piocb->sli4_xritag == NO_XRI) {
6127 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6128 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6129 sglq = NULL;
6130 else {
6131 sglq = __lpfc_sli_get_sglq(phba);
6132 if (!sglq)
6133 return IOCB_ERROR;
6134 piocb->sli4_xritag = sglq->sli4_xritag;
6135 }
6136 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6137 sglq = NULL; /* These IO's already have an XRI and
6138 * a mapped sgl.
6139 */
6140 } else {
 6141 /* This is a continuation of a command (CX) so this
6142 * sglq is on the active list
6143 */
6144 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6145 if (!sglq)
6146 return IOCB_ERROR;
6147 }
6148
6149 if (sglq) {
6150 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
6151 if (xritag != sglq->sli4_xritag)
6152 return IOCB_ERROR;
6153 }
6154
6155 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6156 return IOCB_ERROR;
6157
6158 if (piocb->iocb_flag & LPFC_IO_FCP) {
6159 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
6160 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6161 return IOCB_ERROR;
6162 } else {
6163 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6164 return IOCB_ERROR;
6165 }
6166 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6167
6168 return 0;
6169}
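
A hedged caller sketch for the reworked entry point: after this change callers pass a ring number rather than a ring pointer, and the locked wrapper (further below) picks the SLI-3 or SLI-4 path. LPFC_ELS_RING and elsiocb are assumptions for illustration:

rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR)
	lpfc_sli_release_iocbq(phba, elsiocb);
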
6170
6171/**
6172 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6173 *
 6174 * This routine wraps the actual lockless IOCB issuing routine via the function
6175 * pointer from the lpfc_hba struct.
6176 *
6177 * Return codes:
6178 * IOCB_ERROR - Error
6179 * IOCB_SUCCESS - Success
6180 * IOCB_BUSY - Busy
6181 **/
6182static inline int
6183__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6184 struct lpfc_iocbq *piocb, uint32_t flag)
6185{
6186 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6187}
6188
6189/**
 6190 * lpfc_sli_api_table_setup - Set up sli api function jump table
6191 * @phba: The hba struct for which this call is being executed.
6192 * @dev_grp: The HBA PCI-Device group number.
6193 *
6194 * This routine sets up the SLI interface API function jump table in @phba
6195 * struct.
6196 * Returns: 0 - success, -ENODEV - failure.
6197 **/
6198int
6199lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6200{
6201
6202 switch (dev_grp) {
6203 case LPFC_PCI_DEV_LP:
6204 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6205 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6206 break;
6207 case LPFC_PCI_DEV_OC:
6208 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6209 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6210 break;
6211 default:
6212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6213 "1419 Invalid HBA PCI-device group: 0x%x\n",
6214 dev_grp);
6215 return -ENODEV;
6216 break;
6217 }
6218 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6219 return 0;
6220}
3836 6221
3837/** 6222/**
3838 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 6223 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +6233,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3848 * functions which do not hold hbalock. 6233 * functions which do not hold hbalock.
3849 **/ 6234 **/
3850int 6235int
3851lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6236lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
3852 struct lpfc_iocbq *piocb, uint32_t flag) 6237 struct lpfc_iocbq *piocb, uint32_t flag)
3853{ 6238{
3854 unsigned long iflags; 6239 unsigned long iflags;
3855 int rc; 6240 int rc;
3856 6241
3857 spin_lock_irqsave(&phba->hbalock, iflags); 6242 spin_lock_irqsave(&phba->hbalock, iflags);
3858 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 6243 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
3859 spin_unlock_irqrestore(&phba->hbalock, iflags); 6244 spin_unlock_irqrestore(&phba->hbalock, iflags);
3860 6245
3861 return rc; 6246 return rc;
@@ -4148,6 +6533,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
4148} 6533}
4149 6534
4150/** 6535/**
6536 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
6537 * @phba: Pointer to HBA context object.
6538 *
6539 * This routine flushes the mailbox command subsystem. It will unconditionally
6540 * flush all the mailbox commands in the three possible stages in the mailbox
6541 * command sub-system: pending mailbox command queue; the outstanding mailbox
 6542 * command; and completed mailbox command queue. It is the caller's responsibility
6543 * to make sure that the driver is in the proper state to flush the mailbox
6544 * command sub-system. Namely, the posting of mailbox commands into the
6545 * pending mailbox command queue from the various clients must be stopped;
 6546 * either the HBA is in a state that it will never work on the outstanding
6547 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
6548 * mailbox command has been completed.
6549 **/
6550static void
6551lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
6552{
6553 LIST_HEAD(completions);
6554 struct lpfc_sli *psli = &phba->sli;
6555 LPFC_MBOXQ_t *pmb;
6556 unsigned long iflag;
6557
6558 /* Flush all the mailbox commands in the mbox system */
6559 spin_lock_irqsave(&phba->hbalock, iflag);
6560 /* The pending mailbox command queue */
6561 list_splice_init(&phba->sli.mboxq, &completions);
6562 /* The outstanding active mailbox command */
6563 if (psli->mbox_active) {
6564 list_add_tail(&psli->mbox_active->list, &completions);
6565 psli->mbox_active = NULL;
6566 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6567 }
6568 /* The completed mailbox command queue */
6569 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
6570 spin_unlock_irqrestore(&phba->hbalock, iflag);
6571
6572 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
6573 while (!list_empty(&completions)) {
6574 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
6575 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
6576 if (pmb->mbox_cmpl)
6577 pmb->mbox_cmpl(phba, pmb);
6578 }
6579}
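
The flush routine above is the usual splice-then-complete idiom: migrate every entry off the shared lists in one step while holding the lock, then run the completion callbacks with no lock held. A generic, self-contained sketch of the same idiom, with hypothetical types:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
	int status;
	void (*done)(struct item *item);
};

static void flush_all(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(local);
	struct item *it;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_init(pending, &local);	/* shared list now empty */
	spin_unlock_irqrestore(lock, flags);

	while (!list_empty(&local)) {
		it = list_first_entry(&local, struct item, list);
		list_del(&it->list);
		it->status = -ECANCELED;
		if (it->done)
			it->done(it);	/* callback may take locks/sleep */
	}
}
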
6580
6581/**
4151 * lpfc_sli_host_down - Vport cleanup function 6582 * lpfc_sli_host_down - Vport cleanup function
4152 * @vport: Pointer to virtual port object. 6583 * @vport: Pointer to virtual port object.
4153 * 6584 *
@@ -4240,9 +6671,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4240 struct lpfc_sli *psli = &phba->sli; 6671 struct lpfc_sli *psli = &phba->sli;
4241 struct lpfc_sli_ring *pring; 6672 struct lpfc_sli_ring *pring;
4242 struct lpfc_dmabuf *buf_ptr; 6673 struct lpfc_dmabuf *buf_ptr;
4243 LPFC_MBOXQ_t *pmb;
4244 int i;
4245 unsigned long flags = 0; 6674 unsigned long flags = 0;
6675 int i;
6676
6677 /* Shutdown the mailbox command sub-system */
6678 lpfc_sli_mbox_sys_shutdown(phba);
4246 6679
4247 lpfc_hba_down_prep(phba); 6680 lpfc_hba_down_prep(phba);
4248 6681
@@ -4287,28 +6720,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4287 6720
4288 /* Return any active mbox cmds */ 6721 /* Return any active mbox cmds */
4289 del_timer_sync(&psli->mbox_tmo); 6722 del_timer_sync(&psli->mbox_tmo);
4290 spin_lock_irqsave(&phba->hbalock, flags);
4291 6723
4292 spin_lock(&phba->pport->work_port_lock); 6724 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
4293 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6725 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4294 spin_unlock(&phba->pport->work_port_lock); 6726 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
4295 6727
4296 /* Return any pending or completed mbox cmds */ 6728 return 1;
4297 list_splice_init(&phba->sli.mboxq, &completions); 6729}
4298 if (psli->mbox_active) { 6730
4299 list_add_tail(&psli->mbox_active->list, &completions); 6731/**
4300 psli->mbox_active = NULL; 6732 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
4301 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6733 * @phba: Pointer to HBA context object.
4302 } 6734 *
4303 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 6735 * This function cleans up all queues, iocb, buffers, mailbox commands while
4304 spin_unlock_irqrestore(&phba->hbalock, flags); 6736 * shutting down the SLI4 HBA FCoE function. This function is called with no
6737 * lock held and always returns 1.
6738 *
6739 * This function does the following to cleanup driver FCoE function resources:
6740 * - Free discovery resources for each virtual port
6741 * - Cleanup any pending fabric iocbs
6742 * - Iterate through the iocb txq and free each entry in the list.
6743 * - Free up any buffer posted to the HBA.
6744 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6745 * - Free mailbox commands in the mailbox queue.
6746 **/
6747int
6748lpfc_sli4_hba_down(struct lpfc_hba *phba)
6749{
6750 /* Stop the SLI4 device port */
6751 lpfc_stop_port(phba);
6752
6753 /* Tear down the queues in the HBA */
6754 lpfc_sli4_queue_unset(phba);
6755
6756 /* unregister default FCFI from the HBA */
6757 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4305 6758
4306 while (!list_empty(&completions)) {
4307 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
4308 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4309 if (pmb->mbox_cmpl)
4310 pmb->mbox_cmpl(phba,pmb);
4311 }
4312 return 1; 6759 return 1;
4313} 6760}
4314 6761
@@ -4639,7 +7086,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4639 iabt = &abtsiocbp->iocb; 7086 iabt = &abtsiocbp->iocb;
4640 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 7087 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
4641 iabt->un.acxri.abortContextTag = icmd->ulpContext; 7088 iabt->un.acxri.abortContextTag = icmd->ulpContext;
4642 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 7089 if (phba->sli_rev == LPFC_SLI_REV4)
7090 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
7091 else
7092 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
4643 iabt->ulpLe = 1; 7093 iabt->ulpLe = 1;
4644 iabt->ulpClass = icmd->ulpClass; 7094 iabt->ulpClass = icmd->ulpClass;
4645 7095
@@ -4655,7 +7105,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4655 "abort cmd iotag x%x\n", 7105 "abort cmd iotag x%x\n",
4656 iabt->un.acxri.abortContextTag, 7106 iabt->un.acxri.abortContextTag,
4657 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 7107 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
4658 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 7108 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
4659 7109
4660 if (retval) 7110 if (retval)
4661 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7111 __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -4838,7 +7288,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4838 cmd = &iocbq->iocb; 7288 cmd = &iocbq->iocb;
4839 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 7289 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
4840 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 7290 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
4841 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 7291 if (phba->sli_rev == LPFC_SLI_REV4)
7292 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
7293 else
7294 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
4842 abtsiocb->iocb.ulpLe = 1; 7295 abtsiocb->iocb.ulpLe = 1;
4843 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7296 abtsiocb->iocb.ulpClass = cmd->ulpClass;
4844 abtsiocb->vport = phba->pport; 7297 abtsiocb->vport = phba->pport;
@@ -4850,7 +7303,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4850 7303
4851 /* Setup callback routine and issue the command. */ 7304 /* Setup callback routine and issue the command. */
4852 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 7305 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4853 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 7306 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
7307 abtsiocb, 0);
4854 if (ret_val == IOCB_ERROR) { 7308 if (ret_val == IOCB_ERROR) {
4855 lpfc_sli_release_iocbq(phba, abtsiocb); 7309 lpfc_sli_release_iocbq(phba, abtsiocb);
4856 errcnt++; 7310 errcnt++;
@@ -4900,6 +7354,32 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
4900} 7354}
4901 7355
4902/** 7356/**
7357 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 7358 * @phba: Pointer to HBA context object.
7359 * @piocbq: Pointer to command iocb.
7360 * @flag: Flag to test.
7361 *
7362 * This routine grabs the hbalock and then test the iocb_flag to
7363 * see if the passed in flag is set.
7364 * Returns:
7365 * 1 if flag is set.
7366 * 0 if flag is not set.
7367 **/
7368static int
7369lpfc_chk_iocb_flg(struct lpfc_hba *phba,
7370 struct lpfc_iocbq *piocbq, uint32_t flag)
7371{
7372 unsigned long iflags;
7373 int ret;
7374
7375 spin_lock_irqsave(&phba->hbalock, iflags);
7376 ret = piocbq->iocb_flag & flag;
7377 spin_unlock_irqrestore(&phba->hbalock, iflags);
7378 return ret;
7379
7380}
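
The point of this small helper is the predicate passed to wait_event_timeout() in the hunk below: the condition is re-evaluated outside any lock, and iocb_flag is updated under hbalock by the completion side, so the test takes hbalock to pair the read with that writer rather than reading the flag word raw:

timeleft = wait_event_timeout(done_q,
			lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
			timeout_req);
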
7381
7382/**
4903 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 7383 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
4904 * @phba: Pointer to HBA context object. 7384 * @phba: Pointer to HBA context object.
4905 * @pring: Pointer to sli ring. 7385 * @ring_number: SLI ring number to issue iocb on.
@@ -4931,7 +7411,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
4931 **/ 7411 **/
4932int 7412int
4933lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 7413lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4934 struct lpfc_sli_ring *pring, 7414 uint32_t ring_number,
4935 struct lpfc_iocbq *piocb, 7415 struct lpfc_iocbq *piocb,
4936 struct lpfc_iocbq *prspiocbq, 7416 struct lpfc_iocbq *prspiocbq,
4937 uint32_t timeout) 7417 uint32_t timeout)
@@ -4962,11 +7442,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4962 readl(phba->HCregaddr); /* flush */ 7442 readl(phba->HCregaddr); /* flush */
4963 } 7443 }
4964 7444
4965 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 7445 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
4966 if (retval == IOCB_SUCCESS) { 7446 if (retval == IOCB_SUCCESS) {
4967 timeout_req = timeout * HZ; 7447 timeout_req = timeout * HZ;
4968 timeleft = wait_event_timeout(done_q, 7448 timeleft = wait_event_timeout(done_q,
4969 piocb->iocb_flag & LPFC_IO_WAKE, 7449 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
4970 timeout_req); 7450 timeout_req);
4971 7451
4972 if (piocb->iocb_flag & LPFC_IO_WAKE) { 7452 if (piocb->iocb_flag & LPFC_IO_WAKE) {
@@ -5077,53 +7557,150 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
5077} 7557}
5078 7558
5079/** 7559/**
5080 * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function 7560 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
5081 * @phba: Pointer to HBA context. 7561 * @phba: Pointer to HBA context.
5082 * 7562 *
5083 * This function is called to cleanup any pending mailbox 7563 * This function is called to shutdown the driver's mailbox sub-system.
5084 * objects in the driver queue before bringing the HBA offline. 7564 * It first puts the mailbox sub-system into a blocked state to prevent
5085 * This function is called while resetting the HBA. 7565 * asynchronous mailbox commands from being issued off the pending mailbox
5086 * The function is called without any lock held. The function 7566 * command queue. If the mailbox command sub-system shutdown is due to
5087 * takes hbalock to update SLI data structure. 7567 * HBA error conditions such as EEH or ERATT, this routine shall invoke
5088 * This function returns 1 when there is an active mailbox 7568 * the mailbox sub-system flush routine to forcefully bring down the
5089 * command pending else returns 0. 7569 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
7570 * as with offline or HBA function reset), this routine will wait for the
7571 * outstanding mailbox command to complete before invoking the mailbox
7572 * sub-system flush routine to gracefully bring down mailbox sub-system.
5090 **/ 7573 **/
5091int 7574void
5092lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 7575lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
5093{ 7576{
5094 struct lpfc_vport *vport = phba->pport; 7577 struct lpfc_sli *psli = &phba->sli;
5095 int i = 0; 7578 uint8_t actcmd = MBX_HEARTBEAT;
5096 uint32_t ha_copy; 7579 unsigned long timeout;
5097 7580
5098 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 7581 spin_lock_irq(&phba->hbalock);
5099 if (i++ > LPFC_MBOX_TMO * 1000) 7582 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5100 return 1; 7583 spin_unlock_irq(&phba->hbalock);
5101 7584
5102 /* 7585 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5103 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
5104 * did finish. This way we won't get the misleading
5105 * "Stray Mailbox Interrupt" message.
5106 */
5107 spin_lock_irq(&phba->hbalock); 7586 spin_lock_irq(&phba->hbalock);
5108 ha_copy = phba->work_ha; 7587 if (phba->sli.mbox_active)
5109 phba->work_ha &= ~HA_MBATT; 7588 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5110 spin_unlock_irq(&phba->hbalock); 7589 spin_unlock_irq(&phba->hbalock);
7590 /* Determine how long we might wait for the active mailbox
7591 * command to be gracefully completed by firmware.
7592 */
7593 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
7594 1000) + jiffies;
7595 while (phba->sli.mbox_active) {
7596 /* Check active mailbox complete status every 2ms */
7597 msleep(2);
7598 if (time_after(jiffies, timeout))
 7599 /* Timeout: let the mailbox flush routine
 7600 * forcefully release the active mailbox command
7601 */
7602 break;
7603 }
7604 }
7605 lpfc_sli_mbox_sys_flush(phba);
7606}
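
The shutdown path above uses the standard jiffies deadline idiom: compute a deadline with msecs_to_jiffies(), sleep in small slices, and bail out via time_after(). A minimal sketch of that idiom outside any lpfc context (the 2 ms poll slice and 100 ms budget are illustrative, not the driver's values):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll a completion predicate until it holds or the time budget expires.
 * Returns 0 on success, -ETIMEDOUT once the deadline has passed.
 */
static int poll_until_done(bool (*done)(void *ctx), void *ctx)
{
        unsigned long deadline = msecs_to_jiffies(100) + jiffies;

        while (!done(ctx)) {
                msleep(2);              /* sleep between samples, don't spin */
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;
        }
        return 0;
}
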
5111 7607
5112 if (ha_copy & HA_MBATT) 7608/**
5113 if (lpfc_sli_handle_mb_event(phba) == 0) 7609 * lpfc_sli_eratt_read - read sli-3 error attention events
5114 i = 0; 7610 * @phba: Pointer to HBA context.
7611 *
7612 * This function is called to read the SLI3 device error attention registers
 7613 * for possible error attention events. The caller must hold the hbalock
 7614 * with spin_lock_irq().
 7615 *
 7616 * This function returns 1 when there is an Error Attention in the Host Attention
7617 * Register and returns 0 otherwise.
7618 **/
7619static int
7620lpfc_sli_eratt_read(struct lpfc_hba *phba)
7621{
7622 uint32_t ha_copy;
7623
7624 /* Read chip Host Attention (HA) register */
7625 ha_copy = readl(phba->HAregaddr);
7626 if (ha_copy & HA_ERATT) {
7627 /* Read host status register to retrieve error event */
7628 lpfc_sli_read_hs(phba);
5115 7629
5116 msleep(1); 7630 /* Check if a deferred error condition is active */
7631 if ((HS_FFER1 & phba->work_hs) &&
7632 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7633 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7634 phba->hba_flag |= DEFER_ERATT;
7635 /* Clear all interrupt enable conditions */
7636 writel(0, phba->HCregaddr);
7637 readl(phba->HCregaddr);
7638 }
7639
7640 /* Set the driver HA work bitmap */
7641 phba->work_ha |= HA_ERATT;
7642 /* Indicate polling handles this ERATT */
7643 phba->hba_flag |= HBA_ERATT_HANDLED;
7644 return 1;
5117 } 7645 }
7646 return 0;
7647}
7648
7649/**
7650 * lpfc_sli4_eratt_read - read sli-4 error attention events
7651 * @phba: Pointer to HBA context.
7652 *
7653 * This function is called to read the SLI4 device error attention registers
 7654 * for possible error attention events. The caller must hold the hbalock
 7655 * with spin_lock_irq().
 7656 *
 7657 * This function returns 1 when there is an Error Attention in the Host Attention
7658 * Register and returns 0 otherwise.
7659 **/
7660static int
7661lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7662{
7663 uint32_t uerr_sta_hi, uerr_sta_lo;
7664 uint32_t onlnreg0, onlnreg1;
5118 7665
5119 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 7666 /* For now, use the SLI4 device internal unrecoverable error
7667 * registers for error attention. This can be changed later.
7668 */
7669 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
7670 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
7671 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
7672 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7673 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7674 if (uerr_sta_lo || uerr_sta_hi) {
7675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7676 "1423 HBA Unrecoverable error: "
7677 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7678 "online0_reg=0x%x, online1_reg=0x%x\n",
7679 uerr_sta_lo, uerr_sta_hi,
7680 onlnreg0, onlnreg1);
 7681 /* TEMP: as the driver error recovery logic is not
7682 * fully developed, we just log the error message
7683 * and the device error attention action is now
7684 * temporarily disabled.
7685 */
7686 return 0;
7687 phba->work_status[0] = uerr_sta_lo;
7688 phba->work_status[1] = uerr_sta_hi;
7689 /* Set the driver HA work bitmap */
7690 phba->work_ha |= HA_ERATT;
7691 /* Indicate polling handles this ERATT */
7692 phba->hba_flag |= HBA_ERATT_HANDLED;
7693 return 1;
7694 }
7695 }
7696 return 0;
5120} 7697}
5121 7698
5122/** 7699/**
5123 * lpfc_sli_check_eratt - check error attention events 7700 * lpfc_sli_check_eratt - check error attention events
5124 * @phba: Pointer to HBA context. 7701 * @phba: Pointer to HBA context.
5125 * 7702 *
5126 * This function is called form timer soft interrupt context to check HBA's 7703 * This function is called from timer soft interrupt context to check HBA's
5127 * error attention register bit for error attention events. 7704 * error attention register bit for error attention events.
5128 * 7705 *
5129 * This function returns 1 when there is an Error Attention in the Host Attention 7706 * This function returns 1 when there is an Error Attention in the Host Attention
@@ -5134,10 +7711,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5134{ 7711{
5135 uint32_t ha_copy; 7712 uint32_t ha_copy;
5136 7713
5137 /* If PCI channel is offline, don't process it */
5138 if (unlikely(pci_channel_offline(phba->pcidev)))
5139 return 0;
5140
5141 /* If somebody is waiting to handle an eratt, don't process it 7714 /* If somebody is waiting to handle an eratt, don't process it
5142 * here. The brdkill function will do this. 7715 * here. The brdkill function will do this.
5143 */ 7716 */
@@ -5161,56 +7734,84 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5161 return 0; 7734 return 0;
5162 } 7735 }
5163 7736
5164 /* Read chip Host Attention (HA) register */ 7737 /* If PCI channel is offline, don't process it */
5165 ha_copy = readl(phba->HAregaddr); 7738 if (unlikely(pci_channel_offline(phba->pcidev))) {
5166 if (ha_copy & HA_ERATT) {
5167 /* Read host status register to retrieve error event */
5168 lpfc_sli_read_hs(phba);
5169
5170 /* Check if there is a deferred error condition is active */
5171 if ((HS_FFER1 & phba->work_hs) &&
5172 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5173 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5174 phba->hba_flag |= DEFER_ERATT;
5175 /* Clear all interrupt enable conditions */
5176 writel(0, phba->HCregaddr);
5177 readl(phba->HCregaddr);
5178 }
5179
5180 /* Set the driver HA work bitmap */
5181 phba->work_ha |= HA_ERATT;
5182 /* Indicate polling handles this ERATT */
5183 phba->hba_flag |= HBA_ERATT_HANDLED;
5184 spin_unlock_irq(&phba->hbalock); 7739 spin_unlock_irq(&phba->hbalock);
5185 return 1; 7740 return 0;
7741 }
7742
7743 switch (phba->sli_rev) {
7744 case LPFC_SLI_REV2:
7745 case LPFC_SLI_REV3:
7746 /* Read chip Host Attention (HA) register */
7747 ha_copy = lpfc_sli_eratt_read(phba);
7748 break;
7749 case LPFC_SLI_REV4:
 7750 /* Read device Unrecoverable Error (UERR) registers */
7751 ha_copy = lpfc_sli4_eratt_read(phba);
7752 break;
7753 default:
7754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7755 "0299 Invalid SLI revision (%d)\n",
7756 phba->sli_rev);
7757 ha_copy = 0;
7758 break;
5186 } 7759 }
5187 spin_unlock_irq(&phba->hbalock); 7760 spin_unlock_irq(&phba->hbalock);
7761
7762 return ha_copy;
7763}
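
Both eratt_read helpers reduce to the same MMIO idiom: readl() the mapped register, test a status bit, and report the result so the caller can update driver state. A stripped-down sketch of that idiom with a hypothetical register layout (MY_ERATT is not an lpfc constant):

#include <linux/io.h>
#include <linux/types.h>

#define MY_ERATT 0x80000000     /* hypothetical error-attention bit */

/* Return 1 if the mapped attention register reports an error, else 0 */
static int check_error_attention(void __iomem *ha_regaddr)
{
        u32 ha = readl(ha_regaddr);     /* read the live hardware register */

        return (ha & MY_ERATT) ? 1 : 0;
}
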
7764
7765/**
7766 * lpfc_intr_state_check - Check device state for interrupt handling
7767 * @phba: Pointer to HBA context.
7768 *
7769 * This inline routine checks whether a device or its PCI slot is in a state
 7770 * in which the interrupt should be handled.
7771 *
 7772 * This function returns 0 if the device or the PCI slot is in a state in
 7773 * which the interrupt should be handled, otherwise -EIO.
7774 */
7775static inline int
7776lpfc_intr_state_check(struct lpfc_hba *phba)
7777{
7778 /* If the pci channel is offline, ignore all the interrupts */
7779 if (unlikely(pci_channel_offline(phba->pcidev)))
7780 return -EIO;
7781
7782 /* Update device level interrupt statistics */
7783 phba->sli.slistat.sli_intr++;
7784
7785 /* Ignore all interrupts during initialization. */
7786 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7787 return -EIO;
7788
5188 return 0; 7789 return 0;
5189} 7790}
5190 7791
5191/** 7792/**
5192 * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver 7793 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
5193 * @irq: Interrupt number. 7794 * @irq: Interrupt number.
5194 * @dev_id: The device context pointer. 7795 * @dev_id: The device context pointer.
5195 * 7796 *
5196 * This function is directly called from the PCI layer as an interrupt 7797 * This function is directly called from the PCI layer as an interrupt
5197 * service routine when the device is enabled with MSI-X multi-message 7798 * service routine when device with SLI-3 interface spec is enabled with
5198 * interrupt mode and there are slow-path events in the HBA. However, 7799 * MSI-X multi-message interrupt mode and there are slow-path events in
5199 * when the device is enabled with either MSI or Pin-IRQ interrupt mode, 7800 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
5200 * this function is called as part of the device-level interrupt handler. 7801 * interrupt mode, this function is called as part of the device-level
5201 * When the PCI slot is in error recovery or the HBA is undergoing 7802 * interrupt handler. When the PCI slot is in error recovery or the HBA
5202 * initialization, the interrupt handler will not process the interrupt. 7803 * is undergoing initialization, the interrupt handler will not process
5203 * The link attention and ELS ring attention events are handled by the 7804 * the interrupt. The link attention and ELS ring attention events are
5204 * worker thread. The interrupt handler signals the worker thread and 7805 * handled by the worker thread. The interrupt handler signals the worker
5205 * and returns for these events. This function is called without any 7806 * thread and returns for these events. This function is called without
5206 * lock held. It gets the hbalock to access and update SLI data 7807 * any lock held. It gets the hbalock to access and update SLI data
5207 * structures. 7808 * structures.
5208 * 7809 *
5209 * This function returns IRQ_HANDLED when interrupt is handled else it 7810 * This function returns IRQ_HANDLED when interrupt is handled else it
5210 * returns IRQ_NONE. 7811 * returns IRQ_NONE.
5211 **/ 7812 **/
5212irqreturn_t 7813irqreturn_t
5213lpfc_sp_intr_handler(int irq, void *dev_id) 7814lpfc_sli_sp_intr_handler(int irq, void *dev_id)
5214{ 7815{
5215 struct lpfc_hba *phba; 7816 struct lpfc_hba *phba;
5216 uint32_t ha_copy; 7817 uint32_t ha_copy;
@@ -5240,13 +7841,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5240 * individual interrupt handler in MSI-X multi-message interrupt mode 7841 * individual interrupt handler in MSI-X multi-message interrupt mode
5241 */ 7842 */
5242 if (phba->intr_type == MSIX) { 7843 if (phba->intr_type == MSIX) {
5243 /* If the pci channel is offline, ignore all the interrupts */ 7844 /* Check device state for handling interrupt */
5244 if (unlikely(pci_channel_offline(phba->pcidev))) 7845 if (lpfc_intr_state_check(phba))
5245 return IRQ_NONE;
5246 /* Update device-level interrupt statistics */
5247 phba->sli.slistat.sli_intr++;
5248 /* Ignore all interrupts during initialization. */
5249 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5250 return IRQ_NONE; 7846 return IRQ_NONE;
5251 /* Need to read HA REG for slow-path events */ 7847 /* Need to read HA REG for slow-path events */
5252 spin_lock_irqsave(&phba->hbalock, iflag); 7848 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +7867,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5271 * interrupt. 7867 * interrupt.
5272 */ 7868 */
5273 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7869 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5274 spin_unlock_irq(&phba->hbalock); 7870 spin_unlock_irqrestore(&phba->hbalock, iflag);
5275 return IRQ_NONE; 7871 return IRQ_NONE;
5276 } 7872 }
5277 7873
@@ -5364,7 +7960,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5364 7960
5365 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 7961 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
5366 pmb = phba->sli.mbox_active; 7962 pmb = phba->sli.mbox_active;
5367 pmbox = &pmb->mb; 7963 pmbox = &pmb->u.mb;
5368 mbox = phba->mbox; 7964 mbox = phba->mbox;
5369 vport = pmb->vport; 7965 vport = pmb->vport;
5370 7966
@@ -5434,7 +8030,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5434 LOG_MBOX | LOG_SLI, 8030 LOG_MBOX | LOG_SLI,
5435 "0350 rc should have" 8031 "0350 rc should have"
5436 "been MBX_BUSY"); 8032 "been MBX_BUSY");
5437 goto send_current_mbox; 8033 if (rc != MBX_NOT_FINISHED)
8034 goto send_current_mbox;
5438 } 8035 }
5439 } 8036 }
5440 spin_lock_irqsave( 8037 spin_lock_irqsave(
@@ -5471,29 +8068,29 @@ send_current_mbox:
5471 } 8068 }
5472 return IRQ_HANDLED; 8069 return IRQ_HANDLED;
5473 8070
5474} /* lpfc_sp_intr_handler */ 8071} /* lpfc_sli_sp_intr_handler */
5475 8072
5476/** 8073/**
5477 * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver 8074 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
5478 * @irq: Interrupt number. 8075 * @irq: Interrupt number.
5479 * @dev_id: The device context pointer. 8076 * @dev_id: The device context pointer.
5480 * 8077 *
5481 * This function is directly called from the PCI layer as an interrupt 8078 * This function is directly called from the PCI layer as an interrupt
5482 * service routine when the device is enabled with MSI-X multi-message 8079 * service routine when device with SLI-3 interface spec is enabled with
5483 * interrupt mode and there is a fast-path FCP IOCB ring event in the 8080 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
5484 * HBA. However, when the device is enabled with either MSI or Pin-IRQ 8081 * ring event in the HBA. However, when the device is enabled with either
5485 * interrupt mode, this function is called as part of the device-level 8082 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
5486 * interrupt handler. When the PCI slot is in error recovery or the HBA 8083 * device-level interrupt handler. When the PCI slot is in error recovery
5487 * is undergoing initialization, the interrupt handler will not process 8084 * or the HBA is undergoing initialization, the interrupt handler will not
5488 * the interrupt. The SCSI FCP fast-path ring event are handled in the 8085 * process the interrupt. The SCSI FCP fast-path ring event are handled in
5489 * intrrupt context. This function is called without any lock held. It 8086 * process the interrupt. The SCSI FCP fast-path ring events are handled in
5490 * gets the hbalock to access and update SLI data structures. 8087 * the interrupt context. This function is called without any lock held.
5491 * 8088 *
5492 * This function returns IRQ_HANDLED when interrupt is handled else it 8089 * This function returns IRQ_HANDLED when interrupt is handled else it
5493 * returns IRQ_NONE. 8090 * returns IRQ_NONE.
5494 **/ 8091 **/
5495irqreturn_t 8092irqreturn_t
5496lpfc_fp_intr_handler(int irq, void *dev_id) 8093lpfc_sli_fp_intr_handler(int irq, void *dev_id)
5497{ 8094{
5498 struct lpfc_hba *phba; 8095 struct lpfc_hba *phba;
5499 uint32_t ha_copy; 8096 uint32_t ha_copy;
@@ -5513,13 +8110,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5513 * individual interrupt handler in MSI-X multi-message interrupt mode 8110 * individual interrupt handler in MSI-X multi-message interrupt mode
5514 */ 8111 */
5515 if (phba->intr_type == MSIX) { 8112 if (phba->intr_type == MSIX) {
5516 /* If pci channel is offline, ignore all the interrupts */ 8113 /* Check device state for handling interrupt */
5517 if (unlikely(pci_channel_offline(phba->pcidev))) 8114 if (lpfc_intr_state_check(phba))
5518 return IRQ_NONE;
5519 /* Update device-level interrupt statistics */
5520 phba->sli.slistat.sli_intr++;
5521 /* Ignore all interrupts during initialization. */
5522 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5523 return IRQ_NONE; 8115 return IRQ_NONE;
5524 /* Need to read HA REG for FCP ring and other ring events */ 8116 /* Need to read HA REG for FCP ring and other ring events */
5525 ha_copy = readl(phba->HAregaddr); 8117 ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +8122,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5530 * any interrupt. 8122 * any interrupt.
5531 */ 8123 */
5532 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8124 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5533 spin_unlock_irq(&phba->hbalock); 8125 spin_unlock_irqrestore(&phba->hbalock, iflag);
5534 return IRQ_NONE; 8126 return IRQ_NONE;
5535 } 8127 }
5536 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8128 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +8158,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5566 } 8158 }
5567 } 8159 }
5568 return IRQ_HANDLED; 8160 return IRQ_HANDLED;
5569} /* lpfc_fp_intr_handler */ 8161} /* lpfc_sli_fp_intr_handler */
5570 8162
5571/** 8163/**
5572 * lpfc_intr_handler - The device-level interrupt handler of lpfc driver 8164 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
5573 * @irq: Interrupt number. 8165 * @irq: Interrupt number.
5574 * @dev_id: The device context pointer. 8166 * @dev_id: The device context pointer.
5575 * 8167 *
5576 * This function is the device-level interrupt handler called from the PCI 8168 * This function is the HBA device-level interrupt handler to device with
5577 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is 8169 * SLI-3 interface spec, called from the PCI layer when either MSI or
5578 * an event in the HBA which requires driver attention. This function 8170 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
5579 * invokes the slow-path interrupt attention handling function and fast-path 8171 * requires driver attention. This function invokes the slow-path interrupt
5580 * interrupt attention handling function in turn to process the relevant 8172 * attention handling function and fast-path interrupt attention handling
5581 * HBA attention events. This function is called without any lock held. It 8173 * function in turn to process the relevant HBA attention events. This
5582 * gets the hbalock to access and update SLI data structures. 8174 * function is called without any lock held. It gets the hbalock to access
8175 * and update SLI data structures.
5583 * 8176 *
5584 * This function returns IRQ_HANDLED when interrupt is handled, else it 8177 * This function returns IRQ_HANDLED when interrupt is handled, else it
5585 * returns IRQ_NONE. 8178 * returns IRQ_NONE.
5586 **/ 8179 **/
5587irqreturn_t 8180irqreturn_t
5588lpfc_intr_handler(int irq, void *dev_id) 8181lpfc_sli_intr_handler(int irq, void *dev_id)
5589{ 8182{
5590 struct lpfc_hba *phba; 8183 struct lpfc_hba *phba;
5591 irqreturn_t sp_irq_rc, fp_irq_rc; 8184 irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +8193,8 @@ lpfc_intr_handler(int irq, void *dev_id)
5600 if (unlikely(!phba)) 8193 if (unlikely(!phba))
5601 return IRQ_NONE; 8194 return IRQ_NONE;
5602 8195
5603 /* If the pci channel is offline, ignore all the interrupts. */ 8196 /* Check device state for handling interrupt */
5604 if (unlikely(pci_channel_offline(phba->pcidev))) 8197 if (lpfc_intr_state_check(phba))
5605 return IRQ_NONE;
5606
5607 /* Update device level interrupt statistics */
5608 phba->sli.slistat.sli_intr++;
5609
5610 /* Ignore all interrupts during initialization. */
5611 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5612 return IRQ_NONE; 8198 return IRQ_NONE;
5613 8199
5614 spin_lock(&phba->hbalock); 8200 spin_lock(&phba->hbalock);
@@ -5650,7 +8236,7 @@ lpfc_intr_handler(int irq, void *dev_id)
5650 status2 >>= (4*LPFC_ELS_RING); 8236 status2 >>= (4*LPFC_ELS_RING);
5651 8237
5652 if (status1 || (status2 & HA_RXMASK)) 8238 if (status1 || (status2 & HA_RXMASK))
5653 sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); 8239 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
5654 else 8240 else
5655 sp_irq_rc = IRQ_NONE; 8241 sp_irq_rc = IRQ_NONE;
5656 8242
@@ -5670,10 +8256,3321 @@ lpfc_intr_handler(int irq, void *dev_id)
5670 status2 = 0; 8256 status2 = 0;
5671 8257
5672 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 8258 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
5673 fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); 8259 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
5674 else 8260 else
5675 fp_irq_rc = IRQ_NONE; 8261 fp_irq_rc = IRQ_NONE;
5676 8262
5677 /* Return device-level interrupt handling status */ 8263 /* Return device-level interrupt handling status */
5678 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 8264 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
5679} /* lpfc_intr_handler */ 8265} /* lpfc_sli_intr_handler */
8266
8267/**
8268 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
8269 * @phba: pointer to lpfc hba data structure.
8270 *
8271 * This routine is invoked by the worker thread to process all the pending
 8272 * SLI4 FCP XRI abort events.
8273 **/
8274void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
8275{
8276 struct lpfc_cq_event *cq_event;
8277
8278 /* First, declare the fcp xri abort event has been handled */
8279 spin_lock_irq(&phba->hbalock);
8280 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
8281 spin_unlock_irq(&phba->hbalock);
8282 /* Now, handle all the fcp xri abort events */
8283 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
8284 /* Get the first event from the head of the event queue */
8285 spin_lock_irq(&phba->hbalock);
8286 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8287 cq_event, struct lpfc_cq_event, list);
8288 spin_unlock_irq(&phba->hbalock);
8289 /* Notify aborted XRI for FCP work queue */
8290 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8291 /* Free the event processed back to the free pool */
8292 lpfc_sli4_cq_event_release(phba, cq_event);
8293 }
8294}
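
This processor and the ELS variant below share one shape: clear the "pending" flag first (so a new event re-arms the worker), then repeatedly pop one entry under hbalock and handle it with the lock dropped. A generic sketch of that shape with a hypothetical my_event type, using stock list helpers in place of lpfc's list_remove_head macro:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_event {                       /* hypothetical queued event */
        struct list_head list;
};

static void drain_events(spinlock_t *lock, struct list_head *queue,
                         void (*handle)(struct my_event *ev))
{
        struct my_event *ev;

        while (!list_empty(queue)) {
                spin_lock_irq(lock);
                ev = list_first_entry(queue, struct my_event, list);
                list_del(&ev->list);    /* detach under the lock */
                spin_unlock_irq(lock);
                handle(ev);             /* process (and free) unlocked */
        }
}
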
8295
8296/**
8297 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
8298 * @phba: pointer to lpfc hba data structure.
8299 *
8300 * This routine is invoked by the worker thread to process all the pending
 8301 * SLI4 ELS XRI abort events.
8302 **/
8303void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8304{
8305 struct lpfc_cq_event *cq_event;
8306
8307 /* First, declare the els xri abort event has been handled */
8308 spin_lock_irq(&phba->hbalock);
8309 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
8310 spin_unlock_irq(&phba->hbalock);
8311 /* Now, handle all the els xri abort events */
8312 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
8313 /* Get the first event from the head of the event queue */
8314 spin_lock_irq(&phba->hbalock);
8315 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8316 cq_event, struct lpfc_cq_event, list);
8317 spin_unlock_irq(&phba->hbalock);
8318 /* Notify aborted XRI for ELS work queue */
8319 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8320 /* Free the event processed back to the free pool */
8321 lpfc_sli4_cq_event_release(phba, cq_event);
8322 }
8323}
8324
8325static void
8326lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8327 struct lpfc_iocbq *pIocbOut,
8328 struct lpfc_wcqe_complete *wcqe)
8329{
8330 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8331
8332 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8333 sizeof(struct lpfc_iocbq) - offset);
8334 memset(&pIocbIn->sli4_info, 0,
8335 sizeof(struct lpfc_sli4_rspiocb_info));
8336 /* Map WCQE parameters into irspiocb parameters */
8337 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8338 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
8339 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
8340 pIocbIn->iocb.un.fcpi.fcpi_parm =
8341 pIocbOut->iocb.un.fcpi.fcpi_parm -
8342 wcqe->total_data_placed;
8343 else
8344 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8345 else
8346 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8347 /* Load in additional WCQE parameters */
8348 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
8349 pIocbIn->sli4_info.bfield = 0;
8350 if (bf_get(lpfc_wcqe_c_xb, wcqe))
8351 pIocbIn->sli4_info.bfield |= LPFC_XB;
8352 if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
8353 pIocbIn->sli4_info.bfield |= LPFC_PV;
8354 pIocbIn->sli4_info.priority =
8355 bf_get(lpfc_wcqe_c_priority, wcqe);
8356 }
8357}
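
The offsetof() trick above copies everything from the iocb member to the end of the structure in one memcpy, leaving the fields before it (list linkage and the like) untouched. A toy, self-contained version of the same idiom (the struct is invented for illustration):

#include <stddef.h>
#include <string.h>

struct toy_iocbq {
        int list_state;                 /* bookkeeping: must survive the copy */
        int iocb;                       /* first member to transfer */
        int words[4];                   /* everything after it comes along too */
};

static void copy_tail(struct toy_iocbq *dst, const struct toy_iocbq *src)
{
        size_t off = offsetof(struct toy_iocbq, iocb);

        /* copy [iocb .. end of struct), preserving earlier members of dst */
        memcpy((char *)dst + off, (const char *)src + off,
               sizeof(struct toy_iocbq) - off);
}
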
8358
8359/**
 8360 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
8361 * @phba: Pointer to HBA context object.
8362 * @cqe: Pointer to mailbox completion queue entry.
8363 *
 8364 * This routine processes a mailbox completion queue entry carrying an
 8365 * asynchronous event.
8366 *
8367 * Return: true if work posted to worker thread, otherwise false.
8368 **/
8369static bool
8370lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8371{
8372 struct lpfc_cq_event *cq_event;
8373 unsigned long iflags;
8374
8375 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8376 "0392 Async Event: word0:x%x, word1:x%x, "
8377 "word2:x%x, word3:x%x\n", mcqe->word0,
8378 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
8379
8380 /* Allocate a new internal CQ_EVENT entry */
8381 cq_event = lpfc_sli4_cq_event_alloc(phba);
8382 if (!cq_event) {
8383 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8384 "0394 Failed to allocate CQ_EVENT entry\n");
8385 return false;
8386 }
8387
8388 /* Move the CQE into an asynchronous event entry */
8389 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
8390 spin_lock_irqsave(&phba->hbalock, iflags);
8391 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
8392 /* Set the async event flag */
8393 phba->hba_flag |= ASYNC_EVENT;
8394 spin_unlock_irqrestore(&phba->hbalock, iflags);
8395
8396 return true;
8397}
8398
8399/**
8400 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
8401 * @phba: Pointer to HBA context object.
8402 * @cqe: Pointer to mailbox completion queue entry.
8403 *
 8404 * This routine processes a mailbox completion queue entry carrying a
 8405 * mailbox completion event.
8406 *
8407 * Return: true if work posted to worker thread, otherwise false.
8408 **/
8409static bool
8410lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8411{
8412 uint32_t mcqe_status;
8413 MAILBOX_t *mbox, *pmbox;
8414 struct lpfc_mqe *mqe;
8415 struct lpfc_vport *vport;
8416 struct lpfc_nodelist *ndlp;
8417 struct lpfc_dmabuf *mp;
8418 unsigned long iflags;
8419 LPFC_MBOXQ_t *pmb;
8420 bool workposted = false;
8421 int rc;
8422
 8423 /* If not a mailbox-complete MCQE, just honor the consumed bit and bail */
8424 if (!bf_get(lpfc_trailer_completed, mcqe))
8425 goto out_no_mqe_complete;
8426
8427 /* Get the reference to the active mbox command */
8428 spin_lock_irqsave(&phba->hbalock, iflags);
8429 pmb = phba->sli.mbox_active;
8430 if (unlikely(!pmb)) {
8431 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8432 "1832 No pending MBOX command to handle\n");
8433 spin_unlock_irqrestore(&phba->hbalock, iflags);
8434 goto out_no_mqe_complete;
8435 }
8436 spin_unlock_irqrestore(&phba->hbalock, iflags);
8437 mqe = &pmb->u.mqe;
8438 pmbox = (MAILBOX_t *)&pmb->u.mqe;
8439 mbox = phba->mbox;
8440 vport = pmb->vport;
8441
8442 /* Reset heartbeat timer */
8443 phba->last_completion_time = jiffies;
8444 del_timer(&phba->sli.mbox_tmo);
8445
8446 /* Move mbox data to caller's mailbox region, do endian swapping */
8447 if (pmb->mbox_cmpl && mbox)
8448 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8449 /* Set the mailbox status with SLI4 range 0x4000 */
8450 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8451 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8452 bf_set(lpfc_mqe_status, mqe,
8453 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8454
8455 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8456 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8457 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8458 "MBOX dflt rpi: status:x%x rpi:x%x",
8459 mcqe_status,
8460 pmbox->un.varWords[0], 0);
8461 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8462 mp = (struct lpfc_dmabuf *)(pmb->context1);
8463 ndlp = (struct lpfc_nodelist *)pmb->context2;
 8464 /* Reg_LOGIN of dflt RPI was successful. Now let's get
 8465 * rid of the RPI using the same mbox buffer.
8466 */
8467 lpfc_unreg_login(phba, vport->vpi,
8468 pmbox->un.varWords[0], pmb);
8469 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
8470 pmb->context1 = mp;
8471 pmb->context2 = ndlp;
8472 pmb->vport = vport;
8473 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8474 if (rc != MBX_BUSY)
8475 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8476 LOG_SLI, "0385 rc should "
8477 "have been MBX_BUSY\n");
8478 if (rc != MBX_NOT_FINISHED)
8479 goto send_current_mbox;
8480 }
8481 }
8482 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8483 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8484 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8485
8486 /* There is mailbox completion work to do */
8487 spin_lock_irqsave(&phba->hbalock, iflags);
8488 __lpfc_mbox_cmpl_put(phba, pmb);
8489 phba->work_ha |= HA_MBATT;
8490 spin_unlock_irqrestore(&phba->hbalock, iflags);
8491 workposted = true;
8492
8493send_current_mbox:
8494 spin_lock_irqsave(&phba->hbalock, iflags);
8495 /* Release the mailbox command posting token */
8496 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8497 /* Setting active mailbox pointer need to be in sync to flag clear */
8498 phba->sli.mbox_active = NULL;
8499 spin_unlock_irqrestore(&phba->hbalock, iflags);
8500 /* Wake up worker thread to post the next pending mailbox command */
8501 lpfc_worker_wake_up(phba);
8502out_no_mqe_complete:
8503 if (bf_get(lpfc_trailer_consumed, mcqe))
8504 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
8505 return workposted;
8506}
8507
8508/**
8509 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
8510 * @phba: Pointer to HBA context object.
8511 * @cqe: Pointer to mailbox completion queue entry.
8512 *
 8513 * This routine processes a mailbox completion queue entry; it invokes the
 8514 * proper mailbox completion handling or asynchronous event handling routine
8515 * according to the MCQE's async bit.
8516 *
8517 * Return: true if work posted to worker thread, otherwise false.
8518 **/
8519static bool
8520lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8521{
8522 struct lpfc_mcqe mcqe;
8523 bool workposted;
8524
8525 /* Copy the mailbox MCQE and convert endian order as needed */
8526 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8527
8528 /* Invoke the proper event handling routine */
8529 if (!bf_get(lpfc_trailer_async, &mcqe))
8530 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8531 else
8532 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8533 return workposted;
8534}
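
The bf_get()/bf_set() calls sprinkled through these handlers are lpfc's token-pasting shift-and-mask accessors: each field f is described by f_SHIFT, f_MASK and the word f_WORD it lives in. A hedged reconstruction of how such accessors are typically defined (the my_async field here is made up; see lpfc_hw4.h for the real definitions):

#include <linux/types.h>

struct my_mcqe {
        u32 trailer;                    /* hypothetical trailer word */
};

/* Field descriptor: a 1-bit flag in bit 31 of 'trailer' */
#define my_async_SHIFT  31
#define my_async_MASK   0x00000001
#define my_async_WORD   trailer

#define my_bf_get(name, ptr) \
        ((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
#define my_bf_set(name, ptr, value) \
        ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
                               ((ptr)->name##_WORD & \
                                ~(name##_MASK << name##_SHIFT))))

/* usage: if (my_bf_get(my_async, &mcqe)) ...  */
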
8535
8536/**
8537 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
8538 * @phba: Pointer to HBA context object.
8539 * @wcqe: Pointer to work-queue completion queue entry.
8540 *
8541 * This routine handles an ELS work-queue completion event.
8542 *
8543 * Return: true if work posted to worker thread, otherwise false.
8544 **/
8545static bool
8546lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8547 struct lpfc_wcqe_complete *wcqe)
8548{
8549 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8550 struct lpfc_iocbq *cmdiocbq;
8551 struct lpfc_iocbq *irspiocbq;
8552 unsigned long iflags;
8553 bool workposted = false;
8554
8555 spin_lock_irqsave(&phba->hbalock, iflags);
8556 pring->stats.iocb_event++;
8557 /* Look up the ELS command IOCB and create pseudo response IOCB */
8558 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8559 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8560 spin_unlock_irqrestore(&phba->hbalock, iflags);
8561
8562 if (unlikely(!cmdiocbq)) {
8563 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8564 "0386 ELS complete with no corresponding "
8565 "cmdiocb: iotag (%d)\n",
8566 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8567 return workposted;
8568 }
8569
8570 /* Fake the irspiocbq and copy necessary response information */
8571 irspiocbq = lpfc_sli_get_iocbq(phba);
8572 if (!irspiocbq) {
8573 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8574 "0387 Failed to allocate an iocbq\n");
8575 return workposted;
8576 }
8577 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8578
8579 /* Add the irspiocb to the response IOCB work list */
8580 spin_lock_irqsave(&phba->hbalock, iflags);
8581 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
8582 /* Indicate ELS ring attention */
8583 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
8584 spin_unlock_irqrestore(&phba->hbalock, iflags);
8585 workposted = true;
8586
8587 return workposted;
8588}
8589
8590/**
8591 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8592 * @phba: Pointer to HBA context object.
8593 * @wcqe: Pointer to work-queue completion queue entry.
8594 *
 8595 * This routine handles a slow-path WQ entry consumed event by invoking the
 8596 * proper WQ release routine on the slow-path WQ.
8597 **/
8598static void
8599lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8600 struct lpfc_wcqe_release *wcqe)
8601{
8602 /* Check for the slow-path ELS work queue */
8603 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8604 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8605 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8606 else
8607 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8608 "2579 Slow-path wqe consume event carries "
8609 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8610 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8611 phba->sli4_hba.els_wq->queue_id);
8612}
8613
8614/**
 8615 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
8616 * @phba: Pointer to HBA context object.
8617 * @cq: Pointer to a WQ completion queue.
8618 * @wcqe: Pointer to work-queue completion queue entry.
8619 *
8620 * This routine handles an XRI abort event.
8621 *
8622 * Return: true if work posted to worker thread, otherwise false.
8623 **/
8624static bool
8625lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8626 struct lpfc_queue *cq,
8627 struct sli4_wcqe_xri_aborted *wcqe)
8628{
8629 bool workposted = false;
8630 struct lpfc_cq_event *cq_event;
8631 unsigned long iflags;
8632
8633 /* Allocate a new internal CQ_EVENT entry */
8634 cq_event = lpfc_sli4_cq_event_alloc(phba);
8635 if (!cq_event) {
8636 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8637 "0602 Failed to allocate CQ_EVENT entry\n");
8638 return false;
8639 }
8640
8641 /* Move the CQE into the proper xri abort event list */
8642 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
8643 switch (cq->subtype) {
8644 case LPFC_FCP:
8645 spin_lock_irqsave(&phba->hbalock, iflags);
8646 list_add_tail(&cq_event->list,
8647 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
8648 /* Set the fcp xri abort event flag */
8649 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
8650 spin_unlock_irqrestore(&phba->hbalock, iflags);
8651 workposted = true;
8652 break;
8653 case LPFC_ELS:
8654 spin_lock_irqsave(&phba->hbalock, iflags);
8655 list_add_tail(&cq_event->list,
8656 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
8657 /* Set the els xri abort event flag */
8658 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
8659 spin_unlock_irqrestore(&phba->hbalock, iflags);
8660 workposted = true;
8661 break;
8662 default:
8663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8664 "0603 Invalid work queue CQE subtype (x%x)\n",
8665 cq->subtype);
8666 workposted = false;
8667 break;
8668 }
8669 return workposted;
8670}
8671
8672/**
8673 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8674 * @phba: Pointer to HBA context object.
8675 * @cq: Pointer to the completion queue.
8676 * @wcqe: Pointer to a completion queue entry.
8677 *
 8678 * This routine processes a slow-path work-queue completion queue entry.
8679 *
8680 * Return: true if work posted to worker thread, otherwise false.
8681 **/
8682static bool
8683lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8684 struct lpfc_cqe *cqe)
8685{
8686 struct lpfc_wcqe_complete wcqe;
8687 bool workposted = false;
8688
8689 /* Copy the work queue CQE and convert endian order if needed */
8690 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8691
8692 /* Check and process for different type of WCQE and dispatch */
8693 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8694 case CQE_CODE_COMPL_WQE:
8695 /* Process the WQ complete event */
8696 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8697 (struct lpfc_wcqe_complete *)&wcqe);
8698 break;
8699 case CQE_CODE_RELEASE_WQE:
8700 /* Process the WQ release event */
8701 lpfc_sli4_sp_handle_rel_wcqe(phba,
8702 (struct lpfc_wcqe_release *)&wcqe);
8703 break;
8704 case CQE_CODE_XRI_ABORTED:
8705 /* Process the WQ XRI abort event */
8706 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8707 (struct sli4_wcqe_xri_aborted *)&wcqe);
8708 break;
8709 default:
8710 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8711 "0388 Not a valid WCQE code: x%x\n",
8712 bf_get(lpfc_wcqe_c_code, &wcqe));
8713 break;
8714 }
8715 return workposted;
8716}
8717
8718/**
8719 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8720 * @phba: Pointer to HBA context object.
8721 * @rcqe: Pointer to receive-queue completion queue entry.
8722 *
 8723 * This routine processes a receive-queue completion queue entry.
8724 *
8725 * Return: true if work posted to worker thread, otherwise false.
8726 **/
8727static bool
8728lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8729{
8730 struct lpfc_rcqe rcqe;
8731 bool workposted = false;
8732 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8733 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8734 struct hbq_dmabuf *dma_buf;
8735 uint32_t status;
8736 unsigned long iflags;
8737
8738 /* Copy the receive queue CQE and convert endian order if needed */
8739 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8740 lpfc_sli4_rq_release(hrq, drq);
8741 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8742 goto out;
8743 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8744 goto out;
8745
8746 status = bf_get(lpfc_rcqe_status, &rcqe);
8747 switch (status) {
8748 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8749 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8750 "2537 Receive Frame Truncated!!\n");
8751 case FC_STATUS_RQ_SUCCESS:
8752 spin_lock_irqsave(&phba->hbalock, iflags);
8753 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8754 if (!dma_buf) {
8755 spin_unlock_irqrestore(&phba->hbalock, iflags);
8756 goto out;
8757 }
8758 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
 8759 /* save off the frame for the worker thread to process */
8760 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
8761 /* Frame received */
8762 phba->hba_flag |= HBA_RECEIVE_BUFFER;
8763 spin_unlock_irqrestore(&phba->hbalock, iflags);
8764 workposted = true;
8765 break;
8766 case FC_STATUS_INSUFF_BUF_NEED_BUF:
8767 case FC_STATUS_INSUFF_BUF_FRM_DISC:
8768 /* Post more buffers if possible */
8769 spin_lock_irqsave(&phba->hbalock, iflags);
8770 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
8771 spin_unlock_irqrestore(&phba->hbalock, iflags);
8772 workposted = true;
8773 break;
8774 }
8775out:
8776 return workposted;
8777
8778}
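
The success leg above is a flag-plus-list handoff: the frame is queued and HBA_RECEIVE_BUFFER raised under one hbalock hold, so the worker can test the flag and drain the list without missing a frame. A reduced sketch of the producer half with hypothetical names (my_hba, MY_RECEIVE_BUFFER):

#include <linux/list.h>
#include <linux/spinlock.h>

#define MY_RECEIVE_BUFFER 0x1           /* hypothetical work flag */

struct my_hba {
        spinlock_t lock;
        unsigned long flag;
        struct list_head pend_list;
};

/* IRQ-side producer: queue one frame and raise the flag atomically */
static void post_frame(struct my_hba *hba, struct list_head *frame)
{
        unsigned long iflags;

        spin_lock_irqsave(&hba->lock, iflags);
        list_add_tail(frame, &hba->pend_list);
        hba->flag |= MY_RECEIVE_BUFFER; /* worker checks this bit */
        spin_unlock_irqrestore(&hba->lock, iflags);
}
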
8779
8780/**
8781 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
8782 * @phba: Pointer to HBA context object.
8783 * @eqe: Pointer to fast-path event queue entry.
8784 *
 8785 * This routine processes an event queue entry from the slow-path event queue.
 8786 * It checks the MajorCode and MinorCode to determine whether this is a
 8787 * completion event on a completion queue; if not, an error is logged and the
 8788 * routine just returns. Otherwise, it finds the corresponding completion
 8789 * queue, processes all the entries on that completion queue, rearms the
 8790 * completion queue, and then returns.
8791 *
8792 **/
8793static void
8794lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8795{
8796 struct lpfc_queue *cq = NULL, *childq, *speq;
8797 struct lpfc_cqe *cqe;
8798 bool workposted = false;
8799 int ecount = 0;
8800 uint16_t cqid;
8801
8802 if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
8803 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8804 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8805 "0359 Not a valid slow-path completion "
8806 "event: majorcode=x%x, minorcode=x%x\n",
8807 bf_get(lpfc_eqe_major_code, eqe),
8808 bf_get(lpfc_eqe_minor_code, eqe));
8809 return;
8810 }
8811
8812 /* Get the reference to the corresponding CQ */
8813 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8814
8815 /* Search for completion queue pointer matching this cqid */
8816 speq = phba->sli4_hba.sp_eq;
8817 list_for_each_entry(childq, &speq->child_list, list) {
8818 if (childq->queue_id == cqid) {
8819 cq = childq;
8820 break;
8821 }
8822 }
8823 if (unlikely(!cq)) {
8824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8825 "0365 Slow-path CQ identifier (%d) does "
8826 "not exist\n", cqid);
8827 return;
8828 }
8829
8830 /* Process all the entries to the CQ */
8831 switch (cq->type) {
8832 case LPFC_MCQ:
8833 while ((cqe = lpfc_sli4_cq_get(cq))) {
8834 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
8835 if (!(++ecount % LPFC_GET_QE_REL_INT))
8836 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8837 }
8838 break;
8839 case LPFC_WCQ:
8840 while ((cqe = lpfc_sli4_cq_get(cq))) {
8841 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
8842 if (!(++ecount % LPFC_GET_QE_REL_INT))
8843 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8844 }
8845 break;
8846 case LPFC_RCQ:
8847 while ((cqe = lpfc_sli4_cq_get(cq))) {
8848 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8849 if (!(++ecount % LPFC_GET_QE_REL_INT))
8850 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8851 }
8852 break;
8853 default:
8854 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8855 "0370 Invalid completion queue type (%d)\n",
8856 cq->type);
8857 return;
8858 }
8859
8860 /* Catch the no cq entry condition, log an error */
8861 if (unlikely(ecount == 0))
8862 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8863 "0371 No entry from the CQ: identifier "
8864 "(x%x), type (%d)\n", cq->queue_id, cq->type);
8865
 8866 /* In any case, flush and re-arm the CQ */
8867 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8868
8869 /* wake up worker thread if there are works to be done */
8870 if (workposted)
8871 lpfc_worker_wake_up(phba);
8872}
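
Each of the per-type loops above follows the same batched-release discipline: consumed CQEs are handed back every LPFC_GET_QE_REL_INT entries with LPFC_QUEUE_NOARM (so the port can reuse the slots mid-drain), and one final LPFC_QUEUE_REARM release re-enables the interrupt once the queue is empty. Condensed into a single helper, using only the primitives already visible in this hunk:

/* Condensed restatement of the drain loops above; not a new driver API */
static bool drain_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
                     bool (*handle)(struct lpfc_hba *, struct lpfc_cqe *))
{
        struct lpfc_cqe *cqe;
        bool workposted = false;
        int ecount = 0;

        while ((cqe = lpfc_sli4_cq_get(cq))) {
                workposted |= handle(phba, cqe);
                /* return slots periodically, but keep the CQ un-armed */
                if (!(++ecount % LPFC_GET_QE_REL_INT))
                        lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
        }
        /* final release re-arms the CQ so new completions raise an EQE */
        lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
        return workposted;
}
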
8873
8874/**
8875 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 8876 * @wcqe: Pointer to work-queue completion queue entry.
8877 *
 8878 * This routine processes a fast-path work queue completion entry from the
 8879 * fast-path event queue for FCP command response completion.
8880 **/
8881static void
8882lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8883 struct lpfc_wcqe_complete *wcqe)
8884{
8885 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8886 struct lpfc_iocbq *cmdiocbq;
8887 struct lpfc_iocbq irspiocbq;
8888 unsigned long iflags;
8889
8890 spin_lock_irqsave(&phba->hbalock, iflags);
8891 pring->stats.iocb_event++;
8892 spin_unlock_irqrestore(&phba->hbalock, iflags);
8893
8894 /* Check for response status */
8895 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
8896 /* If resource errors reported from HBA, reduce queue
8897 * depth of the SCSI device.
8898 */
8899 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
8900 IOSTAT_LOCAL_REJECT) &&
8901 (wcqe->parameter == IOERR_NO_RESOURCES)) {
8902 phba->lpfc_rampdown_queue_depth(phba);
8903 }
8904 /* Log the error status */
8905 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8906 "0373 FCP complete error: status=x%x, "
8907 "hw_status=x%x, total_data_specified=%d, "
8908 "parameter=x%x, word3=x%x\n",
8909 bf_get(lpfc_wcqe_c_status, wcqe),
8910 bf_get(lpfc_wcqe_c_hw_status, wcqe),
8911 wcqe->total_data_placed, wcqe->parameter,
8912 wcqe->word3);
8913 }
8914
8915 /* Look up the FCP command IOCB and create pseudo response IOCB */
8916 spin_lock_irqsave(&phba->hbalock, iflags);
8917 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8918 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8919 spin_unlock_irqrestore(&phba->hbalock, iflags);
8920 if (unlikely(!cmdiocbq)) {
8921 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8922 "0374 FCP complete with no corresponding "
8923 "cmdiocb: iotag (%d)\n",
8924 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8925 return;
8926 }
8927 if (unlikely(!cmdiocbq->iocb_cmpl)) {
8928 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8929 "0375 FCP cmdiocb not callback function "
8930 "iotag: (%d)\n",
8931 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8932 return;
8933 }
8934
8935 /* Fake the irspiocb and copy necessary response information */
8936 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
8937
8938 /* Pass the cmd_iocb and the rsp state to the upper layer */
8939 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
8940}
8941
8942/**
8943 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
8944 * @phba: Pointer to HBA context object.
8945 * @cq: Pointer to completion queue.
8946 * @wcqe: Pointer to work-queue completion queue entry.
8947 *
 8948 * This routine handles a fast-path WQ entry consumed event by invoking the
 8949 * proper WQ release routine on the matching fast-path WQ.
8950 **/
8951static void
8952lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8953 struct lpfc_wcqe_release *wcqe)
8954{
8955 struct lpfc_queue *childwq;
8956 bool wqid_matched = false;
8957 uint16_t fcp_wqid;
8958
8959 /* Check for fast-path FCP work queue release */
8960 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
8961 list_for_each_entry(childwq, &cq->child_list, list) {
8962 if (childwq->queue_id == fcp_wqid) {
8963 lpfc_sli4_wq_release(childwq,
8964 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8965 wqid_matched = true;
8966 break;
8967 }
8968 }
8969 /* Report warning log message if no match found */
8970 if (wqid_matched != true)
8971 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8972 "2580 Fast-path wqe consume event carries "
8973 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
8974}
8975
8976/**
8977 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
8978 * @cq: Pointer to the completion queue.
 8979 * @cqe: Pointer to fast-path completion queue entry.
8980 *
 8981 * This routine processes a fast-path work queue completion entry from the
 8982 * fast-path event queue for FCP command response completion.
8983 **/
8984static int
8985lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8986 struct lpfc_cqe *cqe)
8987{
8988 struct lpfc_wcqe_release wcqe;
8989 bool workposted = false;
8990
8991 /* Copy the work queue CQE and convert endian order if needed */
8992 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8993
8994 /* Check and process for different type of WCQE and dispatch */
8995 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8996 case CQE_CODE_COMPL_WQE:
8997 /* Process the WQ complete event */
8998 lpfc_sli4_fp_handle_fcp_wcqe(phba,
8999 (struct lpfc_wcqe_complete *)&wcqe);
9000 break;
9001 case CQE_CODE_RELEASE_WQE:
9002 /* Process the WQ release event */
9003 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
9004 (struct lpfc_wcqe_release *)&wcqe);
9005 break;
9006 case CQE_CODE_XRI_ABORTED:
9007 /* Process the WQ XRI abort event */
9008 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
9009 (struct sli4_wcqe_xri_aborted *)&wcqe);
9010 break;
9011 default:
9012 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9013 "0144 Not a valid WCQE code: x%x\n",
9014 bf_get(lpfc_wcqe_c_code, &wcqe));
9015 break;
9016 }
9017 return workposted;
9018}
9019
9020/**
9021 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
9022 * @phba: Pointer to HBA context object.
9023 * @eqe: Pointer to fast-path event queue entry.
9024 *
 9025 * This routine processes an event queue entry from the fast-path event queue.
 9026 * It checks the MajorCode and MinorCode to determine whether this is a
 9027 * completion event on a completion queue; if not, an error is logged and the
 9028 * routine just returns. Otherwise, it finds the corresponding completion
 9029 * queue, processes all the entries on that completion queue, rearms the
 9030 * completion queue, and then returns.
9031 **/
9032static void
9033lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9034 uint32_t fcp_cqidx)
9035{
9036 struct lpfc_queue *cq;
9037 struct lpfc_cqe *cqe;
9038 bool workposted = false;
9039 uint16_t cqid;
9040 int ecount = 0;
9041
9042 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
9043 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
9044 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9045 "0366 Not a valid fast-path completion "
9046 "event: majorcode=x%x, minorcode=x%x\n",
9047 bf_get(lpfc_eqe_major_code, eqe),
9048 bf_get(lpfc_eqe_minor_code, eqe));
9049 return;
9050 }
9051
9052 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
9053 if (unlikely(!cq)) {
9054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9055 "0367 Fast-path completion queue does not "
9056 "exist\n");
9057 return;
9058 }
9059
9060 /* Get the reference to the corresponding CQ */
9061 cqid = bf_get(lpfc_eqe_resource_id, eqe);
9062 if (unlikely(cqid != cq->queue_id)) {
9063 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9064 "0368 Miss-matched fast-path completion "
9065 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
9066 cqid, cq->queue_id);
9067 return;
9068 }
9069
9070 /* Process all the entries to the CQ */
9071 while ((cqe = lpfc_sli4_cq_get(cq))) {
9072 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
9073 if (!(++ecount % LPFC_GET_QE_REL_INT))
9074 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
9075 }
9076
9077 /* Catch the no cq entry condition */
9078 if (unlikely(ecount == 0))
9079 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9080 "0369 No entry from fast-path completion "
9081 "queue fcpcqid=%d\n", cq->queue_id);
9082
 9083 /* In any case, flush and re-arm the CQ */
9084 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
9085
9086 /* wake up worker thread if there are works to be done */
9087 if (workposted)
9088 lpfc_worker_wake_up(phba);
9089}
9090
9091static void
9092lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
9093{
9094 struct lpfc_eqe *eqe;
9095
9096 /* walk all the EQ entries and drop on the floor */
9097 while ((eqe = lpfc_sli4_eq_get(eq)))
9098 ;
9099
9100 /* Clear and re-arm the EQ */
9101 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
9102}
9103
9104/**
9105 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
9106 * @irq: Interrupt number.
9107 * @dev_id: The device context pointer.
9108 *
9109 * This function is directly called from the PCI layer as an interrupt
9110 * service routine when device with SLI-4 interface spec is enabled with
9111 * MSI-X multi-message interrupt mode and there are slow-path events in
9112 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
9113 * interrupt mode, this function is called as part of the device-level
9114 * interrupt handler. When the PCI slot is in error recovery or the HBA is
9115 * undergoing initialization, the interrupt handler will not process the
9116 * interrupt. The link attention and ELS ring attention events are handled
9117 * by the worker thread. The interrupt handler signals the worker thread
9118 * and returns for these events. This function is called without any lock
9119 * held. It gets the hbalock to access and update SLI data structures.
9120 *
9121 * This function returns IRQ_HANDLED when interrupt is handled else it
9122 * returns IRQ_NONE.
9123 **/
9124irqreturn_t
9125lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
9126{
9127 struct lpfc_hba *phba;
9128 struct lpfc_queue *speq;
9129 struct lpfc_eqe *eqe;
9130 unsigned long iflag;
9131 int ecount = 0;
9132
9133 /*
9134 * Get the driver's phba structure from the dev_id
9135 */
9136 phba = (struct lpfc_hba *)dev_id;
9137
9138 if (unlikely(!phba))
9139 return IRQ_NONE;
9140
9141 /* Get to the EQ struct associated with this vector */
9142 speq = phba->sli4_hba.sp_eq;
9143
9144 /* Check device state for handling interrupt */
9145 if (unlikely(lpfc_intr_state_check(phba))) {
9146 /* Check again for link_state with lock held */
9147 spin_lock_irqsave(&phba->hbalock, iflag);
9148 if (phba->link_state < LPFC_LINK_DOWN)
9149 /* Flush, clear interrupt, and rearm the EQ */
9150 lpfc_sli4_eq_flush(phba, speq);
9151 spin_unlock_irqrestore(&phba->hbalock, iflag);
9152 return IRQ_NONE;
9153 }
9154
9155 /*
 9156 * Process all the events on the slow-path EQ
9157 */
9158 while ((eqe = lpfc_sli4_eq_get(speq))) {
9159 lpfc_sli4_sp_handle_eqe(phba, eqe);
9160 if (!(++ecount % LPFC_GET_QE_REL_INT))
9161 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
9162 }
9163
9164 /* Always clear and re-arm the slow-path EQ */
9165 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
9166
 9167 /* Catch the no EQ entry condition */
9168 if (unlikely(ecount == 0)) {
9169 if (phba->intr_type == MSIX)
 9170 /* MSI-X vector is not shared, so an empty EQ is unexpected */
9171 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9172 "0357 MSI-X interrupt with no EQE\n");
9173 else
 9174 /* Non MSI-X: the line may be shared, so the interrupt is not ours */
9175 return IRQ_NONE;
9176 }
9177
9178 return IRQ_HANDLED;
9179} /* lpfc_sli4_sp_intr_handler */
9180
9181/**
9182 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
9183 * @irq: Interrupt number.
9184 * @dev_id: The device context pointer.
9185 *
9186 * This function is directly called from the PCI layer as an interrupt
9187 * service routine when device with SLI-4 interface spec is enabled with
9188 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
9189 * ring event in the HBA. However, when the device is enabled with either
9190 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
9191 * device-level interrupt handler. When the PCI slot is in error recovery
9192 * or the HBA is undergoing initialization, the interrupt handler will not
 9193 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 9194 * the interrupt context. This function is called without any lock held.
 9195 * It gets the hbalock to access and update SLI data structures. Note that
 9196 * the FCP EQs map one-to-one onto the FCP CQs, so the FCP EQ index is
 9197 * equal to the FCP CQ index.
9198 *
9199 * This function returns IRQ_HANDLED when interrupt is handled else it
9200 * returns IRQ_NONE.
9201 **/
9202irqreturn_t
9203lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
9204{
9205 struct lpfc_hba *phba;
9206 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9207 struct lpfc_queue *fpeq;
9208 struct lpfc_eqe *eqe;
9209 unsigned long iflag;
9210 int ecount = 0;
9211 uint32_t fcp_eqidx;
9212
9213 /* Get the driver's phba structure from the dev_id */
9214 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
9215 phba = fcp_eq_hdl->phba;
9216 fcp_eqidx = fcp_eq_hdl->idx;
9217
9218 if (unlikely(!phba))
9219 return IRQ_NONE;
9220
9221 /* Get to the EQ struct associated with this vector */
9222 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
9223
9224 /* Check device state for handling interrupt */
9225 if (unlikely(lpfc_intr_state_check(phba))) {
9226 /* Check again for link_state with lock held */
9227 spin_lock_irqsave(&phba->hbalock, iflag);
9228 if (phba->link_state < LPFC_LINK_DOWN)
9229 /* Flush, clear interrupt, and rearm the EQ */
9230 lpfc_sli4_eq_flush(phba, fpeq);
9231 spin_unlock_irqrestore(&phba->hbalock, iflag);
9232 return IRQ_NONE;
9233 }
9234
9235 /*
 9236 * Process all the events on the FCP fast-path EQ
9237 */
9238 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9239 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
9240 if (!(++ecount % LPFC_GET_QE_REL_INT))
9241 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
9242 }
9243
9244 /* Always clear and re-arm the fast-path EQ */
9245 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
9246
9247 if (unlikely(ecount == 0)) {
9248 if (phba->intr_type == MSIX)
 9249 /* MSI-X vector is not shared, so an empty EQ is unexpected */
9250 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9251 "0358 MSI-X interrupt with no EQE\n");
9252 else
 9253 /* Non MSI-X: the line may be shared, so the interrupt is not ours */
9254 return IRQ_NONE;
9255 }
9256
9257 return IRQ_HANDLED;
9258} /* lpfc_sli4_fp_intr_handler */
9259
9260/**
9261 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
9262 * @irq: Interrupt number.
9263 * @dev_id: The device context pointer.
9264 *
9265 * This function is the device-level interrupt handler to device with SLI-4
9266 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
9267 * interrupt mode is enabled and there is an event in the HBA which requires
9268 * driver attention. This function invokes the slow-path interrupt attention
9269 * handling function and fast-path interrupt attention handling function in
9270 * turn to process the relevant HBA attention events. This function is called
9271 * without any lock held. It gets the hbalock to access and update SLI data
9272 * structures.
9273 *
9274 * This function returns IRQ_HANDLED when interrupt is handled, else it
9275 * returns IRQ_NONE.
9276 **/
9277irqreturn_t
9278lpfc_sli4_intr_handler(int irq, void *dev_id)
9279{
9280 struct lpfc_hba *phba;
9281 irqreturn_t sp_irq_rc, fp_irq_rc;
9282 bool fp_handled = false;
9283 uint32_t fcp_eqidx;
9284
9285 /* Get the driver's phba structure from the dev_id */
9286 phba = (struct lpfc_hba *)dev_id;
9287
9288 if (unlikely(!phba))
9289 return IRQ_NONE;
9290
9291 /*
 9292 * Invoke slow-path host attention interrupt handling as appropriate.
9293 */
9294 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9295
9296 /*
9297 * Invoke fast-path host attention interrupt handling as appropriate.
9298 */
9299 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9300 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9301 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9302 if (fp_irq_rc == IRQ_HANDLED)
9303			fp_handled = true;
9304 }
9305
9306	return fp_handled ? IRQ_HANDLED : sp_irq_rc;
9307} /* lpfc_sli4_intr_handler */
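
/*
 * Sketch (illustrative, not from the patch): when MSI-X is unavailable the
 * composite handler above is registered once for the shared INTx/MSI line:
 */
static int lpfc_sli4_sketch_enable_intx(struct lpfc_hba *phba)
{
	/* IRQF_SHARED because a legacy line may be shared with other devices */
	return request_irq(phba->pcidev->irq, &lpfc_sli4_intr_handler,
			   IRQF_SHARED, LPFC_DRIVER_NAME, phba);
}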
9308
9309/**
9310 * lpfc_sli4_queue_free - free a queue structure and associated memory
9311 * @queue: The queue structure to free.
9312 *
9313 * This function frees a queue structure and the DMAable memory used for
9314 * the host resident queue. This function must be called after destroying the
9315 * queue on the HBA.
9316 **/
9317void
9318lpfc_sli4_queue_free(struct lpfc_queue *queue)
9319{
9320 struct lpfc_dmabuf *dmabuf;
9321
9322 if (!queue)
9323 return;
9324
9325 while (!list_empty(&queue->page_list)) {
9326 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9327 list);
9328 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9329 dmabuf->virt, dmabuf->phys);
9330 kfree(dmabuf);
9331 }
9332 kfree(queue);
9333 return;
9334}
9335
9336/**
9337 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
9338 * @phba: The HBA that this queue is being created on.
9339 * @entry_size: The size of each queue entry for this queue.
9340 * @entry_count: The number of entries that this queue will handle.
9341 *
9342 * This function allocates a queue structure and the DMAable memory used for
9343 * the host resident queue. This function must be called before creating the
9344 * queue on the HBA.
9345 **/
9346struct lpfc_queue *
9347lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9348 uint32_t entry_count)
9349{
9350 struct lpfc_queue *queue;
9351 struct lpfc_dmabuf *dmabuf;
9352 int x, total_qe_count;
9353 void *dma_pointer;
9354
9355
9356 queue = kzalloc(sizeof(struct lpfc_queue) +
9357 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9358 if (!queue)
9359 return NULL;
9360 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9361 INIT_LIST_HEAD(&queue->list);
9362 INIT_LIST_HEAD(&queue->page_list);
9363 INIT_LIST_HEAD(&queue->child_list);
9364 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9365 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9366 if (!dmabuf)
9367 goto out_fail;
9368 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9369 PAGE_SIZE, &dmabuf->phys,
9370 GFP_KERNEL);
9371 if (!dmabuf->virt) {
9372 kfree(dmabuf);
9373 goto out_fail;
9374 }
9375 memset(dmabuf->virt, 0, PAGE_SIZE);
9376 dmabuf->buffer_tag = x;
9377 list_add_tail(&dmabuf->list, &queue->page_list);
9378 /* initialize queue's entry array */
9379 dma_pointer = dmabuf->virt;
9380 for (; total_qe_count < entry_count &&
9381 dma_pointer < (PAGE_SIZE + dmabuf->virt);
9382 total_qe_count++, dma_pointer += entry_size) {
9383 queue->qe[total_qe_count].address = dma_pointer;
9384 }
9385 }
9386 queue->entry_size = entry_size;
9387 queue->entry_count = entry_count;
9388 queue->phba = phba;
9389
9390 return queue;
9391out_fail:
9392 lpfc_sli4_queue_free(queue);
9393 return NULL;
9394}
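
/*
 * Usage sketch (values illustrative): the allocate/create/destroy/free
 * pairing implied by the comments above, shown for an event queue. The
 * 1024-entry count and the helper name are assumptions, not values from
 * this patch.
 */
static struct lpfc_queue *lpfc_sketch_setup_eq(struct lpfc_hba *phba,
					       uint16_t imax)
{
	struct lpfc_queue *eq;

	/* Host-side DMA pages first ... */
	eq = lpfc_sli4_queue_alloc(phba, sizeof(struct lpfc_eqe), 1024);
	if (!eq)
		return NULL;
	/* ... then ask the port to create the queue over those pages */
	if (lpfc_eq_create(phba, eq, imax)) {
		lpfc_sli4_queue_free(eq);
		return NULL;
	}
	return eq;	/* teardown: lpfc_eq_destroy() then lpfc_sli4_queue_free() */
}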
9395
9396/**
9397 * lpfc_eq_create - Create an Event Queue on the HBA
9398 * @phba: HBA structure that indicates port to create a queue on.
9399 * @eq: The queue structure to use to create the event queue.
9400 * @imax: The maximum interrupts per second limit.
9401 *
9402 * This function creates an event queue, as detailed in @eq, on a port,
9403 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
9404 *
9405 * The @phba struct is used to send mailbox command to HBA. The @eq struct
9406 * is used to get the entry count and entry size that are necessary to
9407 * determine the number of pages to allocate and use for this queue. This
9408 * function will send the EQ_CREATE mailbox command to the HBA to setup the
9409 * event queue. This function is synchronous; it will wait for the mailbox
9410 * command to complete before returning.
9411 *
9412 * On success this function will return a zero. If unable to allocate enough
9413 * memory this function will return -ENOMEM. If the queue create mailbox command
9414 * fails this function will return -ENXIO.
9415 **/
9416uint32_t
9417lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9418{
9419 struct lpfc_mbx_eq_create *eq_create;
9420 LPFC_MBOXQ_t *mbox;
9421 int rc, length, status = 0;
9422 struct lpfc_dmabuf *dmabuf;
9423 uint32_t shdr_status, shdr_add_status;
9424 union lpfc_sli4_cfg_shdr *shdr;
9425 uint16_t dmult;
9426
9427 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9428 if (!mbox)
9429 return -ENOMEM;
9430 length = (sizeof(struct lpfc_mbx_eq_create) -
9431 sizeof(struct lpfc_sli4_cfg_mhdr));
9432 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9433 LPFC_MBOX_OPCODE_EQ_CREATE,
9434 length, LPFC_SLI4_MBX_EMBED);
9435 eq_create = &mbox->u.mqe.un.eq_create;
9436 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9437 eq->page_count);
9438 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9439 LPFC_EQE_SIZE);
9440 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
9441	/* Calculate delay multiplier from maximum interrupts per second */
9442 dmult = LPFC_DMULT_CONST/imax - 1;
9443 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9444 dmult);
9445 switch (eq->entry_count) {
9446 default:
9447 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9448 "0360 Unsupported EQ count. (%d)\n",
9449 eq->entry_count);
9450 if (eq->entry_count < 256)
9451 return -EINVAL;
9452 /* otherwise default to smallest count (drop through) */
9453 case 256:
9454 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9455 LPFC_EQ_CNT_256);
9456 break;
9457 case 512:
9458 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9459 LPFC_EQ_CNT_512);
9460 break;
9461 case 1024:
9462 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9463 LPFC_EQ_CNT_1024);
9464 break;
9465 case 2048:
9466 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9467 LPFC_EQ_CNT_2048);
9468 break;
9469 case 4096:
9470 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9471 LPFC_EQ_CNT_4096);
9472 break;
9473 }
9474 list_for_each_entry(dmabuf, &eq->page_list, list) {
9475 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9476 putPaddrLow(dmabuf->phys);
9477 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9478 putPaddrHigh(dmabuf->phys);
9479 }
9480 mbox->vport = phba->pport;
9481 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9482 mbox->context1 = NULL;
9483 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9484 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9485 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9486 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9487 if (shdr_status || shdr_add_status || rc) {
9488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9489 "2500 EQ_CREATE mailbox failed with "
9490 "status x%x add_status x%x, mbx status x%x\n",
9491 shdr_status, shdr_add_status, rc);
9492 status = -ENXIO;
9493 }
9494 eq->type = LPFC_EQ;
9495 eq->subtype = LPFC_NONE;
9496 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9497 if (eq->queue_id == 0xFFFF)
9498 status = -ENXIO;
9499 eq->host_index = 0;
9500 eq->hba_index = 0;
9501
9502 if (rc != MBX_TIMEOUT)
9503 mempool_free(mbox, phba->mbox_mem_pool);
9504 return status;
9505}
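
/*
 * Worked example of the delay multiplier math above (numbers illustrative,
 * not taken from this patch): if LPFC_DMULT_CONST were 651042 and the
 * caller requested imax = 10000 interrupts per second, then
 * dmult = 651042/10000 - 1 = 64, which the port uses to coalesce EQ
 * interrupts down to roughly the requested rate.
 */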
9506
9507/**
9508 * lpfc_cq_create - Create a Completion Queue on the HBA
9509 * @phba: HBA structure that indicates port to create a queue on.
9510 * @cq: The queue structure to use to create the completion queue.
9511 * @eq: The event queue to bind this completion queue to.
9512 *
9513 * This function creates a completion queue, as detailed in @cq, on a port,
9514 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
9515 *
9516 * The @phba struct is used to send mailbox command to HBA. The @cq struct
9517 * is used to get the entry count and entry size that are necessary to
9518 * determine the number of pages to allocate and use for this queue. The @eq
9519 * is used to indicate which event queue to bind this completion queue to. This
9520 * function will send the CQ_CREATE mailbox command to the HBA to setup the
9521 * completion queue. This function is synchronous; it will wait for the
9522 * mailbox command to complete before returning.
9523 *
9524 * On success this function will return a zero. If unable to allocate enough
9525 * memory this function will return -ENOMEM. If the queue create mailbox command
9526 * fails this function will return -ENXIO.
9527 **/
9528uint32_t
9529lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9530 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9531{
9532 struct lpfc_mbx_cq_create *cq_create;
9533 struct lpfc_dmabuf *dmabuf;
9534 LPFC_MBOXQ_t *mbox;
9535 int rc, length, status = 0;
9536 uint32_t shdr_status, shdr_add_status;
9537 union lpfc_sli4_cfg_shdr *shdr;
9538
9539 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9540 if (!mbox)
9541 return -ENOMEM;
9542 length = (sizeof(struct lpfc_mbx_cq_create) -
9543 sizeof(struct lpfc_sli4_cfg_mhdr));
9544 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9545 LPFC_MBOX_OPCODE_CQ_CREATE,
9546 length, LPFC_SLI4_MBX_EMBED);
9547 cq_create = &mbox->u.mqe.un.cq_create;
9548 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9549 cq->page_count);
9550 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9551 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9552 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9553 switch (cq->entry_count) {
9554 default:
9555 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9556 "0361 Unsupported CQ count. (%d)\n",
9557 cq->entry_count);
9558 if (cq->entry_count < 256)
9559 return -EINVAL;
9560 /* otherwise default to smallest count (drop through) */
9561 case 256:
9562 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9563 LPFC_CQ_CNT_256);
9564 break;
9565 case 512:
9566 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9567 LPFC_CQ_CNT_512);
9568 break;
9569 case 1024:
9570 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9571 LPFC_CQ_CNT_1024);
9572 break;
9573 }
9574 list_for_each_entry(dmabuf, &cq->page_list, list) {
9575 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9576 putPaddrLow(dmabuf->phys);
9577 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9578 putPaddrHigh(dmabuf->phys);
9579 }
9580 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9581
9582 /* The IOCTL status is embedded in the mailbox subheader. */
9583 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9584 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9585 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9586 if (shdr_status || shdr_add_status || rc) {
9587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9588 "2501 CQ_CREATE mailbox failed with "
9589 "status x%x add_status x%x, mbx status x%x\n",
9590 shdr_status, shdr_add_status, rc);
9591 status = -ENXIO;
9592 goto out;
9593 }
9594 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9595 if (cq->queue_id == 0xFFFF) {
9596 status = -ENXIO;
9597 goto out;
9598 }
9599 /* link the cq onto the parent eq child list */
9600 list_add_tail(&cq->list, &eq->child_list);
9601 /* Set up completion queue's type and subtype */
9602 cq->type = type;
9603 cq->subtype = subtype;
9604 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9605 cq->host_index = 0;
9606 cq->hba_index = 0;
9607out:
9608
9609 if (rc != MBX_TIMEOUT)
9610 mempool_free(mbox, phba->mbox_mem_pool);
9611 return status;
9612}
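
/*
 * Sketch of the binding order the create routines above imply: an EQ must
 * exist before the CQ that attaches to it, and the CQ before its WQ child.
 * The LPFC_WCQ/LPFC_FCP type and subtype constants are assumed from
 * lpfc_sli4.h; the helper name is illustrative:
 */
static int lpfc_sketch_bind_fcp_queues(struct lpfc_hba *phba,
				       struct lpfc_queue *eq,
				       struct lpfc_queue *cq,
				       struct lpfc_queue *wq,
				       uint16_t imax)
{
	int rc;

	rc = lpfc_eq_create(phba, eq, imax);
	if (!rc)
		rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
	if (!rc)
		rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
	return rc;	/* caller destroys in reverse order on failure */
}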
9613
9614/**
9615 * lpfc_mq_create - Create a mailbox Queue on the HBA
9616 * @phba: HBA structure that indicates port to create a queue on.
9617 * @mq: The queue structure to use to create the mailbox queue.
9618 *
9619 * This function creates a mailbox queue, as detailed in @mq, on a port,
9620 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
9621 *
9622 * The @phba struct is used to send mailbox command to HBA. The @mq struct
9623 * is used to get the entry count and entry size that are necessary to
9624 * determine the number of pages to allocate and use for this queue. This
9625 * function will send the MQ_CREATE mailbox command to the HBA to setup the
9626 * mailbox queue. This function is synchronous; it will wait for the mailbox
9627 * command to complete before returning.
9628 *
9629 * On success this function will return a zero. If unable to allocate enough
9630 * memory this function will return -ENOMEM. If the queue create mailbox command
9631 * fails this function will return -ENXIO.
9632 **/
9633uint32_t
9634lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9635 struct lpfc_queue *cq, uint32_t subtype)
9636{
9637 struct lpfc_mbx_mq_create *mq_create;
9638 struct lpfc_dmabuf *dmabuf;
9639 LPFC_MBOXQ_t *mbox;
9640 int rc, length, status = 0;
9641 uint32_t shdr_status, shdr_add_status;
9642 union lpfc_sli4_cfg_shdr *shdr;
9643
9644 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9645 if (!mbox)
9646 return -ENOMEM;
9647 length = (sizeof(struct lpfc_mbx_mq_create) -
9648 sizeof(struct lpfc_sli4_cfg_mhdr));
9649 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9650 LPFC_MBOX_OPCODE_MQ_CREATE,
9651 length, LPFC_SLI4_MBX_EMBED);
9652 mq_create = &mbox->u.mqe.un.mq_create;
9653 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9654 mq->page_count);
9655 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9656 cq->queue_id);
9657 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9658 switch (mq->entry_count) {
9659 default:
9660 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9661 "0362 Unsupported MQ count. (%d)\n",
9662 mq->entry_count);
9663 if (mq->entry_count < 16)
9664 return -EINVAL;
9665 /* otherwise default to smallest count (drop through) */
9666 case 16:
9667 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9668 LPFC_MQ_CNT_16);
9669 break;
9670 case 32:
9671 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9672 LPFC_MQ_CNT_32);
9673 break;
9674 case 64:
9675 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9676 LPFC_MQ_CNT_64);
9677 break;
9678 case 128:
9679 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9680 LPFC_MQ_CNT_128);
9681 break;
9682 }
9683 list_for_each_entry(dmabuf, &mq->page_list, list) {
9684 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9685 putPaddrLow(dmabuf->phys);
9686 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9687 putPaddrHigh(dmabuf->phys);
9688 }
9689 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9690 /* The IOCTL status is embedded in the mailbox subheader. */
9691 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9692 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9693 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9694 if (shdr_status || shdr_add_status || rc) {
9695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9696 "2502 MQ_CREATE mailbox failed with "
9697 "status x%x add_status x%x, mbx status x%x\n",
9698 shdr_status, shdr_add_status, rc);
9699 status = -ENXIO;
9700 goto out;
9701 }
9702 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9703 if (mq->queue_id == 0xFFFF) {
9704 status = -ENXIO;
9705 goto out;
9706 }
9707 mq->type = LPFC_MQ;
9708 mq->subtype = subtype;
9709 mq->host_index = 0;
9710 mq->hba_index = 0;
9711
9712 /* link the mq onto the parent cq child list */
9713 list_add_tail(&mq->list, &cq->child_list);
9714out:
9715 if (rc != MBX_TIMEOUT)
9716 mempool_free(mbox, phba->mbox_mem_pool);
9717 return status;
9718}
9719
9720/**
9721 * lpfc_wq_create - Create a Work Queue on the HBA
9722 * @phba: HBA structure that indicates port to create a queue on.
9723 * @wq: The queue structure to use to create the work queue.
9724 * @cq: The completion queue to bind this work queue to.
9725 * @subtype: The subtype of the work queue indicating its functionality.
9726 *
9727 * This function creates a work queue, as detailed in @wq, on a port, described
9728 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
9729 *
9730 * The @phba struct is used to send mailbox command to HBA. The @wq struct
9731 * is used to get the entry count and entry size that are necessary to
9732 * determine the number of pages to allocate and use for this queue. The @cq
9733 * is used to indicate which completion queue to bind this work queue to. This
9734 * function will send the WQ_CREATE mailbox command to the HBA to setup the
9735 * work queue. This function is synchronous; it will wait for the mailbox
9736 * command to complete before returning.
9737 *
9738 * On success this function will return a zero. If unable to allocate enough
9739 * memory this function will return -ENOMEM. If the queue create mailbox command
9740 * fails this function will return -ENXIO.
9741 **/
9742uint32_t
9743lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9744 struct lpfc_queue *cq, uint32_t subtype)
9745{
9746 struct lpfc_mbx_wq_create *wq_create;
9747 struct lpfc_dmabuf *dmabuf;
9748 LPFC_MBOXQ_t *mbox;
9749 int rc, length, status = 0;
9750 uint32_t shdr_status, shdr_add_status;
9751 union lpfc_sli4_cfg_shdr *shdr;
9752
9753 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9754 if (!mbox)
9755 return -ENOMEM;
9756 length = (sizeof(struct lpfc_mbx_wq_create) -
9757 sizeof(struct lpfc_sli4_cfg_mhdr));
9758 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9759 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
9760 length, LPFC_SLI4_MBX_EMBED);
9761 wq_create = &mbox->u.mqe.un.wq_create;
9762 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
9763 wq->page_count);
9764 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9765 cq->queue_id);
9766 list_for_each_entry(dmabuf, &wq->page_list, list) {
9767 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9768 putPaddrLow(dmabuf->phys);
9769 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9770 putPaddrHigh(dmabuf->phys);
9771 }
9772 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9773 /* The IOCTL status is embedded in the mailbox subheader. */
9774 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
9775 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9776 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9777 if (shdr_status || shdr_add_status || rc) {
9778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9779 "2503 WQ_CREATE mailbox failed with "
9780 "status x%x add_status x%x, mbx status x%x\n",
9781 shdr_status, shdr_add_status, rc);
9782 status = -ENXIO;
9783 goto out;
9784 }
9785 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
9786 if (wq->queue_id == 0xFFFF) {
9787 status = -ENXIO;
9788 goto out;
9789 }
9790 wq->type = LPFC_WQ;
9791 wq->subtype = subtype;
9792 wq->host_index = 0;
9793 wq->hba_index = 0;
9794
9795 /* link the wq onto the parent cq child list */
9796 list_add_tail(&wq->list, &cq->child_list);
9797out:
9798 if (rc != MBX_TIMEOUT)
9799 mempool_free(mbox, phba->mbox_mem_pool);
9800 return status;
9801}
9802
9803/**
9804 * lpfc_rq_create - Create a Receive Queue on the HBA
9805 * @phba: HBA structure that indicates port to create a queue on.
9806 * @hrq: The queue structure to use to create the header receive queue.
9807 * @drq: The queue structure to use to create the data receive queue.
9808 * @cq: The completion queue to bind this receive queue pair to.
9809 *
9810 * This function creates a receive buffer queue pair, as detailed in @hrq and
9811 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
9812 * to the HBA.
9813 *
9814 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
9815 * structs are used to get the entry count that is necessary to determine the
9816 * number of pages to use for this queue. The @cq is used to indicate which
9817 * completion queue the buffers received on these queues will be bound to.
9818 * This function will send the RQ_CREATE mailbox command to the HBA to setup
9819 * the receive queue pair. This function is synchronous; it will wait for the
9820 * mailbox command to complete before returning.
9821 *
9822 * On success this function will return a zero. If unable to allocate enough
9823 * memory this function will return -ENOMEM. If the queue create mailbox command
9824 * fails this function will return -ENXIO.
9825 **/
9826uint32_t
9827lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9828 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9829{
9830 struct lpfc_mbx_rq_create *rq_create;
9831 struct lpfc_dmabuf *dmabuf;
9832 LPFC_MBOXQ_t *mbox;
9833 int rc, length, status = 0;
9834 uint32_t shdr_status, shdr_add_status;
9835 union lpfc_sli4_cfg_shdr *shdr;
9836
9837 if (hrq->entry_count != drq->entry_count)
9838 return -EINVAL;
9839 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9840 if (!mbox)
9841 return -ENOMEM;
9842 length = (sizeof(struct lpfc_mbx_rq_create) -
9843 sizeof(struct lpfc_sli4_cfg_mhdr));
9844 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9845 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9846 length, LPFC_SLI4_MBX_EMBED);
9847 rq_create = &mbox->u.mqe.un.rq_create;
9848 switch (hrq->entry_count) {
9849 default:
9850 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9851 "2535 Unsupported RQ count. (%d)\n",
9852 hrq->entry_count);
9853 if (hrq->entry_count < 512)
9854 return -EINVAL;
9855 /* otherwise default to smallest count (drop through) */
9856 case 512:
9857 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9858 LPFC_RQ_RING_SIZE_512);
9859 break;
9860 case 1024:
9861 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9862 LPFC_RQ_RING_SIZE_1024);
9863 break;
9864 case 2048:
9865 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9866 LPFC_RQ_RING_SIZE_2048);
9867 break;
9868 case 4096:
9869 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9870 LPFC_RQ_RING_SIZE_4096);
9871 break;
9872 }
9873 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9874 cq->queue_id);
9875 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9876 hrq->page_count);
9877 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9878 LPFC_HDR_BUF_SIZE);
9879 list_for_each_entry(dmabuf, &hrq->page_list, list) {
9880 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9881 putPaddrLow(dmabuf->phys);
9882 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9883 putPaddrHigh(dmabuf->phys);
9884 }
9885 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9886 /* The IOCTL status is embedded in the mailbox subheader. */
9887 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9888 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9889 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9890 if (shdr_status || shdr_add_status || rc) {
9891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9892 "2504 RQ_CREATE mailbox failed with "
9893 "status x%x add_status x%x, mbx status x%x\n",
9894 shdr_status, shdr_add_status, rc);
9895 status = -ENXIO;
9896 goto out;
9897 }
9898 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9899 if (hrq->queue_id == 0xFFFF) {
9900 status = -ENXIO;
9901 goto out;
9902 }
9903 hrq->type = LPFC_HRQ;
9904 hrq->subtype = subtype;
9905 hrq->host_index = 0;
9906 hrq->hba_index = 0;
9907
9908 /* now create the data queue */
9909 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9910 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9911 length, LPFC_SLI4_MBX_EMBED);
9912 switch (drq->entry_count) {
9913 default:
9914 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9915 "2536 Unsupported RQ count. (%d)\n",
9916 drq->entry_count);
9917 if (drq->entry_count < 512)
9918 return -EINVAL;
9919 /* otherwise default to smallest count (drop through) */
9920 case 512:
9921 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9922 LPFC_RQ_RING_SIZE_512);
9923 break;
9924 case 1024:
9925 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9926 LPFC_RQ_RING_SIZE_1024);
9927 break;
9928 case 2048:
9929 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9930 LPFC_RQ_RING_SIZE_2048);
9931 break;
9932 case 4096:
9933 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9934 LPFC_RQ_RING_SIZE_4096);
9935 break;
9936 }
9937 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9938 cq->queue_id);
9939 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9940 drq->page_count);
9941 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9942 LPFC_DATA_BUF_SIZE);
9943 list_for_each_entry(dmabuf, &drq->page_list, list) {
9944 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9945 putPaddrLow(dmabuf->phys);
9946 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9947 putPaddrHigh(dmabuf->phys);
9948 }
9949 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9950 /* The IOCTL status is embedded in the mailbox subheader. */
9951 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9952 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9953 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9954 if (shdr_status || shdr_add_status || rc) {
9955 status = -ENXIO;
9956 goto out;
9957 }
9958 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9959 if (drq->queue_id == 0xFFFF) {
9960 status = -ENXIO;
9961 goto out;
9962 }
9963 drq->type = LPFC_DRQ;
9964 drq->subtype = subtype;
9965 drq->host_index = 0;
9966 drq->hba_index = 0;
9967
9968 /* link the header and data RQs onto the parent cq child list */
9969 list_add_tail(&hrq->list, &cq->child_list);
9970 list_add_tail(&drq->list, &cq->child_list);
9971
9972out:
9973 if (rc != MBX_TIMEOUT)
9974 mempool_free(mbox, phba->mbox_mem_pool);
9975 return status;
9976}
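
/*
 * Sketch of the header/data pairing enforced above: both queues must have
 * the same entry count, and both bind to the same CQ. LPFC_USOL as the
 * unsolicited-receive subtype is an assumption from lpfc_sli4.h, and the
 * helper name is illustrative:
 */
static int lpfc_sketch_setup_rq_pair(struct lpfc_hba *phba,
				     struct lpfc_queue *hrq,
				     struct lpfc_queue *drq,
				     struct lpfc_queue *cq)
{
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;		/* mirrors the check in lpfc_rq_create */
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}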
9977
9978/**
9979 * lpfc_eq_destroy - Destroy an event Queue on the HBA
9980 * @eq: The queue structure associated with the queue to destroy.
9981 *
9982 * This function destroys a queue, as detailed in @eq, by sending a mailbox
9983 * command, specific to the type of queue, to the HBA.
9984 *
9985 * The @eq struct is used to get the queue ID of the queue to destroy.
9986 *
9987 * On success this function will return a zero. If the queue destroy mailbox
9988 * command fails this function will return -ENXIO.
9989 **/
9990uint32_t
9991lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
9992{
9993 LPFC_MBOXQ_t *mbox;
9994 int rc, length, status = 0;
9995 uint32_t shdr_status, shdr_add_status;
9996 union lpfc_sli4_cfg_shdr *shdr;
9997
9998 if (!eq)
9999 return -ENODEV;
10000 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
10001 if (!mbox)
10002 return -ENOMEM;
10003 length = (sizeof(struct lpfc_mbx_eq_destroy) -
10004 sizeof(struct lpfc_sli4_cfg_mhdr));
10005 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10006 LPFC_MBOX_OPCODE_EQ_DESTROY,
10007 length, LPFC_SLI4_MBX_EMBED);
10008 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
10009 eq->queue_id);
10010 mbox->vport = eq->phba->pport;
10011 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10012
10013 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
10014 /* The IOCTL status is embedded in the mailbox subheader. */
10015 shdr = (union lpfc_sli4_cfg_shdr *)
10016 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
10017 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10018 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10019 if (shdr_status || shdr_add_status || rc) {
10020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10021 "2505 EQ_DESTROY mailbox failed with "
10022 "status x%x add_status x%x, mbx status x%x\n",
10023 shdr_status, shdr_add_status, rc);
10024 status = -ENXIO;
10025 }
10026
10027 /* Remove eq from any list */
10028 list_del_init(&eq->list);
10029 if (rc != MBX_TIMEOUT)
10030 mempool_free(mbox, eq->phba->mbox_mem_pool);
10031 return status;
10032}
10033
10034/**
10035 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
10036 * @cq: The queue structure associated with the queue to destroy.
10037 *
10038 * This function destroys a queue, as detailed in @cq, by sending a mailbox
10039 * command, specific to the type of queue, to the HBA.
10040 *
10041 * The @cq struct is used to get the queue ID of the queue to destroy.
10042 *
10043 * On success this function will return a zero. If the queue destroy mailbox
10044 * command fails this function will return -ENXIO.
10045 **/
10046uint32_t
10047lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
10048{
10049 LPFC_MBOXQ_t *mbox;
10050 int rc, length, status = 0;
10051 uint32_t shdr_status, shdr_add_status;
10052 union lpfc_sli4_cfg_shdr *shdr;
10053
10054 if (!cq)
10055 return -ENODEV;
10056 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
10057 if (!mbox)
10058 return -ENOMEM;
10059 length = (sizeof(struct lpfc_mbx_cq_destroy) -
10060 sizeof(struct lpfc_sli4_cfg_mhdr));
10061 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10062 LPFC_MBOX_OPCODE_CQ_DESTROY,
10063 length, LPFC_SLI4_MBX_EMBED);
10064 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
10065 cq->queue_id);
10066 mbox->vport = cq->phba->pport;
10067 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10068 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
10069 /* The IOCTL status is embedded in the mailbox subheader. */
10070 shdr = (union lpfc_sli4_cfg_shdr *)
10071		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
10072 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10073 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10074 if (shdr_status || shdr_add_status || rc) {
10075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10076 "2506 CQ_DESTROY mailbox failed with "
10077 "status x%x add_status x%x, mbx status x%x\n",
10078 shdr_status, shdr_add_status, rc);
10079 status = -ENXIO;
10080 }
10081 /* Remove cq from any list */
10082 list_del_init(&cq->list);
10083 if (rc != MBX_TIMEOUT)
10084 mempool_free(mbox, cq->phba->mbox_mem_pool);
10085 return status;
10086}
10087
10088/**
10089 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
10090 * @mq: The queue structure associated with the queue to destroy.
10091 *
10092 * This function destroys a queue, as detailed in @mq, by sending a mailbox
10093 * command, specific to the type of queue, to the HBA.
10094 *
10095 * The @mq struct is used to get the queue ID of the queue to destroy.
10096 *
10097 * On success this function will return a zero. If the queue destroy mailbox
10098 * command fails this function will return -ENXIO.
10099 **/
10100uint32_t
10101lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
10102{
10103 LPFC_MBOXQ_t *mbox;
10104 int rc, length, status = 0;
10105 uint32_t shdr_status, shdr_add_status;
10106 union lpfc_sli4_cfg_shdr *shdr;
10107
10108 if (!mq)
10109 return -ENODEV;
10110 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
10111 if (!mbox)
10112 return -ENOMEM;
10113 length = (sizeof(struct lpfc_mbx_mq_destroy) -
10114 sizeof(struct lpfc_sli4_cfg_mhdr));
10115 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10116 LPFC_MBOX_OPCODE_MQ_DESTROY,
10117 length, LPFC_SLI4_MBX_EMBED);
10118 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
10119 mq->queue_id);
10120 mbox->vport = mq->phba->pport;
10121 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10122 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
10123 /* The IOCTL status is embedded in the mailbox subheader. */
10124 shdr = (union lpfc_sli4_cfg_shdr *)
10125 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
10126 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10127 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10128 if (shdr_status || shdr_add_status || rc) {
10129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10130 "2507 MQ_DESTROY mailbox failed with "
10131 "status x%x add_status x%x, mbx status x%x\n",
10132 shdr_status, shdr_add_status, rc);
10133 status = -ENXIO;
10134 }
10135 /* Remove mq from any list */
10136 list_del_init(&mq->list);
10137 if (rc != MBX_TIMEOUT)
10138 mempool_free(mbox, mq->phba->mbox_mem_pool);
10139 return status;
10140}
10141
10142/**
10143 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
10144 * @wq: The queue structure associated with the queue to destroy.
10145 *
10146 * This function destroys a queue, as detailed in @wq, by sending a mailbox
10147 * command, specific to the type of queue, to the HBA.
10148 *
10149 * The @wq struct is used to get the queue ID of the queue to destroy.
10150 *
10151 * On success this function will return a zero. If the queue destroy mailbox
10152 * command fails this function will return -ENXIO.
10153 **/
10154uint32_t
10155lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10156{
10157 LPFC_MBOXQ_t *mbox;
10158 int rc, length, status = 0;
10159 uint32_t shdr_status, shdr_add_status;
10160 union lpfc_sli4_cfg_shdr *shdr;
10161
10162 if (!wq)
10163 return -ENODEV;
10164 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10165 if (!mbox)
10166 return -ENOMEM;
10167 length = (sizeof(struct lpfc_mbx_wq_destroy) -
10168 sizeof(struct lpfc_sli4_cfg_mhdr));
10169 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10170 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10171 length, LPFC_SLI4_MBX_EMBED);
10172 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10173 wq->queue_id);
10174 mbox->vport = wq->phba->pport;
10175 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10176 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10177 shdr = (union lpfc_sli4_cfg_shdr *)
10178 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10179 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10180 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10181 if (shdr_status || shdr_add_status || rc) {
10182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10183 "2508 WQ_DESTROY mailbox failed with "
10184 "status x%x add_status x%x, mbx status x%x\n",
10185 shdr_status, shdr_add_status, rc);
10186 status = -ENXIO;
10187 }
10188 /* Remove wq from any list */
10189 list_del_init(&wq->list);
10190 if (rc != MBX_TIMEOUT)
10191 mempool_free(mbox, wq->phba->mbox_mem_pool);
10192 return status;
10193}
10194
10195/**
10196 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
10197 * @hrq: The header receive queue to destroy.
10198 * @drq: The data receive queue to destroy.
10199 * This function destroys the receive queue pair, as detailed in @hrq and
10200 * @drq, by sending RQ_DESTROY mailbox commands, one per queue, to the HBA.
10201 *
10202 * The @hrq and @drq structs are used to get the queue IDs to destroy.
10203 *
10204 * On success this function will return a zero. If a queue destroy mailbox
10205 * command fails this function will return -ENXIO.
10206 **/
10207uint32_t
10208lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10209 struct lpfc_queue *drq)
10210{
10211 LPFC_MBOXQ_t *mbox;
10212 int rc, length, status = 0;
10213 uint32_t shdr_status, shdr_add_status;
10214 union lpfc_sli4_cfg_shdr *shdr;
10215
10216 if (!hrq || !drq)
10217 return -ENODEV;
10218 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
10219 if (!mbox)
10220 return -ENOMEM;
10221 length = (sizeof(struct lpfc_mbx_rq_destroy) -
10222 sizeof(struct mbox_header));
10223 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10224 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
10225 length, LPFC_SLI4_MBX_EMBED);
10226 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10227 hrq->queue_id);
10228 mbox->vport = hrq->phba->pport;
10229 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10230 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
10231 /* The IOCTL status is embedded in the mailbox subheader. */
10232 shdr = (union lpfc_sli4_cfg_shdr *)
10233 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10234 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10235 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10236 if (shdr_status || shdr_add_status || rc) {
10237 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10238 "2509 RQ_DESTROY mailbox failed with "
10239 "status x%x add_status x%x, mbx status x%x\n",
10240 shdr_status, shdr_add_status, rc);
10241 if (rc != MBX_TIMEOUT)
10242 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10243 return -ENXIO;
10244 }
10245 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10246 drq->queue_id);
10247 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
10248 shdr = (union lpfc_sli4_cfg_shdr *)
10249 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10250 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10251 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10252 if (shdr_status || shdr_add_status || rc) {
10253 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10254 "2510 RQ_DESTROY mailbox failed with "
10255 "status x%x add_status x%x, mbx status x%x\n",
10256 shdr_status, shdr_add_status, rc);
10257 status = -ENXIO;
10258 }
10259 list_del_init(&hrq->list);
10260 list_del_init(&drq->list);
10261 if (rc != MBX_TIMEOUT)
10262 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10263 return status;
10264}
10265
10266/**
10267 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
10268 * @phba: Pointer to the HBA context object on which this call is executed.
10269 * @pdma_phys_addr0: Physical address of the 1st SGL page.
10270 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
10271 * @xritag: the xritag that ties this io to the SGL pages.
10272 *
10273 * This routine will post the sgl pages for the IO that has the xritag
10274 * that is in the iocbq structure. The xritag is assigned during iocbq
10275 * creation and persists for as long as the driver is loaded.
10276 * If the caller has fewer than 256 scatter gather segments to map then
10277 * pdma_phys_addr1 should be 0.
10278 * If the caller needs to map more than 256 scatter gather segments then
10279 * pdma_phys_addr1 should be a valid physical address.
10280 * Physical addresses for SGLs must be 64-byte aligned.
10281 * If two SGLs are mapped then the first one must have 256 entries and
10282 * the second can have between 1 and 256 entries.
10283 *
10284 * Return codes:
10285 * 0 - Success
10286 * -ENXIO, -ENOMEM - Failure
10287 **/
10288int
10289lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10290 dma_addr_t pdma_phys_addr0,
10291 dma_addr_t pdma_phys_addr1,
10292 uint16_t xritag)
10293{
10294 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10295 LPFC_MBOXQ_t *mbox;
10296 int rc;
10297 uint32_t shdr_status, shdr_add_status;
10298 union lpfc_sli4_cfg_shdr *shdr;
10299
10300 if (xritag == NO_XRI) {
10301 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10302 "0364 Invalid param:\n");
10303 return -EINVAL;
10304 }
10305
10306 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10307 if (!mbox)
10308 return -ENOMEM;
10309
10310 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10311 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10312 sizeof(struct lpfc_mbx_post_sgl_pages) -
10313 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10314
10315 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10316 &mbox->u.mqe.un.post_sgl_pages;
10317 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10318 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10319
10320 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10321 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10322 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10323 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10324
10325 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10326 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10327 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10328 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10329 if (!phba->sli4_hba.intr_enable)
10330 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10331 else
10332 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10333 /* The IOCTL status is embedded in the mailbox subheader. */
10334 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10335 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10336 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10337 if (rc != MBX_TIMEOUT)
10338 mempool_free(mbox, phba->mbox_mem_pool);
10339 if (shdr_status || shdr_add_status || rc) {
10340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10341 "2511 POST_SGL mailbox failed with "
10342 "status x%x add_status x%x, mbx status x%x\n",
10343 shdr_status, shdr_add_status, rc);
10344 rc = -ENXIO;
10345 }
10346	return rc;
10347}
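
/*
 * Usage sketch for the single-XRI post above; the helper name is
 * illustrative. A one-page SGL passes 0 for the second page address, per
 * the rules documented on lpfc_sli4_post_sgl():
 */
static int lpfc_sketch_post_one_sgl(struct lpfc_hba *phba,
				    struct lpfc_sglq *sglq)
{
	/* sglq->phys and sglq->sli4_xritag come from driver SGL setup */
	return lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
}
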
10348/**
10349 * lpfc_sli4_remove_all_sgl_pages - Remove all sgl pages registered with the HBA
10350 * @phba: Pointer to the HBA context object on which this call is executed.
10351 *
10352 * This routine will remove all of the sgl pages registered with the hba.
10353 *
10354 * Return codes:
10355 * 0 - Success
10356 * -ENXIO, -ENOMEM - Failure
10357 **/
10358int
10359lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10360{
10361 LPFC_MBOXQ_t *mbox;
10362 int rc;
10363 uint32_t shdr_status, shdr_add_status;
10364 union lpfc_sli4_cfg_shdr *shdr;
10365
10366 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10367 if (!mbox)
10368 return -ENOMEM;
10369
10370 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10371 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10372 LPFC_SLI4_MBX_EMBED);
10373 if (!phba->sli4_hba.intr_enable)
10374 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10375 else
10376 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10377 /* The IOCTL status is embedded in the mailbox subheader. */
10378 shdr = (union lpfc_sli4_cfg_shdr *)
10379 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10380 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10381 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10382 if (rc != MBX_TIMEOUT)
10383 mempool_free(mbox, phba->mbox_mem_pool);
10384 if (shdr_status || shdr_add_status || rc) {
10385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10386 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10387 "status x%x add_status x%x, mbx status x%x\n",
10388 shdr_status, shdr_add_status, rc);
10389 rc = -ENXIO;
10390 }
10391 return rc;
10392}
10393
10394/**
10395 * lpfc_sli4_next_xritag - Get an xritag for the io
10396 * @phba: Pointer to HBA context object.
10397 *
10398 * This function gets an xritag for the iocb. The function returns the
10399 * allocated xritag if successful; if there is no unused xritag it
10400 * returns 0xffff (NO_XRI), which is not a valid xritag.
10401 *
10402 * The caller is not required to hold any lock.
10403 **/
10404uint16_t
10405lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10406{
10407 uint16_t xritag;
10408
10409 spin_lock_irq(&phba->hbalock);
10410 xritag = phba->sli4_hba.next_xri;
10411 if ((xritag != (uint16_t) -1) && xritag <
10412 (phba->sli4_hba.max_cfg_param.max_xri
10413 + phba->sli4_hba.max_cfg_param.xri_base)) {
10414 phba->sli4_hba.next_xri++;
10415 phba->sli4_hba.max_cfg_param.xri_used++;
10416 spin_unlock_irq(&phba->hbalock);
10417 return xritag;
10418 }
10419 spin_unlock_irq(&phba->hbalock);
10420
10421 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10422			"2004 Failed to allocate XRI. Last XRITAG is %d"
10423 " Max XRI is %d, Used XRI is %d\n",
10424 phba->sli4_hba.next_xri,
10425 phba->sli4_hba.max_cfg_param.max_xri,
10426 phba->sli4_hba.max_cfg_param.xri_used);
10427	return NO_XRI;
10428}
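
/*
 * Sketch of a caller allocating an xritag for a new iocb; the helper name
 * is illustrative. NO_XRI (0xffff) signals exhaustion, matching the return
 * convention documented above:
 */
static int lpfc_sketch_assign_xri(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocbq)
{
	uint16_t xritag = lpfc_sli4_next_xritag(phba);

	if (xritag == NO_XRI)
		return -ENOMEM;		/* no unused XRI available */
	iocbq->sli4_xritag = xritag;
	return 0;
}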
10429
10430/**
10431 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
10432 * @phba: pointer to lpfc hba data structure.
10433 *
10434 * This routine is invoked to post a block of the driver's sgl pages to the
10435 * HBA using non-embedded mailbox command. No Lock is held. This routine
10436 * is only called when the driver is loading and after all IO has been
10437 * stopped.
10438 **/
10439int
10440lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10441{
10442 struct lpfc_sglq *sglq_entry;
10443 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10444 struct sgl_page_pairs *sgl_pg_pairs;
10445 void *viraddr;
10446 LPFC_MBOXQ_t *mbox;
10447 uint32_t reqlen, alloclen, pg_pairs;
10448 uint32_t mbox_tmo;
10449 uint16_t xritag_start = 0;
10450 int els_xri_cnt, rc = 0;
10451 uint32_t shdr_status, shdr_add_status;
10452 union lpfc_sli4_cfg_shdr *shdr;
10453
10454 /* The number of sgls to be posted */
10455 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
10456
10457 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
10458 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10459 if (reqlen > PAGE_SIZE) {
10460 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10461 "2559 Block sgl registration required DMA "
10462				"size (%d) greater than a page\n", reqlen);
10463 return -ENOMEM;
10464 }
10465 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10466 if (!mbox) {
10467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10468 "2560 Failed to allocate mbox cmd memory\n");
10469 return -ENOMEM;
10470 }
10471
10472 /* Allocate DMA memory and set up the non-embedded mailbox command */
10473 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10474 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10475 LPFC_SLI4_MBX_NEMBED);
10476
10477 if (alloclen < reqlen) {
10478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10479 "0285 Allocated DMA memory size (%d) is "
10480 "less than the requested DMA memory "
10481 "size (%d)\n", alloclen, reqlen);
10482 lpfc_sli4_mbox_cmd_free(phba, mbox);
10483 return -ENOMEM;
10484 }
10485
10486 /* Get the first SGE entry from the non-embedded DMA memory */
10487 if (unlikely(!mbox->sge_array)) {
10488 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10489 "2525 Failed to get the non-embedded SGE "
10490 "virtual address\n");
10491 lpfc_sli4_mbox_cmd_free(phba, mbox);
10492 return -ENOMEM;
10493 }
10494 viraddr = mbox->sge_array->addr[0];
10495
10496 /* Set up the SGL pages in the non-embedded DMA pages */
10497 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10498 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10499
10500 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
10501 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
10502 /* Set up the sge entry */
10503 sgl_pg_pairs->sgl_pg0_addr_lo =
10504 cpu_to_le32(putPaddrLow(sglq_entry->phys));
10505 sgl_pg_pairs->sgl_pg0_addr_hi =
10506 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
10507 sgl_pg_pairs->sgl_pg1_addr_lo =
10508 cpu_to_le32(putPaddrLow(0));
10509 sgl_pg_pairs->sgl_pg1_addr_hi =
10510 cpu_to_le32(putPaddrHigh(0));
10511 /* Keep the first xritag on the list */
10512 if (pg_pairs == 0)
10513 xritag_start = sglq_entry->sli4_xritag;
10514 sgl_pg_pairs++;
10515 }
10516 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10517 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
10518 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10519 /* Perform endian conversion if necessary */
10520 sgl->word0 = cpu_to_le32(sgl->word0);
10521
10522 if (!phba->sli4_hba.intr_enable)
10523 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10524 else {
10525 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10526 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10527 }
10528 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10529 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10530 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10531 if (rc != MBX_TIMEOUT)
10532 lpfc_sli4_mbox_cmd_free(phba, mbox);
10533 if (shdr_status || shdr_add_status || rc) {
10534 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10535 "2513 POST_SGL_BLOCK mailbox command failed "
10536 "status x%x add_status x%x mbx status x%x\n",
10537 shdr_status, shdr_add_status, rc);
10538 rc = -ENXIO;
10539 }
10540 return rc;
10541}
10542
10543/**
10544 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
10545 * @phba: pointer to lpfc hba data structure.
10546 * @sblist: pointer to scsi buffer list.
10547 * @cnt: number of scsi buffers on the list.
10548 *
10549 * This routine is invoked to post a block of @cnt scsi sgl pages from a
10550 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
10551 * No Lock is held.
10552 *
10553 **/
10554int
10555lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10556 int cnt)
10557{
10558 struct lpfc_scsi_buf *psb;
10559 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10560 struct sgl_page_pairs *sgl_pg_pairs;
10561 void *viraddr;
10562 LPFC_MBOXQ_t *mbox;
10563 uint32_t reqlen, alloclen, pg_pairs;
10564 uint32_t mbox_tmo;
10565 uint16_t xritag_start = 0;
10566 int rc = 0;
10567 uint32_t shdr_status, shdr_add_status;
10568 dma_addr_t pdma_phys_bpl1;
10569 union lpfc_sli4_cfg_shdr *shdr;
10570
10571 /* Calculate the requested length of the dma memory */
10572 reqlen = cnt * sizeof(struct sgl_page_pairs) +
10573 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10574 if (reqlen > PAGE_SIZE) {
10575 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10576 "0217 Block sgl registration required DMA "
10577				"size (%d) greater than a page\n", reqlen);
10578 return -ENOMEM;
10579 }
10580 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10581 if (!mbox) {
10582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10583 "0283 Failed to allocate mbox cmd memory\n");
10584 return -ENOMEM;
10585 }
10586
10587 /* Allocate DMA memory and set up the non-embedded mailbox command */
10588 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10589 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10590 LPFC_SLI4_MBX_NEMBED);
10591
10592 if (alloclen < reqlen) {
10593 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10594 "2561 Allocated DMA memory size (%d) is "
10595 "less than the requested DMA memory "
10596 "size (%d)\n", alloclen, reqlen);
10597 lpfc_sli4_mbox_cmd_free(phba, mbox);
10598 return -ENOMEM;
10599 }
10600
10601 /* Get the first SGE entry from the non-embedded DMA memory */
10602 if (unlikely(!mbox->sge_array)) {
10603 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10604 "2565 Failed to get the non-embedded SGE "
10605 "virtual address\n");
10606 lpfc_sli4_mbox_cmd_free(phba, mbox);
10607 return -ENOMEM;
10608 }
10609 viraddr = mbox->sge_array->addr[0];
10610
10611 /* Set up the SGL pages in the non-embedded DMA pages */
10612 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10613 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10614
10615 pg_pairs = 0;
10616 list_for_each_entry(psb, sblist, list) {
10617 /* Set up the sge entry */
10618 sgl_pg_pairs->sgl_pg0_addr_lo =
10619 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
10620 sgl_pg_pairs->sgl_pg0_addr_hi =
10621 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
10622 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
10623 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
10624 else
10625 pdma_phys_bpl1 = 0;
10626 sgl_pg_pairs->sgl_pg1_addr_lo =
10627 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
10628 sgl_pg_pairs->sgl_pg1_addr_hi =
10629 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
10630 /* Keep the first xritag on the list */
10631 if (pg_pairs == 0)
10632 xritag_start = psb->cur_iocbq.sli4_xritag;
10633 sgl_pg_pairs++;
10634 pg_pairs++;
10635 }
10636 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10637 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10638 /* Perform endian conversion if necessary */
10639 sgl->word0 = cpu_to_le32(sgl->word0);
10640
10641 if (!phba->sli4_hba.intr_enable)
10642 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10643 else {
10644 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10645 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10646 }
10647 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10648 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10649 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10650 if (rc != MBX_TIMEOUT)
10651 lpfc_sli4_mbox_cmd_free(phba, mbox);
10652 if (shdr_status || shdr_add_status || rc) {
10653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10654 "2564 POST_SGL_BLOCK mailbox command failed "
10655 "status x%x add_status x%x mbx status x%x\n",
10656 shdr_status, shdr_add_status, rc);
10657 rc = -ENXIO;
10658 }
10659 return rc;
10660}
10661
10662/**
10663 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
10664 * @phba: pointer to lpfc_hba struct that the frame was received on
10665 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10666 *
10667 * This function checks the fields in the @fc_hdr to see if the FC frame is a
10668 * valid type of frame that the LPFC driver will handle. This function will
10669 * return a zero if the frame is a valid frame or a non zero value when the
10670 * return zero if the frame is a valid frame or a non-zero value when the
10671 **/
10672static int
10673lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
10674{
10675 char *rctl_names[] = FC_RCTL_NAMES_INIT;
10676 char *type_names[] = FC_TYPE_NAMES_INIT;
10677 struct fc_vft_header *fc_vft_hdr;
10678
10679 switch (fc_hdr->fh_r_ctl) {
10680 case FC_RCTL_DD_UNCAT: /* uncategorized information */
10681 case FC_RCTL_DD_SOL_DATA: /* solicited data */
10682 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
10683 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
10684 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
10685 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
10686 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
10687 case FC_RCTL_DD_CMD_STATUS: /* command status */
10688 case FC_RCTL_ELS_REQ: /* extended link services request */
10689 case FC_RCTL_ELS_REP: /* extended link services reply */
10690 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
10691 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
10692 case FC_RCTL_BA_NOP: /* basic link service NOP */
10693 case FC_RCTL_BA_ABTS: /* basic link service abort */
10694 case FC_RCTL_BA_RMC: /* remove connection */
10695 case FC_RCTL_BA_ACC: /* basic accept */
10696 case FC_RCTL_BA_RJT: /* basic reject */
10697	case FC_RCTL_BA_PRMT:	/* dedicated connection preempted */
10698 case FC_RCTL_ACK_1: /* acknowledge_1 */
10699 case FC_RCTL_ACK_0: /* acknowledge_0 */
10700 case FC_RCTL_P_RJT: /* port reject */
10701 case FC_RCTL_F_RJT: /* fabric reject */
10702 case FC_RCTL_P_BSY: /* port busy */
10703 case FC_RCTL_F_BSY: /* fabric busy to data frame */
10704 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
10705 case FC_RCTL_LCR: /* link credit reset */
10706 case FC_RCTL_END: /* end */
10707 break;
10708 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
10709 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10710 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
10711 return lpfc_fc_frame_check(phba, fc_hdr);
10712 default:
10713 goto drop;
10714 }
10715 switch (fc_hdr->fh_type) {
10716 case FC_TYPE_BLS:
10717 case FC_TYPE_ELS:
10718 case FC_TYPE_FCP:
10719 case FC_TYPE_CT:
10720 break;
10721 case FC_TYPE_IP:
10722 case FC_TYPE_ILS:
10723 default:
10724 goto drop;
10725 }
10726 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10727 "2538 Received frame rctl:%s type:%s\n",
10728 rctl_names[fc_hdr->fh_r_ctl],
10729 type_names[fc_hdr->fh_type]);
10730 return 0;
10731drop:
10732 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
10733 "2539 Dropped frame rctl:%s type:%s\n",
10734 rctl_names[fc_hdr->fh_r_ctl],
10735 type_names[fc_hdr->fh_type]);
10736 return 1;
10737}
10738
10739/**
10740 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
10741 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10742 *
10743 * This function processes the FC header to retrieve the VFI from the VF
10744 * header, if one exists. This function will return the VFI if one exists
10745 * or 0 if no VF header exists.
10746 **/
10747static uint32_t
10748lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10749{
10750 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10751
10752 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10753 return 0;
10754 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10755}
10756
10757/**
10758 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
10759 * @phba: Pointer to the HBA structure to search for the vport on
10760 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10761 * @fcfi: The FCF index (FCFI) that the frame was received on
10762 *
10763 * This function searches the @phba for a vport that matches the content of the
10764 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
10765 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
10766 * returns the matching vport pointer or NULL if unable to match frame to a
10767 * vport.
10768 **/
10769static struct lpfc_vport *
10770lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10771 uint16_t fcfi)
10772{
10773 struct lpfc_vport **vports;
10774 struct lpfc_vport *vport = NULL;
10775 int i;
10776 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10777 fc_hdr->fh_d_id[1] << 8 |
10778 fc_hdr->fh_d_id[2]);
10779
10780 vports = lpfc_create_vport_work_array(phba);
10781 if (vports != NULL)
10782 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10783 if (phba->fcf.fcfi == fcfi &&
10784 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10785 vports[i]->fc_myDID == did) {
10786 vport = vports[i];
10787 break;
10788 }
10789 }
10790 lpfc_destroy_vport_work_array(phba, vports);
10791 return vport;
10792}
10793
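/*
 * Illustrative sketch (standalone C, not driver code): FC addresses such
 * as the D_ID above travel as three big-endian bytes in the frame header;
 * the shift/OR below is the same 24-bit assembly in isolation.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fc_addr(const uint8_t id[3])
{
        /* byte 0 is the most significant byte of the 24-bit address */
        return (uint32_t)id[0] << 16 | (uint32_t)id[1] << 8 | id[2];
}

int main(void)
{
        uint8_t did[3] = { 0xff, 0xff, 0xfe }; /* well-known fabric F_Port */
        printf("did = 0x%06x\n", fc_addr(did));
        return 0;
}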
10794/**
10795 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10796 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10797 *
10798 * This function searches through the existing incomplete sequences that have
10799 * been sent to this @vport. If the frame matches one of the incomplete
10800 * sequences then the dbuf in the @dmabuf is added to the list of frames that
10801 * make up that sequence. If no sequence is found that matches this frame then
10802 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
10803 * This function returns a pointer to the first dmabuf in the sequence list that
10804 * the frame was linked to.
10805 **/
10806static struct hbq_dmabuf *
10807lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10808{
10809 struct fc_frame_header *new_hdr;
10810 struct fc_frame_header *temp_hdr;
10811 struct lpfc_dmabuf *d_buf;
10812 struct lpfc_dmabuf *h_buf;
10813 struct hbq_dmabuf *seq_dmabuf = NULL;
10814 struct hbq_dmabuf *temp_dmabuf = NULL;
10815
10816 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10817 /* Use the hdr_buf to find the sequence that this frame belongs to */
10818 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
10819 temp_hdr = (struct fc_frame_header *)h_buf->virt;
10820 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
10821 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
10822 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
10823 continue;
10824 /* found a pending sequence that matches this frame */
10825 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10826 break;
10827 }
10828 if (!seq_dmabuf) {
10829 /*
10830 * This indicates first frame received for this sequence.
10831 * Queue the buffer on the vport's rcv_buffer_list.
10832 */
10833 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10834 return dmabuf;
10835 }
10836 temp_hdr = seq_dmabuf->hbuf.virt;
10837 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
10838 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
10839 return dmabuf;
10840 }
10841 /* find the correct place in the sequence to insert this frame */
10842 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10843 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10844 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
10845 /*
10846 * If the frame's sequence count is greater than the frame on
10847 * the list then insert the frame right after this frame
10848 */
10849 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
10850 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10851 return seq_dmabuf;
10852 }
10853 }
10854 return NULL;
10855}
10856
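/*
 * Illustrative sketch of the matching rule above, in isolation: two frames
 * belong to the same sequence when SEQ_ID, OX_ID, and the 3-byte S_ID all
 * agree. The reduced header type here is hypothetical, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mini_hdr {
        uint8_t  s_id[3];       /* source FC address */
        uint8_t  seq_id;        /* sequence identifier */
        uint16_t ox_id;         /* originator exchange identifier */
};

static int same_sequence(const struct mini_hdr *a, const struct mini_hdr *b)
{
        return a->seq_id == b->seq_id && a->ox_id == b->ox_id &&
               !memcmp(a->s_id, b->s_id, 3);
}

int main(void)
{
        struct mini_hdr x = { { 0x01, 0x02, 0x03 }, 7, 0x1234 };
        struct mini_hdr y = x;
        printf("match = %d\n", same_sequence(&x, &y)); /* 1 */
        return 0;
}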
10857/**
10858 * lpfc_seq_complete - Indicates if a sequence is complete
10859 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10860 *
10861 * This function checks the sequence, starting with the frame described by
10862 * @dmabuf, to see if all the frames associated with this sequence are present.
10863 * The frames associated with this sequence are linked to the @dmabuf using the
10864 * dbuf list. This function checks for three things: 1) that the first frame
10865 * has a sequence count of zero; 2) that one frame has the last-frame-of-sequence
10866 * bit set; and 3) that there are no holes in the sequence count. The function will
10867 * return 1 when the sequence is complete, otherwise it will return 0.
10868 **/
10869static int
10870lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10871{
10872 struct fc_frame_header *hdr;
10873 struct lpfc_dmabuf *d_buf;
10874 struct hbq_dmabuf *seq_dmabuf;
10875 uint32_t fctl;
10876 int seq_count = 0;
10877
10878 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10879 /* make sure first frame of sequence has a sequence count of zero */
10880 if (hdr->fh_seq_cnt != seq_count)
10881 return 0;
10882 fctl = (hdr->fh_f_ctl[0] << 16 |
10883 hdr->fh_f_ctl[1] << 8 |
10884 hdr->fh_f_ctl[2]);
10885 /* If last frame of sequence we can return success. */
10886 if (fctl & FC_FC_END_SEQ)
10887 return 1;
10888 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
10889 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10890 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10891 /* If there is a hole in the sequence count then fail. */
10892 if (++seq_count != hdr->fh_seq_cnt)
10893 return 0;
10894 fctl = (hdr->fh_f_ctl[0] << 16 |
10895 hdr->fh_f_ctl[1] << 8 |
10896 hdr->fh_f_ctl[2]);
10897 /* If last frame of sequence we can return success. */
10898 if (fctl & FC_FC_END_SEQ)
10899 return 1;
10900 }
10901 return 0;
10902}
10903
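/*
 * User-space model of the completeness test above, under the assumption
 * (guaranteed by lpfc_fc_frame_add()) that the frames are already sorted
 * by sequence count. Complete means the counts run 0, 1, 2, ... with no
 * holes and some frame carries the end-of-sequence bit.
 */
#include <stdbool.h>
#include <stdio.h>

struct mini_frame { unsigned seq_cnt; bool end_seq; };

static bool seq_complete(const struct mini_frame *f, unsigned n)
{
        for (unsigned i = 0; i < n; i++) {
                if (f[i].seq_cnt != i)  /* hole, or first count not zero */
                        return false;
                if (f[i].end_seq)       /* saw the final frame */
                        return true;
        }
        return false;                   /* no end-of-sequence frame yet */
}

int main(void)
{
        struct mini_frame seq[] = { {0, false}, {1, false}, {2, true} };
        printf("complete = %d\n", seq_complete(seq, 3)); /* 1 */
        return 0;
}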
10904/**
10905 * lpfc_prep_seq - Prep sequence for ULP processing
10906 * @vport: Pointer to the vport on which this sequence was received
10907 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
10908 *
10909 * This function takes a sequence, described by a list of frames, and creates
10910 * a list of iocbq structures to describe the sequence. This iocbq list will be
10911 * used to issue to the generic unsolicited sequence handler. This routine
10912 * returns a pointer to the first iocbq in the list. If the function is unable
10913 * to allocate an iocbq then it throws out the received frames that could not
10914 * be described and returns a pointer to the first iocbq. If unable to
10915 * allocate any iocbqs (including the first) this function will return NULL.
10916 **/
10917static struct lpfc_iocbq *
10918lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10919{
10920 struct lpfc_dmabuf *d_buf, *n_buf;
10921 struct lpfc_iocbq *first_iocbq, *iocbq;
10922 struct fc_frame_header *fc_hdr;
10923 uint32_t sid;
10924
10925 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10926 /* remove from receive buffer list */
10927 list_del_init(&seq_dmabuf->hbuf.list);
10928 /* get the Remote Port's SID */
10929 sid = (fc_hdr->fh_s_id[0] << 16 |
10930 fc_hdr->fh_s_id[1] << 8 |
10931 fc_hdr->fh_s_id[2]);
10932 /* Get an iocbq struct to fill in. */
10933 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10934 if (first_iocbq) {
10935 /* Initialize the first IOCB. */
10936 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
10937 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
10938 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
10939 first_iocbq->iocb.unsli3.rcvsli3.vpi =
10940 vport->vpi + vport->phba->vpi_base;
10941 /* put the first buffer into the first IOCBq */
10942 first_iocbq->context2 = &seq_dmabuf->dbuf;
10943 first_iocbq->context3 = NULL;
10944 first_iocbq->iocb.ulpBdeCount = 1;
10945 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10946 LPFC_DATA_BUF_SIZE;
10947 first_iocbq->iocb.un.rcvels.remoteID = sid;
10948 }
10949 iocbq = first_iocbq;
10950 /*
10951 * Each IOCBq can have two Buffers assigned, so go through the list
10952 * of buffers for this sequence and save two buffers in each IOCBq
10953 */
10954 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
10955 if (!iocbq) {
10956 lpfc_in_buf_free(vport->phba, d_buf);
10957 continue;
10958 }
10959 if (!iocbq->context3) {
10960 iocbq->context3 = d_buf;
10961 iocbq->iocb.ulpBdeCount++;
10962 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
10963 LPFC_DATA_BUF_SIZE;
10964 } else {
10965 iocbq = lpfc_sli_get_iocbq(vport->phba);
10966 if (!iocbq) {
10967 if (first_iocbq) {
10968 first_iocbq->iocb.ulpStatus =
10969 IOSTAT_FCP_RSP_ERROR;
10970 first_iocbq->iocb.un.ulpWord[4] =
10971 IOERR_NO_RESOURCES;
10972 }
10973 lpfc_in_buf_free(vport->phba, d_buf);
10974 continue;
10975 }
10976 iocbq->context2 = d_buf;
10977 iocbq->context3 = NULL;
10978 iocbq->iocb.ulpBdeCount = 1;
10979 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10980 LPFC_DATA_BUF_SIZE;
10981 iocbq->iocb.un.rcvels.remoteID = sid;
10982 list_add_tail(&iocbq->list, &first_iocbq->list);
10983 }
10984 }
10985 return first_iocbq;
10986}
10987
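/*
 * Minimal model of the buffer packing above (illustrative only): each IOCB
 * carries up to two buffers (context2 and context3), so a sequence of n
 * buffers consumes ceil(n / 2) IOCBs, with the header's buffer always in
 * context2 of the first one.
 */
#include <stdio.h>

int main(void)
{
        for (unsigned n = 1; n <= 6; n++)
                printf("%u buffers -> %u iocbs\n", n, (n + 1) / 2);
        return 0;
}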
10988/**
10989 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
10990 * @phba: Pointer to HBA context object.
10991 *
10992 * This function is called with no lock held. It processes all
10993 * the received buffers and hands them to the upper layer when a received
10994 * buffer indicates that it is the final frame in the sequence. The interrupt
10995 * service routine processes received buffers in interrupt context, adds the
10996 * received dma buffers to the rb_pend_list queue, and signals the worker thread.
10997 * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
10998 * appropriate receive function when the final frame in a sequence is received.
10999 **/
11000int
11001lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
11002{
11003 LIST_HEAD(cmplq);
11004 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
11005 struct fc_frame_header *fc_hdr;
11006 struct lpfc_vport *vport;
11007 uint32_t fcfi;
11008 struct lpfc_iocbq *iocbq;
11009
11010 /* Clear hba flag and get all received buffers into the cmplq */
11011 spin_lock_irq(&phba->hbalock);
11012 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
11013 list_splice_init(&phba->rb_pend_list, &cmplq);
11014 spin_unlock_irq(&phba->hbalock);
11015
11016 /* Process each received buffer */
11017 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
11018 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11019 /* check to see if this a valid type of frame */
11020 if (lpfc_fc_frame_check(phba, fc_hdr)) {
11021 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11022 continue;
11023 }
11024 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
11025 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
11026 if (!vport) {
11027 /* throw out the frame */
11028 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11029 continue;
11030 }
11031 /* Link this frame */
11032 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
11033 if (!seq_dmabuf) {
11034 /* unable to add frame to vport - throw it out */
11035 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11036 continue;
11037 }
11038 /* If not last frame in sequence continue processing frames. */
11039 if (!lpfc_seq_complete(seq_dmabuf)) {
11040 /*
11041 * When saving off frames, post a new one and mark this
11042 * frame to be freed when it is finished.
11043 */
11044 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
11045 dmabuf->tag = -1;
11046 continue;
11047 }
11048 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11049 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11050 if (!lpfc_complete_unsol_iocb(phba,
11051 &phba->sli.ring[LPFC_ELS_RING],
11052 iocbq, fc_hdr->fh_r_ctl,
11053 fc_hdr->fh_type))
11054 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11055 "2540 Ring %d handler: unexpected Rctl "
11056 "x%x Type x%x received\n",
11057 LPFC_ELS_RING,
11058 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11059 }
11060 return 0;
11061}
11062
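/*
 * The lock discipline above is the common "splice under lock" pattern:
 * hold the lock only long enough to move the shared pending list onto a
 * private list, then walk the private list lock-free. A generic user-space
 * analogue with pthreads; all names below are made up for illustration.
 */
#include <pthread.h>

struct node { struct node *next; };

static pthread_mutex_t pend_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pend_head;          /* producers push here */

static void drain_pending(void (*process)(struct node *))
{
        struct node *local, *next;

        pthread_mutex_lock(&pend_lock);
        local = pend_head;              /* steal the whole list */
        pend_head = NULL;
        pthread_mutex_unlock(&pend_lock);

        for (; local; local = next) {   /* process without the lock */
                next = local->next;
                process(local);
        }
}

static void show(struct node *n) { (void)n; }

int main(void)
{
        struct node a = { 0 }, b = { &a };

        pend_head = &b;
        drain_pending(show);
        return pend_head == NULL ? 0 : 1;
}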
11063/**
11064 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
11065 * @phba: pointer to lpfc hba data structure.
11066 *
11067 * This routine is invoked to post rpi header templates to the
11068 * HBA consistent with the SLI-4 interface spec. This routine
11069 * posts a PAGE_SIZE memory region to the port to hold up to
11070 * PAGE_SIZE / 64 rpi context headers.
11071 *
11072 * This routine does not require any locks. Its use is expected
11073 * at driver load or reset recovery, when driver execution is
11074 * sequential.
11075 *
11076 * Return codes
11077 * 0 - successful
11078 * EIO - The mailbox failed to complete successfully.
11079 * When this error occurs, the driver is not guaranteed
11080 * to have any rpi regions posted to the device and
11081 * must either attempt to repost the regions or take a
11082 * fatal error.
11083 **/
11084int
11085lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
11086{
11087 struct lpfc_rpi_hdr *rpi_page;
11088 uint32_t rc = 0;
11089
11090 /* Post all rpi memory regions to the port. */
11091 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
11092 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
11093 if (rc != MBX_SUCCESS) {
11094 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11095 "2008 Error %d posting all rpi "
11096 "headers\n", rc);
11097 rc = -EIO;
11098 break;
11099 }
11100 }
11101
11102 return rc;
11103}
11104
11105/**
11106 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
11107 * @phba: pointer to lpfc hba data structure.
11108 * @rpi_page: pointer to the rpi memory region.
11109 *
11110 * This routine is invoked to post a single rpi header to the
11111 * HBA consistent with the SLI-4 interface spec. This memory region
11112 * maps up to 64 rpi context regions.
11113 *
11114 * Return codes
11115 * 0 - successful
11116 * ENOMEM - No available memory
11117 * EIO - The mailbox failed to complete successfully.
11118 **/
11119int
11120lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
11121{
11122 LPFC_MBOXQ_t *mboxq;
11123 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
11124 uint32_t rc = 0;
11125 uint32_t mbox_tmo;
11126 uint32_t shdr_status, shdr_add_status;
11127 union lpfc_sli4_cfg_shdr *shdr;
11128
11129 /* The port is notified of the header region via a mailbox command. */
11130 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11131 if (!mboxq) {
11132 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11133 "2001 Unable to allocate memory for issuing "
11134 "SLI_CONFIG_SPECIAL mailbox command\n");
11135 return -ENOMEM;
11136 }
11137
11138 /* Post this rpi header region to the port. */
11139 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
11140 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
11141 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11142 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
11143 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
11144 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
11145 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
11146 hdr_tmpl, rpi_page->page_count);
11147 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
11148 rpi_page->start_rpi);
11149 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11150 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11151 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11152 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11153 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11154 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11155 if (rc != MBX_TIMEOUT)
11156 mempool_free(mboxq, phba->mbox_mem_pool);
11157 if (shdr_status || shdr_add_status || rc) {
11158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11159 "2514 POST_RPI_HDR mailbox failed with "
11160 "status x%x add_status x%x, mbx status x%x\n",
11161 shdr_status, shdr_add_status, rc);
11162 rc = -ENXIO;
11163 }
11164 return rc;
11165}
11166
11167/**
11168 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
11169 * @phba: pointer to lpfc hba data structure.
11170 *
11171 * This routine is invoked to allocate an rpi from the driver's rpi
11172 * bitmask. If the allocation leaves the driver running low on rpi
11173 * resources, it also creates and posts another rpi header region to
11174 * the port so that further rpis remain available.
11175 *
11176 * Returns
11177 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11178 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11179 **/
11180int
11181lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11182{
11183 int rpi;
11184 uint16_t max_rpi, rpi_base, rpi_limit;
11185 uint16_t rpi_remaining;
11186 struct lpfc_rpi_hdr *rpi_hdr;
11187
11188 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
11189 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
11190 rpi_limit = phba->sli4_hba.next_rpi;
11191
11192 /*
11193 * The valid rpi range is not guaranteed to be zero-based. Start
11194 * the search at the rpi_base as reported by the port.
11195 */
11196 spin_lock_irq(&phba->hbalock);
11197 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
11198 if (rpi >= rpi_limit || rpi < rpi_base)
11199 rpi = LPFC_RPI_ALLOC_ERROR;
11200 else {
11201 set_bit(rpi, phba->sli4_hba.rpi_bmask);
11202 phba->sli4_hba.max_cfg_param.rpi_used++;
11203 phba->sli4_hba.rpi_count++;
11204 }
11205
11206 /*
11207 * Don't try to allocate more rpi header regions if the device's
11208 * limit on available rpis has been exhausted.
11209 */
11210 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
11211 (phba->sli4_hba.rpi_count >= max_rpi)) {
11212 spin_unlock_irq(&phba->hbalock);
11213 return rpi;
11214 }
11215
11216 /*
11217 * If the driver is running low on rpi resources, allocate another
11218 * page now. Note that the next_rpi value is used because
11219 * it represents how many are actually in use, whereas max_rpi is
11220 * the maximum number supported by the device.
11221 */
11222 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
11223 phba->sli4_hba.rpi_count;
11224 spin_unlock_irq(&phba->hbalock);
11225 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
11226 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
11227 if (!rpi_hdr) {
11228 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11229 "2002 Error Could not grow rpi "
11230 "count\n");
11231 } else {
11232 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
11233 }
11234 }
11235
11236 return rpi;
11237}
11238
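/*
 * User-space model of the rpi bitmask allocation above.
 * find_next_zero_bit() is a kernel helper; the loop below is an
 * illustrative stand-in with the same meaning: find the first clear bit
 * at or after base (but below limit), set it, or report failure.
 */
#include <stdio.h>

#define RPI_LIMIT       64
#define RPI_ALLOC_ERROR (-1)

static unsigned char bmask[RPI_LIMIT];  /* one byte per bit, for clarity */

static int alloc_id(unsigned base, unsigned limit)
{
        for (unsigned i = base; i < limit; i++) {
                if (!bmask[i]) {
                        bmask[i] = 1;
                        return (int)i;
                }
        }
        return RPI_ALLOC_ERROR;
}

int main(void)
{
        printf("first = %d\n", alloc_id(8, RPI_LIMIT)); /* 8 */
        printf("next  = %d\n", alloc_id(8, RPI_LIMIT)); /* 9 */
        return 0;
}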
11239/**
11240 * lpfc_sli4_free_rpi - Release an rpi for reuse.
11241 * @phba: pointer to lpfc hba data structure.
11242 *
11243 * This routine is invoked to release an rpi to the pool of
11244 * available rpis maintained by the driver.
11245 **/
11246void
11247lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11248{
11249 spin_lock_irq(&phba->hbalock);
11250 clear_bit(rpi, phba->sli4_hba.rpi_bmask);
11251 phba->sli4_hba.rpi_count--;
11252 phba->sli4_hba.max_cfg_param.rpi_used--;
11253 spin_unlock_irq(&phba->hbalock);
11254}
11255
11256/**
11257 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
11258 * @phba: pointer to lpfc hba data structure.
11259 *
11260 * This routine is invoked to free the bitmask memory region
11261 * used to track available rpis.
11262 **/
11263void
11264lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
11265{
11266 kfree(phba->sli4_hba.rpi_bmask);
11267}
11268
11269/**
11270 * lpfc_sli4_resume_rpi - Resume an rpi with the port
11271 * @ndlp: pointer to the remote node whose rpi is to be resumed.
11272 *
11273 * This routine is invoked to issue a RESUME_RPI mailbox command to the
11274 * port so that I/O can resume on the remote node's rpi.
11275 **/
11276int
11277lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
11278{
11279 LPFC_MBOXQ_t *mboxq;
11280 struct lpfc_hba *phba = ndlp->phba;
11281 int rc;
11282
11283 /* Resuming an rpi requires a mailbox command to the port. */
11284 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11285 if (!mboxq)
11286 return -ENOMEM;
11287
11288 /* Build and issue the RESUME_RPI mailbox command. */
11289 lpfc_resume_rpi(mboxq, ndlp);
11290 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11291 if (rc == MBX_NOT_FINISHED) {
11292 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11293 "2010 Resume RPI Mailbox failed "
11294 "status %d, mbxStatus x%x\n", rc,
11295 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11296 mempool_free(mboxq, phba->mbox_mem_pool);
11297 return -EIO;
11298 }
11299 return 0;
11300}
11301
11302/**
11303 * lpfc_sli4_init_vpi - Initialize a vpi with the port
11304 * @phba: pointer to lpfc hba data structure.
11305 * @vpi: vpi value to activate with the port.
11306 *
11307 * This routine is invoked to activate a vpi with the
11308 * port when the host intends to use vports with a
11309 * nonzero vpi.
11310 *
11311 * Returns:
11312 * 0 success
11313 * -Evalue otherwise
11314 **/
11315int
11316lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11317{
11318 LPFC_MBOXQ_t *mboxq;
11319 int rc = 0;
11320 uint32_t mbox_tmo;
11321
11322 if (vpi == 0)
11323 return -EINVAL;
11324 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11325 if (!mboxq)
11326 return -ENOMEM;
11327 lpfc_init_vpi(mboxq, vpi);
11328 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11329 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11330 if (rc != MBX_TIMEOUT)
11331 mempool_free(mboxq, phba->mbox_mem_pool);
11332 if (rc != MBX_SUCCESS) {
11333 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11334 "2022 INIT VPI Mailbox failed "
11335 "status %d, mbxStatus x%x\n", rc,
11336 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11337 rc = -EIO;
11338 }
11339 return rc;
11340}
11341
11342/**
11343 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
11344 * @phba: pointer to lpfc hba data structure.
11345 * @mboxq: Pointer to mailbox object.
11346 *
11347 * This routine is the completion handler for the ADD_FCF mailbox command.
11348 * It checks the mailbox sub-header status, logs any failure other than
11349 * STATUS_FCF_IN_USE, and frees the nonembedded mailbox resources.
11350 **/
11351static void
11352lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11353{
11354 void *virt_addr;
11355 union lpfc_sli4_cfg_shdr *shdr;
11356 uint32_t shdr_status, shdr_add_status;
11357
11358 virt_addr = mboxq->sge_array->addr[0];
11359 /* The IOCTL status is embedded in the mailbox subheader. */
11360 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
11361 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11362 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11363
11364 if ((shdr_status || shdr_add_status) &&
11365 (shdr_status != STATUS_FCF_IN_USE))
11366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11367 "2558 ADD_FCF_RECORD mailbox failed with "
11368 "status x%x add_status x%x\n",
11369 shdr_status, shdr_add_status);
11370
11371 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11372}
11373
11374/**
11375 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
11376 * @phba: pointer to lpfc hba data structure.
11377 * @fcf_record: pointer to the initialized fcf record to add.
11378 *
11379 * This routine is invoked to manually add a single FCF record. The caller
11380 * must pass a completely initialized FCF_Record. This routine takes
11381 * care of the nonembedded mailbox operations.
11382 **/
11383int
11384lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11385{
11386 int rc = 0;
11387 LPFC_MBOXQ_t *mboxq;
11388 uint8_t *bytep;
11389 void *virt_addr;
11390 dma_addr_t phys_addr;
11391 struct lpfc_mbx_sge sge;
11392 uint32_t alloc_len, req_len;
11393 uint32_t fcfindex;
11394
11395 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11396 if (!mboxq) {
11397 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11398 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
11399 return -ENOMEM;
11400 }
11401
11402 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
11403 sizeof(uint32_t);
11404
11405 /* Allocate DMA memory and set up the non-embedded mailbox command */
11406 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11407 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
11408 req_len, LPFC_SLI4_MBX_NEMBED);
11409 if (alloc_len < req_len) {
11410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11411 "2523 Allocated DMA memory size (x%x) is "
11412 "less than the requested DMA memory "
11413 "size (x%x)\n", alloc_len, req_len);
11414 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11415 return -ENOMEM;
11416 }
11417
11418 /*
11419 * Get the first SGE entry from the non-embedded DMA memory. This
11420 * routine only uses a single SGE.
11421 */
11422 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11423 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11424 if (unlikely(!mboxq->sge_array)) {
11425 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11426 "2526 Failed to get the non-embedded SGE "
11427 "virtual address\n");
11428 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11429 return -ENOMEM;
11430 }
11431 virt_addr = mboxq->sge_array->addr[0];
11432 /*
11433 * Configure the FCF record for FCFI 0. This is the driver's
11434 * hardcoded default and gets used in nonFIP mode.
11435 */
11436 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
11437 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11438 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
11439
11440 /*
11441 * Copy the fcf_index and the FCF Record Data. The data starts after
11442 * the FCoE header plus word10. The data copy needs to be endian
11443 * correct.
11444 */
11445 bytep += sizeof(uint32_t);
11446 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
11447 mboxq->vport = phba->pport;
11448 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
11449 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11450 if (rc == MBX_NOT_FINISHED) {
11451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11452 "2515 ADD_FCF_RECORD mailbox failed with "
11453 "status 0x%x\n", rc);
11454 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11455 rc = -EIO;
11456 } else
11457 rc = 0;
11458
11459 return rc;
11460}
11461
11462/**
11463 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
11464 * @phba: pointer to lpfc hba data structure.
11465 * @fcf_record: pointer to the fcf record to write the default data.
11466 * @fcf_index: FCF table entry index.
11467 *
11468 * This routine is invoked to build the driver's default FCF record. The
11469 * values used are hardcoded. This routine handles memory initialization.
11470 *
11471 **/
11472void
11473lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11474 struct fcf_record *fcf_record,
11475 uint16_t fcf_index)
11476{
11477 memset(fcf_record, 0, sizeof(struct fcf_record));
11478 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
11479 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
11480 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
11481 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
11482 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
11483 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
11484 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
11485 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
11486 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
11487 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
11488 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
11489 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
11490 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
11491 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
11492 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
11493 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
11494 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
11495 /* Set the VLAN bit map */
11496 if (phba->valid_vlan) {
11497 fcf_record->vlan_bitmap[phba->vlan_id / 8]
11498 = 1 << (phba->vlan_id % 8);
11499 }
11500}
11501
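/*
 * Hedged usage sketch, not quoted from the driver: a plausible way for an
 * init path to combine the two routines above, installing the default FCF
 * record at LPFC_FCOE_FCF_DEF_INDEX. The wrapper name is hypothetical.
 */
static int example_install_dflt_fcf(struct lpfc_hba *phba)
{
        struct fcf_record fcf_record;

        /* fill the record with the driver's hardcoded defaults */
        lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record,
                                        LPFC_FCOE_FCF_DEF_INDEX);
        /* issue the nonembedded ADD_FCF mailbox command */
        return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}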
11502/**
11503 * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record.
11504 * @phba: pointer to lpfc hba data structure.
11505 * @fcf_index: FCF table entry offset.
11506 *
11507 * This routine is invoked to read an FCF record from the
11508 * device, starting at the given @fcf_index.
11509 **/
11510int
11511lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11512{
11513 int rc = 0, error;
11514 LPFC_MBOXQ_t *mboxq;
11515 void *virt_addr;
11516 dma_addr_t phys_addr;
11517 uint8_t *bytep;
11518 struct lpfc_mbx_sge sge;
11519 uint32_t alloc_len, req_len;
11520 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11521
11522 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11523 if (!mboxq) {
11524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11525 "2000 Failed to allocate mbox for "
11526 "READ_FCF cmd\n");
11527 return -ENOMEM;
11528 }
11529
11530 req_len = sizeof(struct fcf_record) +
11531 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
11532
11533 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
11534 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11535 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11536 LPFC_SLI4_MBX_NEMBED);
11537
11538 if (alloc_len < req_len) {
11539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11540 "0291 Allocated DMA memory size (x%x) is "
11541 "less than the requested DMA memory "
11542 "size (x%x)\n", alloc_len, req_len);
11543 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11544 return -ENOMEM;
11545 }
11546
11547 /* Get the first SGE entry from the non-embedded DMA memory. This
11548 * routine only uses a single SGE.
11549 */
11550 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11551 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11552 if (unlikely(!mboxq->sge_array)) {
11553 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11554 "2527 Failed to get the non-embedded SGE "
11555 "virtual address\n");
11556 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11557 return -ENOMEM;
11558 }
11559 virt_addr = mboxq->sge_array->addr[0];
11560 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11561
11562 /* Set up command fields */
11563 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11564 /* Perform necessary endian conversion */
11565 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11566 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11567 mboxq->vport = phba->pport;
11568 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11569 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11570 if (rc == MBX_NOT_FINISHED) {
11571 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11572 error = -EIO;
11573 } else
11574 error = 0;
11575 return error;
11576}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6a..3c53316cf6d0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */
33struct lpfc_sli4_rspiocb_info {
34 uint8_t hw_status;
35 uint8_t bfield;
36#define LPFC_XB 0x1
37#define LPFC_PV 0x2
38 uint8_t priority;
39 uint8_t reserved;
40};
41
32/* This structure is used to handle IOCB requests / responses */ 42/* This structure is used to handle IOCB requests / responses */
33struct lpfc_iocbq { 43struct lpfc_iocbq {
34 /* lpfc_iocbqs are used in double linked lists */ 44 /* lpfc_iocbqs are used in double linked lists */
35 struct list_head list; 45 struct list_head list;
36 struct list_head clist; 46 struct list_head clist;
37 uint16_t iotag; /* pre-assigned IO tag */ 47 uint16_t iotag; /* pre-assigned IO tag */
38 uint16_t rsvd1; 48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
39 49
40 IOCB_t iocb; /* IOCB cmd */ 50 IOCB_t iocb; /* IOCB cmd */
41 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 51 uint8_t retry; /* retry counter for IOCB cmd - if needed */
@@ -46,6 +56,7 @@ struct lpfc_iocbq {
46#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 56#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
47#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 57#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
48#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 58#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
59#define LPFC_FIP_ELS 0x40
49 60
50 uint8_t abort_count; 61 uint8_t abort_count;
51 uint8_t rsvd2; 62 uint8_t rsvd2;
@@ -65,7 +76,7 @@ struct lpfc_iocbq {
65 struct lpfc_iocbq *); 76 struct lpfc_iocbq *);
66 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 77 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
67 struct lpfc_iocbq *); 78 struct lpfc_iocbq *);
68 79 struct lpfc_sli4_rspiocb_info sli4_info;
69}; 80};
70 81
71#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 82#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -81,14 +92,18 @@ struct lpfc_iocbq {
81typedef struct lpfcMboxq { 92typedef struct lpfcMboxq {
82 /* MBOXQs are used in single linked lists */ 93 /* MBOXQs are used in single linked lists */
83 struct list_head list; /* ptr to next mailbox command */ 94 struct list_head list; /* ptr to next mailbox command */
84 MAILBOX_t mb; /* Mailbox cmd */ 95 union {
85 struct lpfc_vport *vport;/* virutal port pointer */ 96 MAILBOX_t mb; /* Mailbox cmd */
97 struct lpfc_mqe mqe;
98 } u;
99 struct lpfc_vport *vport;/* virtual port pointer */
86 void *context1; /* caller context information */ 100 void *context1; /* caller context information */
87 void *context2; /* caller context information */ 101 void *context2; /* caller context information */
88 102
89 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 103 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
90 uint8_t mbox_flag; 104 uint8_t mbox_flag;
91 105 struct lpfc_mcqe mcqe;
106 struct lpfc_mbx_nembed_sge_virt *sge_array;
92} LPFC_MBOXQ_t; 107} LPFC_MBOXQ_t;
93 108
94#define MBX_POLL 1 /* poll mailbox till command done, then 109#define MBX_POLL 1 /* poll mailbox till command done, then
@@ -230,10 +245,11 @@ struct lpfc_sli {
230 245
231 /* Additional sli_flags */ 246 /* Additional sli_flags */
232#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ 247#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
233#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 248#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
234#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 249#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
235#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ 250#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
236#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ 251#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
252#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
237 253
238 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 254 struct lpfc_sli_ring ring[LPFC_MAX_RING];
239 int fcp_ring; /* ring used for FCP initiator commands */ 255 int fcp_ring; /* ring used for FCP initiator commands */
@@ -261,6 +277,8 @@ struct lpfc_sli {
261 277
262#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 278#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
263 command */ 279 command */
280#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
281 command */
264#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 282#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
265 * or erase cmds. This is especially 283 * or erase cmds. This is especially
266 * long because of the potential of 284 * long because of the potential of
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 000000000000..3b276b47d18f
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,467 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10
25/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254
27
28/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1
32#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34
35#define LPFC_FN_WQN_MAX 32
36#define LPFC_SP_WQN_DEF 1
37#define LPFC_FP_WQN_DEF 4
38#define LPFC_FP_WQN_MIN 1
39#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
40
41/*
42 * Provide the default FCF Record attributes used by the driver
43 * when nonFIP mode is configured and there are no other default
44 * FCF Record attributes.
45 */
46#define LPFC_FCOE_FCF_DEF_INDEX 0
47#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
48#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
49
50/* First 3 bytes of default FCF MAC is specified by FC_MAP */
51#define LPFC_FCOE_FCF_MAC3 0xFF
52#define LPFC_FCOE_FCF_MAC4 0xFF
53#define LPFC_FCOE_FCF_MAC5 0xFE
54#define LPFC_FCOE_FCF_MAP0 0x0E
55#define LPFC_FCOE_FCF_MAP1 0xFC
56#define LPFC_FCOE_FCF_MAP2 0x00
57#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
58#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80
60
61enum lpfc_sli4_queue_type {
62 LPFC_EQ,
63 LPFC_GCQ,
64 LPFC_MCQ,
65 LPFC_WCQ,
66 LPFC_RCQ,
67 LPFC_MQ,
68 LPFC_WQ,
69 LPFC_HRQ,
70 LPFC_DRQ
71};
72
73/* The queue sub-type defines the functional purpose of the queue */
74enum lpfc_sli4_queue_subtype {
75 LPFC_NONE,
76 LPFC_MBOX,
77 LPFC_FCP,
78 LPFC_ELS,
79 LPFC_USOL
80};
81
82union sli4_qe {
83 void *address;
84 struct lpfc_eqe *eqe;
85 struct lpfc_cqe *cqe;
86 struct lpfc_mcqe *mcqe;
87 struct lpfc_wcqe_complete *wcqe_complete;
88 struct lpfc_wcqe_release *wcqe_release;
89 struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
90 struct lpfc_rcqe_complete *rcqe_complete;
91 struct lpfc_mqe *mqe;
92 union lpfc_wqe *wqe;
93 struct lpfc_rqe *rqe;
94};
95
96struct lpfc_queue {
97 struct list_head list;
98 enum lpfc_sli4_queue_type type;
99 enum lpfc_sli4_queue_subtype subtype;
100 struct lpfc_hba *phba;
101 struct list_head child_list;
102 uint32_t entry_count; /* Number of entries to support on the queue */
103 uint32_t entry_size; /* Size of each queue entry. */
104 uint32_t queue_id; /* Queue ID assigned by the hardware */
105 struct list_head page_list;
106 uint32_t page_count; /* Number of pages allocated for this queue */
107
108 uint32_t host_index; /* The host's index for putting or getting */
109 uint32_t hba_index; /* The last known hba index for get or put */
110 union sli4_qe qe[1]; /* array to index entries (must be last) */
111};
112
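/*
 * Note on "union sli4_qe qe[1]" above: this is the classic pre-C99
 * trailing-array idiom. The structure is allocated oversized so that qe[]
 * really has entry_count usable slots (modern code would declare a C99
 * flexible array member instead). A hedged user-space sketch of the same
 * allocation pattern, not the driver's allocator:
 */
#include <stdlib.h>

struct flex {
        unsigned count;
        int slot[1];            /* really 'count' entries */
};

static struct flex *flex_alloc(unsigned count)
{
        /* header size plus the count - 1 extra trailing entries */
        struct flex *f = malloc(sizeof(*f) +
                                (count - 1) * sizeof(f->slot[0]));
        if (f)
                f->count = count;
        return f;
}

int main(void)
{
        struct flex *f = flex_alloc(8);

        if (!f)
                return 1;
        for (unsigned i = 0; i < f->count; i++)
                f->slot[i] = (int)i;    /* all 8 slots are usable */
        free(f);
        return 0;
}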
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link {
126 uint8_t speed;
127 uint8_t duplex;
128 uint8_t status;
129 uint8_t physical;
130 uint8_t fault;
131};
132
133struct lpfc_fcf {
134 uint8_t fabric_name[8];
135 uint8_t mac_addr[6];
136 uint16_t fcf_indx;
137 uint16_t fcfi;
138 uint32_t fcf_flag;
139#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
140#define FCF_REGISTERED 0x02 /* FCF registered with FW */
141#define FCF_DISCOVERED 0x04 /* FCF discovery started */
142#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
143#define FCF_IN_USE 0x10 /* At least one discovery completed */
144#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
145 uint32_t priority;
146 uint32_t addr_mode;
147 uint16_t vlan_id;
148};
149
150#define LPFC_REGION23_SIGNATURE "RG23"
151#define LPFC_REGION23_VERSION 1
152#define LPFC_REGION23_LAST_REC 0xff
153struct lpfc_fip_param_hdr {
154 uint8_t type;
155#define FCOE_PARAM_TYPE 0xA0
156 uint8_t length;
157#define FCOE_PARAM_LENGTH 2
158 uint8_t parm_version;
159#define FIPP_VERSION 0x01
160 uint8_t parm_flags;
161#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
162#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
163#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
164#define FIPP_MODE_ON 0x2
165#define FIPP_MODE_OFF 0x0
166#define FIPP_VLAN_VALID 0x1
167};
168
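/*
 * The _SHIFT/_MASK/_WORD triplets above drive the driver's bf_set()/bf_get()
 * accessors, defined in lpfc_hw4.h roughly as below; quoted from memory, so
 * treat the exact definition as an assumption. A standalone demonstration
 * with a hypothetical 2-bit field at bits 7:6 of 'flags':
 */
#include <stdint.h>
#include <stdio.h>

#define bf_get(name, ptr) \
        (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
        ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
         ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct demo {
        uint32_t flags;
};
#define demo_mode_SHIFT 6
#define demo_mode_MASK  0x3
#define demo_mode_WORD  flags

int main(void)
{
        struct demo d = { 0 };

        bf_set(demo_mode, &d, 2);
        /* prints mode=2 flags=0x80 */
        printf("mode=%u flags=0x%x\n",
               (unsigned)bf_get(demo_mode, &d), (unsigned)d.flags);
        return 0;
}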
169struct lpfc_fcoe_params {
170 uint8_t fc_map[3];
171 uint8_t reserved1;
172 uint16_t vlan_tag;
173 uint8_t reserved[2];
174};
175
176struct lpfc_fcf_conn_hdr {
177 uint8_t type;
178#define FCOE_CONN_TBL_TYPE 0xA1
179 uint8_t length; /* words */
180 uint8_t reserved[2];
181};
182
183struct lpfc_fcf_conn_rec {
184 uint16_t flags;
185#define FCFCNCT_VALID 0x0001
186#define FCFCNCT_BOOT 0x0002
187#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
188#define FCFCNCT_FBNM_VALID 0x0008
189#define FCFCNCT_SWNM_VALID 0x0010
190#define FCFCNCT_VLAN_VALID 0x0020
191#define FCFCNCT_AM_VALID 0x0040
192#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
193#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
194
195 uint16_t vlan_tag;
196 uint8_t fabric_name[8];
197 uint8_t switch_name[8];
198};
199
200struct lpfc_fcf_conn_entry {
201 struct list_head list;
202 struct lpfc_fcf_conn_rec conn_rec;
203};
204
205/*
206 * Define the host's bootstrap mailbox. This structure contains
207 * the member attributes needed to create, use, and destroy the
208 * bootstrap mailbox region.
209 *
210 * The macro definitions for the bmbx data structure are defined
211 * in lpfc_hw4.h with the register definition.
212 */
213struct lpfc_bmbx {
214 struct lpfc_dmabuf *dmabuf;
215 struct dma_address dma_address;
216 void *avirt;
217 dma_addr_t aphys;
218 uint32_t bmbx_size;
219};
220
221#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4B
222
223#define LPFC_EQE_SIZE_4B 4
224#define LPFC_EQE_SIZE_16B 16
225#define LPFC_CQE_SIZE 16
226#define LPFC_WQE_SIZE 64
227#define LPFC_MQE_SIZE 256
228#define LPFC_RQE_SIZE 8
229
230#define LPFC_EQE_DEF_COUNT 1024
231#define LPFC_CQE_DEF_COUNT 256
232#define LPFC_WQE_DEF_COUNT 256
233#define LPFC_MQE_DEF_COUNT 16
234#define LPFC_RQE_DEF_COUNT 512
235
236#define LPFC_QUEUE_NOARM false
237#define LPFC_QUEUE_REARM true
238
239
240/*
241 * SLI4 CT field defines
242 */
243#define SLI4_CT_RPI 0
244#define SLI4_CT_VPI 1
245#define SLI4_CT_VFI 2
246#define SLI4_CT_FCFI 3
247
248#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
249
250/*
251 * SLI4 specific data structures
252 */
253struct lpfc_max_cfg_param {
254 uint16_t max_xri;
255 uint16_t xri_base;
256 uint16_t xri_used;
257 uint16_t max_rpi;
258 uint16_t rpi_base;
259 uint16_t rpi_used;
260 uint16_t max_vpi;
261 uint16_t vpi_base;
262 uint16_t vpi_used;
263 uint16_t max_vfi;
264 uint16_t vfi_base;
265 uint16_t vfi_used;
266 uint16_t max_fcfi;
267 uint16_t fcfi_base;
268 uint16_t fcfi_used;
269 uint16_t max_eq;
270 uint16_t max_rq;
271 uint16_t max_cq;
272 uint16_t max_wq;
273};
274
275struct lpfc_hba;
276/* SLI4 HBA multi-fcp queue handler struct */
277struct lpfc_fcp_eq_hdl {
278 uint32_t idx;
279 struct lpfc_hba *phba;
280};
281
282/* SLI4 HBA data structure entries */
283struct lpfc_sli4_hba {
284 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
285 PCI BAR0, config space registers */
286 void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
287 PCI BAR1, control registers */
288 void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
289 PCI BAR2, doorbell registers */
290 /* BAR0 PCI config space register memory map */
291 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
292 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
293 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
294 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
295#define LPFC_ONLINE_NERR 0xFFFFFFFF
296 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
297 /* BAR1 FCoE function CSR register memory map */
298 void __iomem *STAregaddr; /* Address to HST_STATE register */
299 void __iomem *ISRregaddr; /* Address to HST_ISR register */
300 void __iomem *IMRregaddr; /* Address to HST_IMR register */
301 void __iomem *ISCRregaddr; /* Address to HST_ISCR register */
302 /* BAR2 VF-0 doorbell register memory map */
303 void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */
304 void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */
305 void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
306 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
307 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
308
309 struct msix_entry *msix_entries;
310 uint32_t cfg_eqn;
311 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
312 /* Pointers to the constructed SLI4 queues */
313 struct lpfc_queue **fp_eq; /* Fast-path event queue */
314 struct lpfc_queue *sp_eq; /* Slow-path event queue */
315 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
316 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
317 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
318 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
319 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
320 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
321 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
322 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
323 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
324
325 /* Setup information for various queue parameters */
326 int eq_esize;
327 int eq_ecount;
328 int cq_esize;
329 int cq_ecount;
330 int wq_esize;
331 int wq_ecount;
332 int mq_esize;
333 int mq_ecount;
334 int rq_esize;
335 int rq_ecount;
336#define LPFC_SP_EQ_MAX_INTR_SEC 10000
337#define LPFC_FP_EQ_MAX_INTR_SEC 10000
338
339 uint32_t intr_enable;
340 struct lpfc_bmbx bmbx;
341 struct lpfc_max_cfg_param max_cfg_param;
342 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
343 uint16_t next_rpi;
344 uint16_t scsi_xri_max;
345 uint16_t scsi_xri_cnt;
346 struct list_head lpfc_free_sgl_list;
347 struct list_head lpfc_sgl_list;
348 struct lpfc_sglq **lpfc_els_sgl_array;
349 struct list_head lpfc_abts_els_sgl_list;
350 struct lpfc_scsi_buf **lpfc_scsi_psb_array;
351 struct list_head lpfc_abts_scsi_buf_list;
352 uint32_t total_sglq_bufs;
353 struct lpfc_sglq **lpfc_sglq_active_list;
354 struct list_head lpfc_rpi_hdr_list;
355 unsigned long *rpi_bmask;
356 uint16_t rpi_count;
357 struct lpfc_sli4_flags sli4_flags;
358 struct list_head sp_rspiocb_work_queue;
359 struct list_head sp_cqe_event_pool;
360 struct list_head sp_asynce_work_queue;
361 struct list_head sp_fcp_xri_aborted_work_queue;
362 struct list_head sp_els_xri_aborted_work_queue;
363 struct list_head sp_unsol_work_queue;
364 struct lpfc_sli4_link link_state;
365 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
366 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
367};
368
369enum lpfc_sge_type {
370 GEN_BUFF_TYPE,
371 SCSI_BUFF_TYPE
372};
373
374struct lpfc_sglq {
375 /* lpfc_sglqs are used in double linked lists */
376 struct list_head list;
377 struct list_head clist;
378 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
379 uint16_t iotag; /* pre-assigned IO tag */
380 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
381 struct sli4_sge *sgl; /* pre-assigned SGL */
382 void *virt; /* virtual address. */
383 dma_addr_t phys; /* physical address */
384};
385
386struct lpfc_rpi_hdr {
387 struct list_head list;
388 uint32_t len;
389 struct lpfc_dmabuf *dmabuf;
390 uint32_t page_count;
391 uint32_t start_rpi;
392};
393
394/*
395 * SLI4 specific function prototypes
396 */
397int lpfc_pci_function_reset(struct lpfc_hba *);
398int lpfc_sli4_hba_setup(struct lpfc_hba *);
399int lpfc_sli4_hba_down(struct lpfc_hba *);
400int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
401 uint8_t, uint32_t, bool);
402void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
403void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
404void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
405 struct lpfc_mbx_sge *);
406
407void lpfc_sli4_hba_reset(struct lpfc_hba *);
408struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
409 uint32_t);
410void lpfc_sli4_queue_free(struct lpfc_queue *);
411uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
412uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
413 struct lpfc_queue *, uint32_t, uint32_t);
414uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
415 struct lpfc_queue *, uint32_t);
416uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
417 struct lpfc_queue *, uint32_t);
418uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
419 struct lpfc_queue *, struct lpfc_queue *, uint32_t);
420uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
421uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
422uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
423uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
424uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
425 struct lpfc_queue *);
426int lpfc_sli4_queue_setup(struct lpfc_hba *);
427void lpfc_sli4_queue_unset(struct lpfc_hba *);
428int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
429int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
430int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
431uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
432int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
433int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
434int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
435struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
436struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
437void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
438void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
439int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
440int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
441int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
442struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
443void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
444int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
445void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
446void lpfc_sli4_remove_rpis(struct lpfc_hba *);
447void lpfc_sli4_async_event_proc(struct lpfc_hba *);
448int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
449void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
450void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
451void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
452 struct sli4_wcqe_xri_aborted *);
453void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
454 struct sli4_wcqe_xri_aborted *);
455int lpfc_sli4_brdreset(struct lpfc_hba *);
456int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
457void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
458int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
459int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
460uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
461uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
462void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
463int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
464void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
465int lpfc_sli4_post_status_check(struct lpfc_hba *);
466uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
467
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e599519e3078..41094e02304b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.1" 21#define LPFC_DRIVER_VERSION "8.3.3"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0aff..e0b49922193e 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
32#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
89 vpi = 0; 91 vpi = 0;
90 else 92 else
91 set_bit(vpi, phba->vpi_bmask); 93 set_bit(vpi, phba->vpi_bmask);
94 if (phba->sli_rev == LPFC_SLI_REV4)
95 phba->sli4_hba.max_cfg_param.vpi_used++;
92 spin_unlock_irq(&phba->hbalock); 96 spin_unlock_irq(&phba->hbalock);
93 return vpi; 97 return vpi;
94} 98}
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
96static void 100static void
97lpfc_free_vpi(struct lpfc_hba *phba, int vpi) 101lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
98{ 102{
103 if (vpi == 0)
104 return;
99 spin_lock_irq(&phba->hbalock); 105 spin_lock_irq(&phba->hbalock);
100 clear_bit(vpi, phba->vpi_bmask); 106 clear_bit(vpi, phba->vpi_bmask);
107 if (phba->sli_rev == LPFC_SLI_REV4)
108 phba->sli4_hba.max_cfg_param.vpi_used--;
101 spin_unlock_irq(&phba->hbalock); 109 spin_unlock_irq(&phba->hbalock);
102} 110}
103 111
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
113 if (!pmb) { 121 if (!pmb) {
114 return -ENOMEM; 122 return -ENOMEM;
115 } 123 }
116 mb = &pmb->mb; 124 mb = &pmb->u.mb;
117 125
118 lpfc_read_sparam(phba, pmb, vport->vpi); 126 lpfc_read_sparam(phba, pmb, vport->vpi);
119 /* 127 /*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
243 (vport->fc_flag & wait_flags) || 251 (vport->fc_flag & wait_flags) ||
244 ((vport->port_state > LPFC_VPORT_FAILED) && 252 ((vport->port_state > LPFC_VPORT_FAILED) &&
245 (vport->port_state < LPFC_VPORT_READY))) { 253 (vport->port_state < LPFC_VPORT_READY))) {
246 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 254 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
247 "1833 Vport discovery quiesce Wait:" 255 "1833 Vport discovery quiesce Wait:"
248 " vpi x%x state x%x fc_flags x%x" 256 " state x%x fc_flags x%x"
249 " num_nodes x%x, waiting 1000 msecs" 257 " num_nodes x%x, waiting 1000 msecs"
250 " total wait msecs x%x\n", 258 " total wait msecs x%x\n",
251 vport->vpi, vport->port_state, 259 vport->port_state, vport->fc_flag,
252 vport->fc_flag, vport->num_disc_nodes, 260 vport->num_disc_nodes,
253 jiffies_to_msecs(jiffies - start_time)); 261 jiffies_to_msecs(jiffies - start_time));
254 msleep(1000); 262 msleep(1000);
255 } else { 263 } else {
256 /* Base case. Wait variants satisfied. Break out */ 264 /* Base case. Wait variants satisfied. Break out */
257 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 265 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
258 "1834 Vport discovery quiesced:" 266 "1834 Vport discovery quiesced:"
259 " vpi x%x state x%x fc_flags x%x" 267 " state x%x fc_flags x%x"
260 " wait msecs x%x\n", 268 " wait msecs x%x\n",
261 vport->vpi, vport->port_state, 269 vport->port_state, vport->fc_flag,
262 vport->fc_flag,
263 jiffies_to_msecs(jiffies 270 jiffies_to_msecs(jiffies
264 - start_time)); 271 - start_time));
265 break; 272 break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
267 } 274 }
268 275
269 if (time_after(jiffies, wait_time_max)) 276 if (time_after(jiffies, wait_time_max))
270 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 277 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
271 "1835 Vport discovery quiesce failed:" 278 "1835 Vport discovery quiesce failed:"
272 " vpi x%x state x%x fc_flags x%x" 279 " state x%x fc_flags x%x wait msecs x%x\n",
273 " wait msecs x%x\n", 280 vport->port_state, vport->fc_flag,
274 vport->vpi, vport->port_state,
275 vport->fc_flag,
276 jiffies_to_msecs(jiffies - start_time)); 281 jiffies_to_msecs(jiffies - start_time));
277} 282}
278 283
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
308 goto error_out; 313 goto error_out;
309 } 314 }
310 315
316 /*
317 * In SLI4, the vpi must be activated before it can be used
318 * by the port.
319 */
320 if (phba->sli_rev == LPFC_SLI_REV4) {
321 rc = lpfc_sli4_init_vpi(phba, vpi);
322 if (rc) {
323 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
324 "1838 Failed to INIT_VPI on vpi %d "
325 "status %d\n", vpi, rc);
326 rc = VPORT_NORESOURCES;
327 lpfc_free_vpi(phba, vpi);
328 goto error_out;
329 }
330 }
311 331
312 /* Assign an unused board number */ 332 /* Assign an unused board number */
313 if ((instance = lpfc_get_instance()) < 0) { 333 if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
535 "physical host\n"); 555 "physical host\n");
536 return VPORT_ERROR; 556 return VPORT_ERROR;
537 } 557 }
558
559 /* If the vport is a static vport fail the deletion. */
560 if ((vport->vport_flag & STATIC_VPORT) &&
561 !(phba->pport->load_flag & FC_UNLOADING)) {
562 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
563 "1837 vport_delete failed: Cannot delete "
564 "static vport.\n");
565 return VPORT_ERROR;
566 }
567
538 /* 568 /*
539 * If we are not unloading the driver then prevent the vport_delete 569 * If we are not unloading the driver then prevent the vport_delete
540 * from happening until after this vport's discovery is finished. 570 * from happening until after this vport's discovery is finished.
@@ -665,8 +695,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
665 } 695 }
666 vport->unreg_vpi_cmpl = VPORT_INVAL; 696 vport->unreg_vpi_cmpl = VPORT_INVAL;
667 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 697 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
668 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
669 goto skip_logo;
670 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 698 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
671 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) 699 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
672 timeout = schedule_timeout(timeout); 700 timeout = schedule_timeout(timeout);
@@ -710,7 +738,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
710 struct lpfc_vport *port_iterator; 738 struct lpfc_vport *port_iterator;
711 struct lpfc_vport **vports; 739 struct lpfc_vport **vports;
712 int index = 0; 740 int index = 0;
713 vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), 741 vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
714 GFP_KERNEL); 742 GFP_KERNEL);
715 if (vports == NULL) 743 if (vports == NULL)
716 return NULL; 744 return NULL;
@@ -734,7 +762,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
734 int i; 762 int i;
735 if (vports == NULL) 763 if (vports == NULL)
736 return; 764 return;
737 for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) 765 for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
738 scsi_host_put(lpfc_shost_from_vport(vports[i])); 766 scsi_host_put(lpfc_shost_from_vport(vports[i]));
739 kfree(vports); 767 kfree(vports);
740} 768}