Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h             123
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c        250
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h         63
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c           15
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c      21
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h          1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c         275
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    1365
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h          142
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h        2141
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       5626
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h       54
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c        674
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c         206
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c    51
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c        934
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h          2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        6683
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h          29
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h        467
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h       2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c        62
22 files changed, 17514 insertions, 1672 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111ba..540569849099 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -23,6 +23,13 @@
 
 struct lpfc_sli2_slim;
 
+#define LPFC_PCI_DEV_LP		0x1
+#define LPFC_PCI_DEV_OC		0x2
+
+#define LPFC_SLI_REV2		2
+#define LPFC_SLI_REV3		3
+#define LPFC_SLI_REV4		4
+
 #define LPFC_MAX_TARGET		4096	/* max number of targets supported */
 #define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
					   requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
 };
 
 struct hbq_dmabuf {
+	struct lpfc_dmabuf hbuf;
 	struct lpfc_dmabuf dbuf;
 	uint32_t size;
 	uint32_t tag;
+	struct lpfc_rcqe rcqe;
 };
 
 /* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
 	} rev;
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd2  :24;  /* Reserved                             */
+	uint32_t rsvd3  :19;  /* Reserved                             */
+	uint32_t cdss   : 1;  /* Configure Data Security SLI          */
+	uint32_t rsvd2  : 3;  /* Reserved                             */
+	uint32_t cbg    : 1;  /* Configure BlockGuard                 */
 	uint32_t cmv	: 1;  /* Configure Max VPIs                   */
 	uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
 	uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
 	uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
 	uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
 	uint32_t cmv	: 1;  /* Configure Max VPIs                   */
-	uint32_t rsvd2  :24;  /* Reserved                             */
+	uint32_t cbg    : 1;  /* Configure BlockGuard                 */
+	uint32_t rsvd2  : 3;  /* Reserved                             */
+	uint32_t cdss   : 1;  /* Configure Data Security SLI          */
+	uint32_t rsvd3  :19;  /* Reserved                             */
 #endif
 	} sli3Feat;
 } lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
 };
 
 struct lpfc_vport {
-	struct list_head listentry;
 	struct lpfc_hba *phba;
+	struct list_head listentry;
 	uint8_t port_type;
 #define LPFC_PHYSICAL_PORT 1
 #define LPFC_NPIV_PORT  2
@@ -273,6 +288,9 @@ struct lpfc_vport {
 	enum discovery_state port_state;
 
 	uint16_t vpi;
+	uint16_t vfi;
+	uint8_t vfi_state;
+#define LPFC_VFI_REGISTERED	0x1
 
 	uint32_t fc_flag;	/* FC flags */
 /* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
 #endif
 	uint8_t stat_data_enabled;
 	uint8_t stat_data_blocked;
+	struct list_head rcv_buffer_list;
+	uint32_t vport_flag;
+#define STATIC_VPORT	1
 };
 
 struct hbq_s {
@@ -420,8 +441,66 @@ enum intr_type_t {
 };
 
 struct lpfc_hba {
+	/* SCSI interface function jump table entries */
+	int (*lpfc_new_scsi_buf)
+		(struct lpfc_vport *, int);
+	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+		(struct lpfc_hba *);
+	int (*lpfc_scsi_prep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_scsi_unprep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_release_scsi_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_rampdown_queue_depth)
+		(struct lpfc_hba *);
+	void (*lpfc_scsi_prep_cmnd)
+		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		 struct lpfc_nodelist *);
+	int (*lpfc_scsi_prep_task_mgmt_cmd)
+		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		 unsigned int, uint8_t);
+
+	/* IOCB interface function jump table entries */
+	int (*__lpfc_sli_issue_iocb)
+		(struct lpfc_hba *, uint32_t,
+		 struct lpfc_iocbq *, uint32_t);
+	void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+			 struct lpfc_iocbq *);
+	int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+
+
+	IOCB_t * (*lpfc_get_iocb_from_iocbq)
+		(struct lpfc_iocbq *);
+	void (*lpfc_scsi_cmd_iocb_cmpl)
+		(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+	/* MBOX interface function jump table entries */
+	int (*lpfc_sli_issue_mbox)
+		(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+	/* Slow-path IOCB process function jump table entries */
+	void (*lpfc_sli_handle_slow_ring_event)
+		(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		 uint32_t mask);
+	/* INIT device interface function jump table entries */
+	int (*lpfc_sli_hbq_to_firmware)
+		(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+	int (*lpfc_sli_brdrestart)
+		(struct lpfc_hba *);
+	int (*lpfc_sli_brdready)
+		(struct lpfc_hba *, uint32_t);
+	void (*lpfc_handle_eratt)
+		(struct lpfc_hba *);
+	void (*lpfc_stop_port)
+		(struct lpfc_hba *);
+
+
+	/* SLI4 specific HBA data structure */
+	struct lpfc_sli4_hba sli4_hba;
+
 	struct lpfc_sli sli;
-	uint32_t sli_rev;		/* SLI2 or SLI3 */
+	uint8_t pci_dev_grp;	/* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+	uint32_t sli_rev;		/* SLI2, SLI3, or SLI4 */
 	uint32_t sli3_options;		/* Mask of enabled SLI3 options */
 #define LPFC_SLI3_HBQ_ENABLED		0x01
 #define LPFC_SLI3_NPIV_ENABLED		0x02
@@ -429,6 +508,7 @@ struct lpfc_hba {
 #define LPFC_SLI3_CRP_ENABLED		0x08
 #define LPFC_SLI3_INB_ENABLED		0x10
 #define LPFC_SLI3_BG_ENABLED		0x20
+#define LPFC_SLI3_DSS_ENABLED		0x40
 	uint32_t iocb_cmd_size;
 	uint32_t iocb_rsp_size;
 
@@ -442,8 +522,13 @@ struct lpfc_hba {
 
 	uint32_t hba_flag;	/* hba generic flags */
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
-
-#define DEFER_ERATT		0x4 /* Deferred error attention in progress */
+#define DEFER_ERATT		0x2 /* Deferred error attention in progress */
+#define HBA_FCOE_SUPPORT	0x4 /* HBA function supports FCOE */
+#define HBA_RECEIVE_BUFFER	0x8 /* Rcv buffer posted to worker thread */
+#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define FCP_XRI_ABORT_EVENT	0x20
+#define ELS_XRI_ABORT_EVENT	0x40
+#define ASYNC_EVENT		0x80
 	struct lpfc_dmabuf slim2p;
 
 	MAILBOX_t *mbox;
@@ -502,6 +587,9 @@ struct lpfc_hba {
 	uint32_t cfg_poll;
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_use_msi;
+	uint32_t cfg_fcp_imax;
+	uint32_t cfg_fcp_wq_count;
+	uint32_t cfg_fcp_eq_count;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_prot_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +599,8 @@ struct lpfc_hba {
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
 	uint32_t cfg_enable_bg;
+	uint32_t cfg_enable_fip;
+	uint32_t cfg_log_verbose;
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -526,11 +616,12 @@ struct lpfc_hba {
 	unsigned long data_flags;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
-	struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
+	struct list_head rb_pend_list;  /* Received buffers to be processed */
 	uint32_t hbq_count;	        /* Count of configured HBQs */
 	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
 
 	unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
+	unsigned long pci_bar1_map;     /* Physical address for PCI BAR1 */
 	unsigned long pci_bar2_map;     /* Physical address for PCI BAR2 */
 	void __iomem *slim_memmap_p;	/* Kernel memory mapped address for
 					   PCI BAR0 */
@@ -593,7 +684,8 @@ struct lpfc_hba {
 	/* pci_mem_pools */
 	struct pci_pool *lpfc_scsi_dma_buf_pool;
 	struct pci_pool *lpfc_mbuf_pool;
-	struct pci_pool *lpfc_hbq_pool;
+	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
+	struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
@@ -609,6 +701,14 @@ struct lpfc_hba {
 	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
 	uint16_t max_vpi;		/* Maximum virtual nports */
 #define LPFC_MAX_VPI 0xFFFF		/* Max number of VPI supported */
+	uint16_t max_vports;            /*
+					 * For IOV HBAs max_vpi can change
+					 * after a reset. max_vports is max
+					 * number of vports present. This can
+					 * be greater than max_vpi.
+					 */
+	uint16_t vpi_base;
+	uint16_t vfi_base;
 	unsigned long *vpi_bmask;	/* vpi allocation table */
 
 	/* Data structure used by fabric iocb scheduler */
@@ -667,6 +767,11 @@ struct lpfc_hba {
 /* Maximum number of events that can be outstanding at any time*/
 #define LPFC_MAX_EVT_COUNT 512
 	atomic_t fast_event_count;
+	struct lpfc_fcf fcf;
+	uint8_t fc_map[3];
+	uint8_t valid_vlan;
+	uint16_t vlan_id;
+	struct list_head fcf_conn_rec_list;
 };
 
 static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c14f0cbdb125..d73e677201f8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -30,8 +30,10 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
 		return -ENOMEM;
 
 	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
-	pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
-	pmboxq->mb.mbxOwner = OWN_HOST;
+	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->u.mb.mbxOwner = OWN_HOST;
 
 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
 
-	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
+	if ((mbxstatus == MBX_SUCCESS) &&
+	    (pmboxq->u.mb.mbxStatus == 0 ||
+	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
 		memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
 			       phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		  uint32_t *mrpi, uint32_t *arpi,
 		  uint32_t *mvpi, uint32_t *avpi)
 {
 	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_mbx_read_config *rd_config;
 	LPFC_MBOXQ_t *pmboxq;
 	MAILBOX_t *pmb;
 	int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 	 */
 	if (phba->link_state < LPFC_LINK_DOWN ||
 	    !phba->mbox_mem_pool ||
-	    (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
 		return 0;
 
 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		return 0;
 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_CONFIG;
 	pmb->mbxOwner = OWN_HOST;
 	pmboxq->context1 = NULL;
 
 	if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
-		(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = MBX_NOT_FINISHED;
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
 		return 0;
 	}
 
-	if (mrpi)
-		*mrpi = pmb->un.varRdConfig.max_rpi;
-	if (arpi)
-		*arpi = pmb->un.varRdConfig.avail_rpi;
-	if (mxri)
-		*mxri = pmb->un.varRdConfig.max_xri;
-	if (axri)
-		*axri = pmb->un.varRdConfig.avail_xri;
-	if (mvpi)
-		*mvpi = pmb->un.varRdConfig.max_vpi;
-	if (avpi)
-		*avpi = pmb->un.varRdConfig.avail_vpi;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rd_config = &pmboxq->u.mqe.un.rd_config;
+		if (mrpi)
+			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+		if (arpi)
+			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.rpi_used;
+		if (mxri)
+			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		if (axri)
+			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.xri_used;
+		if (mvpi)
+			*mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+		if (avpi)
+			*avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.vpi_used;
+	} else {
+		if (mrpi)
+			*mrpi = pmb->un.varRdConfig.max_rpi;
+		if (arpi)
+			*arpi = pmb->un.varRdConfig.avail_rpi;
+		if (mxri)
+			*mxri = pmb->un.varRdConfig.max_xri;
+		if (axri)
+			*axri = pmb->un.varRdConfig.avail_xri;
+		if (mvpi)
+			*mvpi = pmb->un.varRdConfig.max_vpi;
+		if (avpi)
+			*avpi = pmb->un.varRdConfig.avail_vpi;
+	}
 
 	mempool_free(pmboxq, phba->mbox_mem_pool);
 	return 1;
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
 # You can set a bit mask to record specific types of verbose messages:
-#
-# LOG_ELS              0x1      ELS events
-# LOG_DISCOVERY        0x2      Link discovery events
-# LOG_MBOX             0x4      Mailbox events
-# LOG_INIT             0x8      Initialization events
-# LOG_LINK_EVENT       0x10     Link events
-# LOG_FCP              0x40     FCP traffic history
-# LOG_NODE             0x80     Node table events
-# LOG_BG               0x200    BlockGuard events
-# LOG_MISC             0x400    Miscellaneous events
-# LOG_SLI              0x800    SLI events
-# LOG_FCP_ERROR        0x1000   Only log FCP errors
-# LOG_LIBDFC           0x2000   LIBDFC events
-# LOG_ALL_MSG          0xffff   LOG all messages
+# See lpfc_logmsg.h for definitions.
 */
-LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
 		       "Verbose logging bit-mask");
 
 /*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
 static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
 		lpfc_topology_show, lpfc_topology_store);
 
+/**
+ * lpfc_static_vport_show: Read callback function for
+ *   lpfc_static_vport sysfs file.
+ * @dev: Pointer to class device object.
+ * @attr: device attribute structure.
+ * @buf: Data buffer.
+ *
+ * This function is the read call back function for
+ * lpfc_static_vport sysfs file. The lpfc_static_vport
+ * sysfs file reports the manageability of the vport.
+ **/
+static ssize_t
+lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (vport->vport_flag & STATIC_VPORT)
+		sprintf(buf, "1\n");
+	else
+		sprintf(buf, "0\n");
+
+	return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to control the statistical data collection.
+ */
+static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
+		   lpfc_static_vport_show, NULL);
 
 /**
  * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 	if (vports == NULL)
 		return -ENOMEM;
 
-	for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		v_shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(v_shost->host_lock);
 		/* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 		phba->bucket_base = base;
 		phba->bucket_step = step;
 
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			v_shost = lpfc_shost_from_vport(vports[i]);
 
 			/* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
 	if (vports == NULL)
 		return -ENOMEM;
 
-	for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		v_shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(shost->host_lock);
 		vports[i]->stat_data_blocked = 1;
@@ -2844,15 +2885,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 /*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #		support this feature
-#       0  = MSI disabled
+#       0  = MSI disabled (default)
 #       1  = MSI enabled
-#       2  = MSI-X enabled (default)
-# Value range is [0,2]. Default value is 2.
+#       2  = MSI-X enabled
+# Value range is [0,2]. Default value is 0.
 */
-LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
 	    "MSI-X (2), if possible");
 
 /*
+# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
+#
+# Value range is [636,651042]. Default value is 10000.
+*/
+LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
+	    "Set the maximum number of fast-path FCP interrupts per second");
+
+/*
+# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+#
+# Value range is [1,31]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+	    "Set the number of fast-path FCP work queues, if possible");
+
+/*
+# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+#
+# Value range is [1,7]. Default value is 1.
+*/
+LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+	    "Set the number of fast-path FCP event queues, if possible");
+
+/*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 #       0  = HBA resets disabled
 #       1  = HBA resets enabled (default)
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 */
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
+/*
+# lpfc_enable_fip: When set, FIP is required to start discovery. If not
+# set, the driver will add an FCF record manually if the port has no
+# FCF records available and start discovery.
+# Value range is [0,1]. Default value is 1 (enabled)
+*/
+LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
+
 
 /*
 # lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
 	&dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_poll,
 	&dev_attr_lpfc_poll_tmo,
 	&dev_attr_lpfc_use_msi,
+	&dev_attr_lpfc_fcp_imax,
+	&dev_attr_lpfc_fcp_wq_count,
+	&dev_attr_lpfc_fcp_eq_count,
 	&dev_attr_lpfc_enable_bg,
 	&dev_attr_lpfc_soft_wwnn,
 	&dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_lun_queue_depth,
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_enable_fip,
 	&dev_attr_lpfc_hba_queue_depth,
 	&dev_attr_lpfc_peer_port_login,
 	&dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_enable_da_id,
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
+	&dev_attr_lpfc_static_vport,
 	NULL,
 };
 
@@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
 		}
 	}
 
-	memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
+	memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
 	       buf, count);
 
 	phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	int rc;
+	MAILBOX_t *pmb;
 
 	if (off > MAILBOX_CMD_SIZE)
 		return -ERANGE;
@@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	if (off == 0 &&
 	    phba->sysfs_mbox.state  == SMBOX_WRITING &&
 	    phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-
-		switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
+		pmb = &phba->sysfs_mbox.mbox->u.mb;
+		switch (pmb->mbxCommand) {
 			/* Offline only */
 		case MBX_INIT_LINK:
 		case MBX_DOWN_LINK:
@@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 			if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
 				printk(KERN_WARNING "mbox_read:Command 0x%x "
 				       "is illegal in on-line state\n",
-				       phba->sysfs_mbox.mbox->mb.mbxCommand);
+				       pmb->mbxCommand);
 				sysfs_mbox_idle(phba);
 				spin_unlock_irq(&phba->hbalock);
 				return -EPERM;
@@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		case MBX_CONFIG_PORT:
 		case MBX_RUN_BIU_DIAG:
 			printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
-			       phba->sysfs_mbox.mbox->mb.mbxCommand);
+			       pmb->mbxCommand);
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
 		default:
 			printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
-			       phba->sysfs_mbox.mbox->mb.mbxCommand);
+			       pmb->mbxCommand);
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return -EPERM;
@@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		 * or RESTART mailbox commands until the HBA is restarted.
 		 */
 		if (phba->pport->stopped &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
-		    phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
+		    pmb->mbxCommand != MBX_DUMP_MEMORY &&
+		    pmb->mbxCommand != MBX_RESTART &&
+		    pmb->mbxCommand != MBX_WRITE_VPARMS &&
+		    pmb->mbxCommand != MBX_WRITE_WWN)
 			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 					"1259 mbox: Issued mailbox cmd "
 					"0x%x while in stopped state.\n",
-					phba->sysfs_mbox.mbox->mb.mbxCommand);
+					pmb->mbxCommand);
 
 		phba->sysfs_mbox.mbox->vport = vport;
 
@@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 	}
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
+	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
 
 		spin_unlock_irq(&phba->hbalock);
 		rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		spin_unlock_irq(&phba->hbalock);
 		rc = lpfc_sli_issue_mbox_wait (phba,
 					       phba->sysfs_mbox.mbox,
-			lpfc_mbox_tmo_val(phba,
-			    phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
+			lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
 		spin_lock_irq(&phba->hbalock);
 	}
 
@@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		return -EAGAIN;
 	}
 
-	memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
+	memcpy(buf, (uint8_t *) &pmb + off, count);
 
 	phba->sysfs_mbox.offset = off + count;
 
@@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
 	case LA_8GHZ_LINK:
 		fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
 	break;
+	case LA_10GHZ_LINK:
+		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+	break;
 	default:
 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 	break;
@@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	 */
 	if (phba->link_state < LPFC_LINK_DOWN ||
 	    !phba->mbox_mem_pool ||
-	    (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
 		return NULL;
 
 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
 		return NULL;
 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
 	pmboxq->context1 = NULL;
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 		return;
 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
 
-	pmb = &pmboxq->mb;
+	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
 	pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3859,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-		(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3877,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
 }
 
+/**
+ * lpfc_hba_log_verbose_init - Set hba's log verbose level
+ * @phba: Pointer to lpfc_hba struct.
+ *
+ * This function is called by the lpfc_get_cfgparam() routine to set the
+ * module lpfc_log_verbose into the @phba cfg_log_verbose for use with
+ * log message according to the module's lpfc_log_verbose parameter setting
+ * before hba port or vport created.
+ **/
+static void
+lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
+{
+	phba->cfg_log_verbose = verbose;
+}
+
 struct fc_function_template lpfc_transport_functions = {
 	/* fixed attributes the driver supports */
 	.show_host_node_name = 1,
@@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
+	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
+	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
-	/*
-	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
-	 * used to create the sg_dma_buf_pool must be dynamically calculated.
-	 * 2 segments are added since the IOCB needs a command and response bde.
-	 */
-	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-			sizeof(struct fcp_rsp) +
-			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
-
-	if (phba->cfg_enable_bg) {
-		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-		phba->cfg_sg_dma_buf_size +=
-			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
-	}
-
-	/* Also reinitialize the host templates with new values. */
-	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+	lpfc_enable_fip_init(phba, lpfc_enable_fip);
+	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+
 	return;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f88ce3f26190..d2a922997c0f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
 struct fc_rport;
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
 	       LPFC_MBOXQ_t *, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
-void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
 void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rpis(struct lpfc_vport *, int);
 int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
 int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
 void lpfc_unblock_mgmt_io(struct lpfc_hba *);
 void lpfc_offline_prep(struct lpfc_hba *);
 void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);
 
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli_queue_setup(struct lpfc_hba *);
 
 void lpfc_handle_eratt(struct lpfc_hba *);
 void lpfc_handle_latt(struct lpfc_hba *);
-irqreturn_t lpfc_intr_handler(int, void *);
-irqreturn_t lpfc_sp_intr_handler(int, void *);
-irqreturn_t lpfc_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
 
 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
 int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
 
 void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
 	uint32_t , LPFC_MBOXQ_t *);
 struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
 void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+			uint16_t);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);
 
-int lpfc_mem_alloc(struct lpfc_hba *);
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
 void lpfc_stop_vport_timers(struct lpfc_vport *);
 
 void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
 void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
 			   uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
 void lpfc_reset_barrier(struct lpfc_hba * phba);
 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
 int lpfc_sli_hba_down(struct lpfc_hba *);
 int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 int lpfc_sli_handle_mb_event(struct lpfc_hba *);
-int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
 int lpfc_sli_check_eratt(struct lpfc_hba *);
-int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
 				    struct lpfc_sli_ring *, uint32_t);
+int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
 
 int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
-int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
 			     struct lpfc_iocbq *, struct lpfc_iocbq *,
 			     uint32_t);
 void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
 const char* lpfc_info(struct Scsi_Host *);
 int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
 
+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
 void lpfc_get_cfgparam(struct lpfc_hba *);
 void lpfc_get_vport_cfgparam(struct lpfc_vport *);
 int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
 			    struct lpfc_iocbq *);
 struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
 void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);
 
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
 #define HBA_EVENT_LINK_UP                2
 #define HBA_EVENT_LINK_DOWN              3
+
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e5..1dbccfd3d022 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -32,8 +32,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	 uint32_t tmo, uint8_t retry)
 {
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	IOCB_t *icmd;
 	struct lpfc_iocbq *geniocb;
 	int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; 331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
332 geniocb->vport = vport; 332 geniocb->vport = vport;
333 geniocb->retry = retry; 333 geniocb->retry = retry;
334 rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0); 334 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
335 335
336 if (rc == IOCB_ERROR) { 336 if (rc == IOCB_ERROR) {
337 lpfc_sli_release_iocbq(phba, geniocb); 337 lpfc_sli_release_iocbq(phba, geniocb);
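
The call-site change above recurs throughout this patch: lpfc_sli_issue_iocb() now takes a ring number rather than a ring pointer, so callers no longer dereference phba->sli.ring[] themselves. In sketch form (taken from the call sites in this diff; the lpfc_sli.c side presumably maps the number to an SLI3 ring or an SLI4 work queue internally):

/* Before: the caller resolved the ELS ring itself. */
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);

/* After: the caller passes only the ring number. */
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);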
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1578 case LA_8GHZ_LINK: 1578 case LA_8GHZ_LINK:
1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; 1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
1580 break; 1580 break;
1581 case LA_10GHZ_LINK:
1582 ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
1583 break;
1581 default: 1584 default:
1582 ae->un.PortSpeed = 1585 ae->un.PortSpeed =
1583 HBA_PORTSPEED_UNKNOWN; 1586 HBA_PORTSPEED_UNKNOWN;
@@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1730 uint8_t *fwname; 1733 uint8_t *fwname;
1731 1734
1732 if (vp->rev.rBit) { 1735 if (vp->rev.rBit) {
1733 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1736 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1734 rev = vp->rev.sli2FwRev; 1737 rev = vp->rev.sli2FwRev;
1735 else 1738 else
1736 rev = vp->rev.sli1FwRev; 1739 rev = vp->rev.sli1FwRev;
@@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1756 } 1759 }
1757 b4 = (rev & 0x0000000f); 1760 b4 = (rev & 0x0000000f);
1758 1761
1759 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1762 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1760 fwname = vp->rev.sli2FwName; 1763 fwname = vp->rev.sli2FwName;
1761 else 1764 else
1762 fwname = vp->rev.sli1FwName; 1765 fwname = vp->rev.sli1FwName;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07a..2b02b1fb39a0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -33,8 +33,10 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35 35
36#include "lpfc_hw4.h"
36#include "lpfc_hw.h" 37#include "lpfc_hw.h"
37#include "lpfc_sli.h" 38#include "lpfc_sli.h"
39#include "lpfc_sli4.h"
38#include "lpfc_nl.h" 40#include "lpfc_nl.h"
39#include "lpfc_disc.h" 41#include "lpfc_disc.h"
40#include "lpfc_scsi.h" 42#include "lpfc_scsi.h"
@@ -280,6 +282,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
280 struct lpfc_dmabuf *d_buf; 282 struct lpfc_dmabuf *d_buf;
281 struct hbq_dmabuf *hbq_buf; 283 struct hbq_dmabuf *hbq_buf;
282 284
285 if (phba->sli_rev != 3)
286 return 0;
283 cnt = LPFC_HBQINFO_SIZE; 287 cnt = LPFC_HBQINFO_SIZE;
284 spin_lock_irq(&phba->hbalock); 288 spin_lock_irq(&phba->hbalock);
285 289
@@ -489,12 +493,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
489 pring->next_cmdidx, pring->local_getidx, 493 pring->next_cmdidx, pring->local_getidx,
490 pring->flag, pgpp->rspPutInx, pring->numRiocb); 494 pring->flag, pgpp->rspPutInx, pring->numRiocb);
491 } 495 }
492 word0 = readl(phba->HAregaddr); 496
493 word1 = readl(phba->CAregaddr); 497 if (phba->sli_rev <= LPFC_SLI_REV3) {
494 word2 = readl(phba->HSregaddr); 498 word0 = readl(phba->HAregaddr);
495 word3 = readl(phba->HCregaddr); 499 word1 = readl(phba->CAregaddr);
496 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n", 500 word2 = readl(phba->HSregaddr);
497 word0, word1, word2, word3); 501 word3 = readl(phba->HCregaddr);
502 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
503 "HC:%08x\n", word0, word1, word2, word3);
504 }
498 spin_unlock_irq(&phba->hbalock); 505 spin_unlock_irq(&phba->hbalock);
499 return len; 506 return len;
500} 507}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ffd108972072..1142070e9484 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ 135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ 137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
138#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */
138 139
139/* ndlp usage management macros */ 140/* ndlp usage management macros */
140#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 141#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d2..6bdeb14878a2 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
84 uint32_t ha_copy; 86 uint32_t ha_copy;
85 87
86 if (vport->port_state >= LPFC_VPORT_READY || 88 if (vport->port_state >= LPFC_VPORT_READY ||
87 phba->link_state == LPFC_LINK_DOWN) 89 phba->link_state == LPFC_LINK_DOWN ||
90 phba->sli_rev > LPFC_SLI_REV3)
88 return 0; 91 return 0;
89 92
90 /* Read the HBA Host Attention Register */ 93 /* Read the HBA Host Attention Register */
@@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
219 icmd->un.elsreq64.myID = vport->fc_myDID; 222 icmd->un.elsreq64.myID = vport->fc_myDID;
220 223
221 /* For ELS_REQUEST64_CR, use the VPI by default */ 224 /* For ELS_REQUEST64_CR, use the VPI by default */
222 icmd->ulpContext = vport->vpi; 225 icmd->ulpContext = vport->vpi + phba->vpi_base;
223 icmd->ulpCt_h = 0; 226 icmd->ulpCt_h = 0;
224 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 227 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
225 if (elscmd == ELS_CMD_ECHO) 228 if (elscmd == ELS_CMD_ECHO)
@@ -305,7 +308,7 @@ els_iocb_free_pcmb_exit:
305 * 0 - successfully issued fabric registration login for @vport 308 * 0 - successfully issued fabric registration login for @vport
306 * -ENXIO -- failed to issue fabric registration login for @vport 309 * -ENXIO -- failed to issue fabric registration login for @vport
307 **/ 310 **/
308static int 311int
309lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 312lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
310{ 313{
311 struct lpfc_hba *phba = vport->phba; 314 struct lpfc_hba *phba = vport->phba;
@@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
345 err = 4; 348 err = 4;
346 goto fail; 349 goto fail;
347 } 350 }
348 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 351 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
349 0);
350 if (rc) { 352 if (rc) {
351 err = 5; 353 err = 5;
352 goto fail_free_mbox; 354 goto fail_free_mbox;
@@ -386,6 +388,75 @@ fail:
386} 388}
387 389
388/** 390/**
391 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
392 * @vport: pointer to a host virtual N_Port data structure.
393 *
394 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
395 * the @vport. This mailbox command is necessary for FCoE only.
396 *
397 * Return code
398 * 0 - successfully issued REG_VFI for @vport
399 * A failure code otherwise.
400 **/
401static int
402lpfc_issue_reg_vfi(struct lpfc_vport *vport)
403{
404 struct lpfc_hba *phba = vport->phba;
405 LPFC_MBOXQ_t *mboxq;
406 struct lpfc_nodelist *ndlp;
407 struct serv_parm *sp;
408 struct lpfc_dmabuf *dmabuf;
409 int rc = 0;
410
411 sp = &phba->fc_fabparam;
412 ndlp = lpfc_findnode_did(vport, Fabric_DID);
413 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
414 rc = -ENODEV;
415 goto fail;
416 }
417
418 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
419 if (!dmabuf) {
420 rc = -ENOMEM;
421 goto fail;
422 }
423 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
424 if (!dmabuf->virt) {
425 rc = -ENOMEM;
426 goto fail_free_dmabuf;
427 }
428 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
429 if (!mboxq) {
430 rc = -ENOMEM;
431 goto fail_free_coherent;
432 }
433 vport->port_state = LPFC_FABRIC_CFG_LINK;
434 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
435 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
436 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
437 mboxq->vport = vport;
438 mboxq->context1 = dmabuf;
439 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
440 if (rc == MBX_NOT_FINISHED) {
441 rc = -ENXIO;
442 goto fail_free_mbox;
443 }
444 return 0;
445
446fail_free_mbox:
447 mempool_free(mboxq, phba->mbox_mem_pool);
448fail_free_coherent:
449 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
450fail_free_dmabuf:
451 kfree(dmabuf);
452fail:
453 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
454 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
455 "0289 Issue Register VFI failed: Err %d\n", rc);
456 return rc;
457}
458
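
A pattern worth flagging in lpfc_issue_reg_vfi() above, since it recurs in lpfc_register_fcf() later in this diff: with MBX_NOWAIT, a successfully queued mailbox hands ownership of the mailbox and its DMA buffer to the completion handler, so the caller unwinds only on MBX_NOT_FINISHED. Restated as a stripped-down sketch of the same code:

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;	/* cleanup moves here */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		/* Never queued: the completion handler will not run, so
		 * undo in reverse order of allocation.
		 */
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -ENXIO;
	}
	return 0;	/* handler now owns mboxq and dmabuf */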
459/**
389 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 460 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
390 * @vport: pointer to a host virtual N_Port data structure. 461 * @vport: pointer to a host virtual N_Port data structure.
391 * @ndlp: pointer to a node-list data structure. 462 * @ndlp: pointer to a node-list data structure.
@@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
497 } 568 }
498 } 569 }
499 570
500 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 571 if (phba->sli_rev < LPFC_SLI_REV4) {
501 572 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
502 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 573 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
503 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { 574 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
504 lpfc_register_new_vport(phba, vport, ndlp); 575 lpfc_register_new_vport(phba, vport, ndlp);
505 return 0; 576 else
577 lpfc_issue_fabric_reglogin(vport);
578 } else {
579 ndlp->nlp_type |= NLP_FABRIC;
580 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
581 if (vport->vfi_state & LPFC_VFI_REGISTERED) {
582 lpfc_start_fdiscs(phba);
583 lpfc_do_scr_ns_plogi(phba, vport);
584 } else
585 lpfc_issue_reg_vfi(vport);
506 } 586 }
507 lpfc_issue_fabric_reglogin(vport);
508 return 0; 587 return 0;
509} 588}
510
511/** 589/**
512 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 590 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
513 * @vport: pointer to a host virtual N_Port data structure. 591 * @vport: pointer to a host virtual N_Port data structure.
@@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
815 if (sp->cmn.fcphHigh < FC_PH3) 893 if (sp->cmn.fcphHigh < FC_PH3)
816 sp->cmn.fcphHigh = FC_PH3; 894 sp->cmn.fcphHigh = FC_PH3;
817 895
818 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 896 if (phba->sli_rev == LPFC_SLI_REV4) {
897 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
898 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
899 /* FLOGI needs to be 3 for WQE FCFI */
900 /* Set the fcfi to the fcfi we registered with */
901 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
902 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
819 sp->cmn.request_multiple_Nport = 1; 903 sp->cmn.request_multiple_Nport = 1;
820
821 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 904 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
822 icmd->ulpCt_h = 1; 905 icmd->ulpCt_h = 1;
823 icmd->ulpCt_l = 0; 906 icmd->ulpCt_l = 0;
@@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
930 if (!ndlp) 1013 if (!ndlp)
931 return 0; 1014 return 0;
932 lpfc_nlp_init(vport, ndlp, Fabric_DID); 1015 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1016 /* Set the node type */
1017 ndlp->nlp_type |= NLP_FABRIC;
933 /* Put ndlp onto node list */ 1018 /* Put ndlp onto node list */
934 lpfc_enqueue_node(vport, ndlp); 1019 lpfc_enqueue_node(vport, ndlp);
935 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1020 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1350 IOCB_t *icmd; 1435 IOCB_t *icmd;
1351 struct lpfc_nodelist *ndlp; 1436 struct lpfc_nodelist *ndlp;
1352 struct lpfc_iocbq *elsiocb; 1437 struct lpfc_iocbq *elsiocb;
1353 struct lpfc_sli_ring *pring;
1354 struct lpfc_sli *psli; 1438 struct lpfc_sli *psli;
1355 uint8_t *pcmd; 1439 uint8_t *pcmd;
1356 uint16_t cmdsize; 1440 uint16_t cmdsize;
1357 int ret; 1441 int ret;
1358 1442
1359 psli = &phba->sli; 1443 psli = &phba->sli;
1360 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1361 1444
1362 ndlp = lpfc_findnode_did(vport, did); 1445 ndlp = lpfc_findnode_did(vport, did);
1363 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1446 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1474,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1391 1474
1392 phba->fc_stat.elsXmitPLOGI++; 1475 phba->fc_stat.elsXmitPLOGI++;
1393 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1476 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1394 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 1477 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1395 1478
1396 if (ret == IOCB_ERROR) { 1479 if (ret == IOCB_ERROR) {
1397 lpfc_els_free_iocb(phba, elsiocb); 1480 lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1501 PRLI *npr; 1584 PRLI *npr;
1502 IOCB_t *icmd; 1585 IOCB_t *icmd;
1503 struct lpfc_iocbq *elsiocb; 1586 struct lpfc_iocbq *elsiocb;
1504 struct lpfc_sli_ring *pring;
1505 struct lpfc_sli *psli;
1506 uint8_t *pcmd; 1587 uint8_t *pcmd;
1507 uint16_t cmdsize; 1588 uint16_t cmdsize;
1508 1589
1509 psli = &phba->sli;
1510 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1511
1512 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 1590 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1513 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1591 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1514 ndlp->nlp_DID, ELS_CMD_PRLI); 1592 ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1628,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1550 spin_lock_irq(shost->host_lock); 1628 spin_lock_irq(shost->host_lock);
1551 ndlp->nlp_flag |= NLP_PRLI_SND; 1629 ndlp->nlp_flag |= NLP_PRLI_SND;
1552 spin_unlock_irq(shost->host_lock); 1630 spin_unlock_irq(shost->host_lock);
1553 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1631 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1632 IOCB_ERROR) {
1554 spin_lock_irq(shost->host_lock); 1633 spin_lock_irq(shost->host_lock);
1555 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1634 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1556 spin_unlock_irq(shost->host_lock); 1635 spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
1608 * and continue discovery. 1687 * and continue discovery.
1609 */ 1688 */
1610 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1689 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1611 !(vport->fc_flag & FC_RSCN_MODE)) { 1690 !(vport->fc_flag & FC_RSCN_MODE) &&
1691 (phba->sli_rev < LPFC_SLI_REV4)) {
1612 lpfc_issue_reg_vpi(phba, vport); 1692 lpfc_issue_reg_vpi(phba, vport);
1613 return; 1693 return;
1614 } 1694 }
@@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1788 ADISC *ap; 1868 ADISC *ap;
1789 IOCB_t *icmd; 1869 IOCB_t *icmd;
1790 struct lpfc_iocbq *elsiocb; 1870 struct lpfc_iocbq *elsiocb;
1791 struct lpfc_sli *psli = &phba->sli;
1792 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1793 uint8_t *pcmd; 1871 uint8_t *pcmd;
1794 uint16_t cmdsize; 1872 uint16_t cmdsize;
1795 1873
@@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1822 spin_lock_irq(shost->host_lock); 1900 spin_lock_irq(shost->host_lock);
1823 ndlp->nlp_flag |= NLP_ADISC_SND; 1901 ndlp->nlp_flag |= NLP_ADISC_SND;
1824 spin_unlock_irq(shost->host_lock); 1902 spin_unlock_irq(shost->host_lock);
1825 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1903 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1904 IOCB_ERROR) {
1826 spin_lock_irq(shost->host_lock); 1905 spin_lock_irq(shost->host_lock);
1827 ndlp->nlp_flag &= ~NLP_ADISC_SND; 1906 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1828 spin_unlock_irq(shost->host_lock); 1907 spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1937 struct lpfc_hba *phba = vport->phba; 2016 struct lpfc_hba *phba = vport->phba;
1938 IOCB_t *icmd; 2017 IOCB_t *icmd;
1939 struct lpfc_iocbq *elsiocb; 2018 struct lpfc_iocbq *elsiocb;
1940 struct lpfc_sli_ring *pring;
1941 struct lpfc_sli *psli;
1942 uint8_t *pcmd; 2019 uint8_t *pcmd;
1943 uint16_t cmdsize; 2020 uint16_t cmdsize;
1944 int rc; 2021 int rc;
1945 2022
1946 psli = &phba->sli;
1947 pring = &psli->ring[LPFC_ELS_RING];
1948
1949 spin_lock_irq(shost->host_lock); 2023 spin_lock_irq(shost->host_lock);
1950 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2024 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1951 spin_unlock_irq(shost->host_lock); 2025 spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1978 spin_lock_irq(shost->host_lock); 2052 spin_lock_irq(shost->host_lock);
1979 ndlp->nlp_flag |= NLP_LOGO_SND; 2053 ndlp->nlp_flag |= NLP_LOGO_SND;
1980 spin_unlock_irq(shost->host_lock); 2054 spin_unlock_irq(shost->host_lock);
1981 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2055 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1982 2056
1983 if (rc == IOCB_ERROR) { 2057 if (rc == IOCB_ERROR) {
1984 spin_lock_irq(shost->host_lock); 2058 spin_lock_irq(shost->host_lock);
@@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2058 struct lpfc_hba *phba = vport->phba; 2132 struct lpfc_hba *phba = vport->phba;
2059 IOCB_t *icmd; 2133 IOCB_t *icmd;
2060 struct lpfc_iocbq *elsiocb; 2134 struct lpfc_iocbq *elsiocb;
2061 struct lpfc_sli_ring *pring;
2062 struct lpfc_sli *psli; 2135 struct lpfc_sli *psli;
2063 uint8_t *pcmd; 2136 uint8_t *pcmd;
2064 uint16_t cmdsize; 2137 uint16_t cmdsize;
2065 struct lpfc_nodelist *ndlp; 2138 struct lpfc_nodelist *ndlp;
2066 2139
2067 psli = &phba->sli; 2140 psli = &phba->sli;
2068 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2069 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2141 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2070 2142
2071 ndlp = lpfc_findnode_did(vport, nportid); 2143 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2108 2180
2109 phba->fc_stat.elsXmitSCR++; 2181 phba->fc_stat.elsXmitSCR++;
2110 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2182 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2111 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2183 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2184 IOCB_ERROR) {
2112 /* The additional lpfc_nlp_put will cause the following 2185 /* The additional lpfc_nlp_put will cause the following
2113 * lpfc_els_free_iocb routine to trigger the release of 2186 * lpfc_els_free_iocb routine to trigger the release of
2114 * the node. 2187 * the node.
@@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2152 struct lpfc_hba *phba = vport->phba; 2225 struct lpfc_hba *phba = vport->phba;
2153 IOCB_t *icmd; 2226 IOCB_t *icmd;
2154 struct lpfc_iocbq *elsiocb; 2227 struct lpfc_iocbq *elsiocb;
2155 struct lpfc_sli_ring *pring;
2156 struct lpfc_sli *psli; 2228 struct lpfc_sli *psli;
2157 FARP *fp; 2229 FARP *fp;
2158 uint8_t *pcmd; 2230 uint8_t *pcmd;
@@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2162 struct lpfc_nodelist *ndlp; 2234 struct lpfc_nodelist *ndlp;
2163 2235
2164 psli = &phba->sli; 2236 psli = &phba->sli;
2165 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2166 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2237 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2167 2238
2168 ndlp = lpfc_findnode_did(vport, nportid); 2239 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2219 2290
2220 phba->fc_stat.elsXmitFARPR++; 2291 phba->fc_stat.elsXmitFARPR++;
2221 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2292 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2222 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2293 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2294 IOCB_ERROR) {
2223 /* The additional lpfc_nlp_put will cause the following 2295 /* The additional lpfc_nlp_put will cause the following
2224 * lpfc_els_free_iocb routine to trigger the release of 2296 * lpfc_els_free_iocb routine to trigger the release of
2225 * the node. 2297 * the node.
@@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2949 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3021 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2950 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3022 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2951 3023
3024 /*
3025 * This routine is used to register and unregister in previous SLI
3026 * modes.
3027 */
3028 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3029 (phba->sli_rev == LPFC_SLI_REV4))
3030 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3031
2952 pmb->context1 = NULL; 3032 pmb->context1 = NULL;
2953 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3033 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2954 kfree(mp); 3034 kfree(mp);
@@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2961 */ 3041 */
2962 lpfc_nlp_not_used(ndlp); 3042 lpfc_nlp_not_used(ndlp);
2963 } 3043 }
3044
2964 return; 3045 return;
2965} 3046}
2966 3047
@@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3170 IOCB_t *icmd; 3251 IOCB_t *icmd;
3171 IOCB_t *oldcmd; 3252 IOCB_t *oldcmd;
3172 struct lpfc_iocbq *elsiocb; 3253 struct lpfc_iocbq *elsiocb;
3173 struct lpfc_sli_ring *pring;
3174 struct lpfc_sli *psli; 3254 struct lpfc_sli *psli;
3175 uint8_t *pcmd; 3255 uint8_t *pcmd;
3176 uint16_t cmdsize; 3256 uint16_t cmdsize;
@@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3178 ELS_PKT *els_pkt_ptr; 3258 ELS_PKT *els_pkt_ptr;
3179 3259
3180 psli = &phba->sli; 3260 psli = &phba->sli;
3181 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3182 oldcmd = &oldiocb->iocb; 3261 oldcmd = &oldiocb->iocb;
3183 3262
3184 switch (flag) { 3263 switch (flag) {
@@ -3266,7 +3345,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3266 } 3345 }
3267 3346
3268 phba->fc_stat.elsXmitACC++; 3347 phba->fc_stat.elsXmitACC++;
3269 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3348 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3270 if (rc == IOCB_ERROR) { 3349 if (rc == IOCB_ERROR) {
3271 lpfc_els_free_iocb(phba, elsiocb); 3350 lpfc_els_free_iocb(phba, elsiocb);
3272 return 1; 3351 return 1;
@@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3305 IOCB_t *icmd; 3384 IOCB_t *icmd;
3306 IOCB_t *oldcmd; 3385 IOCB_t *oldcmd;
3307 struct lpfc_iocbq *elsiocb; 3386 struct lpfc_iocbq *elsiocb;
3308 struct lpfc_sli_ring *pring;
3309 struct lpfc_sli *psli; 3387 struct lpfc_sli *psli;
3310 uint8_t *pcmd; 3388 uint8_t *pcmd;
3311 uint16_t cmdsize; 3389 uint16_t cmdsize;
3312 int rc; 3390 int rc;
3313 3391
3314 psli = &phba->sli; 3392 psli = &phba->sli;
3315 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3316
3317 cmdsize = 2 * sizeof(uint32_t); 3393 cmdsize = 2 * sizeof(uint32_t);
3318 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3394 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3319 ndlp->nlp_DID, ELS_CMD_LS_RJT); 3395 ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3422,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3346 3422
3347 phba->fc_stat.elsXmitLSRJT++; 3423 phba->fc_stat.elsXmitLSRJT++;
3348 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3349 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3425 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3350 3426
3351 if (rc == IOCB_ERROR) { 3427 if (rc == IOCB_ERROR) {
3352 lpfc_els_free_iocb(phba, elsiocb); 3428 lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3379 struct lpfc_nodelist *ndlp) 3455 struct lpfc_nodelist *ndlp)
3380{ 3456{
3381 struct lpfc_hba *phba = vport->phba; 3457 struct lpfc_hba *phba = vport->phba;
3382 struct lpfc_sli *psli = &phba->sli;
3383 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3384 ADISC *ap; 3458 ADISC *ap;
3385 IOCB_t *icmd, *oldcmd; 3459 IOCB_t *icmd, *oldcmd;
3386 struct lpfc_iocbq *elsiocb; 3460 struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3496,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3422 3496
3423 phba->fc_stat.elsXmitACC++; 3497 phba->fc_stat.elsXmitACC++;
3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3498 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3425 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3499 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3426 if (rc == IOCB_ERROR) { 3500 if (rc == IOCB_ERROR) {
3427 lpfc_els_free_iocb(phba, elsiocb); 3501 lpfc_els_free_iocb(phba, elsiocb);
3428 return 1; 3502 return 1;
@@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3459 IOCB_t *icmd; 3533 IOCB_t *icmd;
3460 IOCB_t *oldcmd; 3534 IOCB_t *oldcmd;
3461 struct lpfc_iocbq *elsiocb; 3535 struct lpfc_iocbq *elsiocb;
3462 struct lpfc_sli_ring *pring;
3463 struct lpfc_sli *psli; 3536 struct lpfc_sli *psli;
3464 uint8_t *pcmd; 3537 uint8_t *pcmd;
3465 uint16_t cmdsize; 3538 uint16_t cmdsize;
3466 int rc; 3539 int rc;
3467 3540
3468 psli = &phba->sli; 3541 psli = &phba->sli;
3469 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3470 3542
3471 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 3543 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3472 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3544 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3520 phba->fc_stat.elsXmitACC++; 3592 phba->fc_stat.elsXmitACC++;
3521 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3593 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3522 3594
3523 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3595 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3524 if (rc == IOCB_ERROR) { 3596 if (rc == IOCB_ERROR) {
3525 lpfc_els_free_iocb(phba, elsiocb); 3597 lpfc_els_free_iocb(phba, elsiocb);
3526 return 1; 3598 return 1;
@@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3562 RNID *rn; 3634 RNID *rn;
3563 IOCB_t *icmd, *oldcmd; 3635 IOCB_t *icmd, *oldcmd;
3564 struct lpfc_iocbq *elsiocb; 3636 struct lpfc_iocbq *elsiocb;
3565 struct lpfc_sli_ring *pring;
3566 struct lpfc_sli *psli; 3637 struct lpfc_sli *psli;
3567 uint8_t *pcmd; 3638 uint8_t *pcmd;
3568 uint16_t cmdsize; 3639 uint16_t cmdsize;
3569 int rc; 3640 int rc;
3570 3641
3571 psli = &phba->sli; 3642 psli = &phba->sli;
3572 pring = &psli->ring[LPFC_ELS_RING];
3573
3574 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 3643 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3575 + (2 * sizeof(struct lpfc_name)); 3644 + (2 * sizeof(struct lpfc_name));
3576 if (format) 3645 if (format)
@@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3626 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 3695 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3627 * it could be freed */ 3696 * it could be freed */
3628 3697
3629 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3698 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3630 if (rc == IOCB_ERROR) { 3699 if (rc == IOCB_ERROR) {
3631 lpfc_els_free_iocb(phba, elsiocb); 3700 lpfc_els_free_iocb(phba, elsiocb);
3632 return 1; 3701 return 1;
@@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3839 payload_len -= sizeof(uint32_t); 3908 payload_len -= sizeof(uint32_t);
3840 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 3909 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
3841 case RSCN_ADDRESS_FORMAT_PORT: 3910 case RSCN_ADDRESS_FORMAT_PORT:
3842 if (ns_did.un.word == rscn_did.un.word) 3911 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3912 && (ns_did.un.b.area == rscn_did.un.b.area)
3913 && (ns_did.un.b.id == rscn_did.un.b.id))
3843 goto return_did_out; 3914 goto return_did_out;
3844 break; 3915 break;
3845 case RSCN_ADDRESS_FORMAT_AREA: 3916 case RSCN_ADDRESS_FORMAT_AREA:
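
The per-field compare above is easy to misread as a no-op. The likely motivation, assuming the usual D_ID layout from lpfc_hw.h (not shown in this excerpt), is that un.word also carries the high resv byte, where the RSCN address-format bits sit, so two DIDs naming the same port can still differ in the full 32-bit word:

/* Assumed lpfc_hw.h layout (big-endian variant shown) */
typedef struct {
	union {
		uint32_t word;
		struct {
			uint8_t resv;	/* address-format/reserved bits */
			uint8_t domain;
			uint8_t area;
			uint8_t id;
		} b;
	} un;
} D_ID;

/* Comparing b.domain, b.area and b.id ignores resv, so an RSCN entry
 * whose format bits are set still matches the name-server DID for the
 * same port.
 */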
@@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4300 lpfc_init_link(phba, mbox, 4371 lpfc_init_link(phba, mbox,
4301 phba->cfg_topology, 4372 phba->cfg_topology,
4302 phba->cfg_link_speed); 4373 phba->cfg_link_speed);
4303 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 4374 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4304 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4375 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4305 mbox->vport = vport; 4376 mbox->vport = vport;
4306 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4377 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4440static void 4511static void
4441lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4512lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4442{ 4513{
4443 struct lpfc_sli *psli = &phba->sli;
4444 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4445 MAILBOX_t *mb; 4514 MAILBOX_t *mb;
4446 IOCB_t *icmd; 4515 IOCB_t *icmd;
4447 RPS_RSP *rps_rsp; 4516 RPS_RSP *rps_rsp;
@@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4451 uint16_t xri, status; 4520 uint16_t xri, status;
4452 uint32_t cmdsize; 4521 uint32_t cmdsize;
4453 4522
4454 mb = &pmb->mb; 4523 mb = &pmb->u.mb;
4455 4524
4456 ndlp = (struct lpfc_nodelist *) pmb->context2; 4525 ndlp = (struct lpfc_nodelist *) pmb->context2;
4457 xri = (uint16_t) ((unsigned long)(pmb->context1)); 4526 xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4507 ndlp->nlp_rpi); 4576 ndlp->nlp_rpi);
4508 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4577 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4509 phba->fc_stat.elsXmitACC++; 4578 phba->fc_stat.elsXmitACC++;
4510 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) 4579 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4511 lpfc_els_free_iocb(phba, elsiocb); 4580 lpfc_els_free_iocb(phba, elsiocb);
4512 return; 4581 return;
4513} 4582}
@@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4616 IOCB_t *icmd, *oldcmd; 4685 IOCB_t *icmd, *oldcmd;
4617 RPL_RSP rpl_rsp; 4686 RPL_RSP rpl_rsp;
4618 struct lpfc_iocbq *elsiocb; 4687 struct lpfc_iocbq *elsiocb;
4619 struct lpfc_sli *psli = &phba->sli;
4620 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4621 uint8_t *pcmd; 4688 uint8_t *pcmd;
4622 4689
4623 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4690 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4654 ndlp->nlp_rpi); 4721 ndlp->nlp_rpi);
4655 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4722 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4656 phba->fc_stat.elsXmitACC++; 4723 phba->fc_stat.elsXmitACC++;
4657 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 4724 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4725 IOCB_ERROR) {
4658 lpfc_els_free_iocb(phba, elsiocb); 4726 lpfc_els_free_iocb(phba, elsiocb);
4659 return 1; 4727 return 1;
4660 } 4728 }
@@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4883 } else { 4951 } else {
4884 /* FAN verified - skip FLOGI */ 4952 /* FAN verified - skip FLOGI */
4885 vport->fc_myDID = vport->fc_prevDID; 4953 vport->fc_myDID = vport->fc_prevDID;
4886 lpfc_issue_fabric_reglogin(vport); 4954 if (phba->sli_rev < LPFC_SLI_REV4)
4955 lpfc_issue_fabric_reglogin(vport);
4956 else
4957 lpfc_issue_reg_vfi(vport);
4887 } 4958 }
4888 } 4959 }
4889 return 0; 4960 return 0;
@@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5566 5637
5567dropit: 5638dropit:
5568 if (vport && !(vport->load_flag & FC_UNLOADING)) 5639 if (vport && !(vport->load_flag & FC_UNLOADING))
5569 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 5640 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5570 "(%d):0111 Dropping received ELS cmd " 5641 "0111 Dropping received ELS cmd "
5571 "Data: x%x x%x x%x\n", 5642 "Data: x%x x%x x%x\n",
5572 vport->vpi, icmd->ulpStatus, 5643 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
5573 icmd->un.ulpWord[4], icmd->ulpTimeout);
5574 phba->fc_stat.elsRcvDrop++; 5644 phba->fc_stat.elsRcvDrop++;
5575} 5645}
5576 5646
@@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5646 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 5716 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5647 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 5717 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5648 vport = phba->pport; 5718 vport = phba->pport;
5649 else { 5719 else
5650 uint16_t vpi = icmd->unsli3.rcvsli3.vpi; 5720 vport = lpfc_find_vport_by_vpid(phba,
5651 vport = lpfc_find_vport_by_vpid(phba, vpi); 5721 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
5652 }
5653 } 5722 }
5654 /* If there are no BDEs associated 5723 /* If there are no BDEs associated
5655 * with this IOCB, there is nothing to do. 5724 * with this IOCB, there is nothing to do.
@@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5781 struct lpfc_vport *vport = pmb->vport; 5850 struct lpfc_vport *vport = pmb->vport;
5782 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5851 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5783 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 5852 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5784 MAILBOX_t *mb = &pmb->mb; 5853 MAILBOX_t *mb = &pmb->u.mb;
5785 5854
5786 spin_lock_irq(shost->host_lock); 5855 spin_lock_irq(shost->host_lock);
5787 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 5856 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5818 5887
5819 } else { 5888 } else {
5820 if (vport == phba->pport) 5889 if (vport == phba->pport)
5821 lpfc_issue_fabric_reglogin(vport); 5890 if (phba->sli_rev < LPFC_SLI_REV4)
5891 lpfc_issue_fabric_reglogin(vport);
5892 else
5893 lpfc_issue_reg_vfi(vport);
5822 else 5894 else
5823 lpfc_do_scr_ns_plogi(phba, vport); 5895 lpfc_do_scr_ns_plogi(phba, vport);
5824 } 5896 }
@@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
5850 5922
5851 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5923 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5852 if (mbox) { 5924 if (mbox) {
5853 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); 5925 lpfc_reg_vpi(vport, mbox);
5854 mbox->vport = vport; 5926 mbox->vport = vport;
5855 mbox->context2 = lpfc_nlp_get(ndlp); 5927 mbox->context2 = lpfc_nlp_get(ndlp);
5856 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 5928 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6139{ 6211{
6140 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6212 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6141 struct lpfc_hba *phba = vport->phba; 6213 struct lpfc_hba *phba = vport->phba;
6142 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6143 IOCB_t *icmd; 6214 IOCB_t *icmd;
6144 struct lpfc_iocbq *elsiocb; 6215 struct lpfc_iocbq *elsiocb;
6145 uint8_t *pcmd; 6216 uint8_t *pcmd;
@@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6169 spin_lock_irq(shost->host_lock); 6240 spin_lock_irq(shost->host_lock);
6170 ndlp->nlp_flag |= NLP_LOGO_SND; 6241 ndlp->nlp_flag |= NLP_LOGO_SND;
6171 spin_unlock_irq(shost->host_lock); 6242 spin_unlock_irq(shost->host_lock);
6172 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 6243 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6244 IOCB_ERROR) {
6173 spin_lock_irq(shost->host_lock); 6245 spin_lock_irq(shost->host_lock);
6174 ndlp->nlp_flag &= ~NLP_LOGO_SND; 6246 ndlp->nlp_flag &= ~NLP_LOGO_SND;
6175 spin_unlock_irq(shost->host_lock); 6247 spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6224 struct lpfc_iocbq *iocb; 6296 struct lpfc_iocbq *iocb;
6225 unsigned long iflags; 6297 unsigned long iflags;
6226 int ret; 6298 int ret;
6227 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6228 IOCB_t *cmd; 6299 IOCB_t *cmd;
6229 6300
6230repeat: 6301repeat:
@@ -6248,7 +6319,7 @@ repeat:
6248 "Fabric sched1: ste:x%x", 6319 "Fabric sched1: ste:x%x",
6249 iocb->vport->port_state, 0, 0); 6320 iocb->vport->port_state, 0, 0);
6250 6321
6251 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6322 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6252 6323
6253 if (ret == IOCB_ERROR) { 6324 if (ret == IOCB_ERROR) {
6254 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6325 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6465,6 @@ static int
6394lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 6465lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6395{ 6466{
6396 unsigned long iflags; 6467 unsigned long iflags;
6397 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6398 int ready; 6468 int ready;
6399 int ret; 6469 int ret;
6400 6470
@@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6418 "Fabric sched2: ste:x%x", 6488 "Fabric sched2: ste:x%x",
6419 iocb->vport->port_state, 0, 0); 6489 iocb->vport->port_state, 0, 0);
6420 6490
6421 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6491 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6422 6492
6423 if (ret == IOCB_ERROR) { 6493 if (ret == IOCB_ERROR) {
6424 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6494 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6594 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6525 IOERR_SLI_ABORTED); 6595 IOERR_SLI_ABORTED);
6526} 6596}
6597
6598/**
6599 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6600 * @phba: pointer to lpfc hba data structure.
6601 * @axri: pointer to the els xri abort wcqe structure.
6602 *
6603 * This routine is invoked by the worker thread to process a SLI4 slow-path
6604 * ELS aborted xri.
6605 **/
6606void
6607lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6608 struct sli4_wcqe_xri_aborted *axri)
6609{
6610 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6611 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6612 unsigned long iflag = 0;
6613
6614 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6615 list_for_each_entry_safe(sglq_entry, sglq_next,
6616 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6617 if (sglq_entry->sli4_xritag == xri) {
6618 list_del(&sglq_entry->list);
6619 spin_unlock_irqrestore(
6620 &phba->sli4_hba.abts_sgl_list_lock,
6621 iflag);
6622 spin_lock_irqsave(&phba->hbalock, iflag);
6623
6624 list_add_tail(&sglq_entry->list,
6625 &phba->sli4_hba.lpfc_sgl_list);
6626 spin_unlock_irqrestore(&phba->hbalock, iflag);
6627 return;
6628 }
6629 }
6630 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6631}
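
How lpfc_sli4_els_xri_aborted() is reached is visible only indirectly in this diff (the lpfc_work_done() hunk in lpfc_hbadisc.c below polls ELS_XRI_ABORT_EVENT). A hedged sketch of the assumed call chain:

/* Assumed flow, pieced together from this patch (the lpfc_sli.c
 * internals are not shown here):
 *
 *   ISR sees an ELS XRI-abort WCQE
 *     -> queues the event, sets phba->hba_flag |= ELS_XRI_ABORT_EVENT
 *   worker thread: lpfc_work_done()
 *     -> lpfc_sli4_els_xri_abort_event_proc(phba)
 *       -> lpfc_sli4_els_xri_aborted(phba, axri)    [per queued WCQE]
 *          moves the matching sglq from lpfc_abts_els_sgl_list back to
 *          lpfc_sgl_list so its XRI can be reused.
 */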
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf704..35c41ae75be2 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,10 +29,12 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
31 31
32#include "lpfc_hw4.h"
32#include "lpfc_hw.h" 33#include "lpfc_hw.h"
33#include "lpfc_nl.h" 34#include "lpfc_nl.h"
34#include "lpfc_disc.h" 35#include "lpfc_disc.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_scsi.h" 38#include "lpfc_scsi.h"
37#include "lpfc.h" 39#include "lpfc.h"
38#include "lpfc_logmsg.h" 40#include "lpfc_logmsg.h"
@@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
273 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 275 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
274 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 276 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
275 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 277 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
278
279 lpfc_unregister_unused_fcf(phba);
276} 280}
277 281
278/** 282/**
@@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
295 299
296 ret = kzalloc(sizeof(struct lpfc_fast_path_event), 300 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
297 GFP_ATOMIC); 301 GFP_ATOMIC);
298 if (ret) 302 if (ret) {
299 atomic_inc(&phba->fast_event_count); 303 atomic_inc(&phba->fast_event_count);
300 INIT_LIST_HEAD(&ret->work_evt.evt_listp); 304 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
301 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 305 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
306 }
302 return ret; 307 return ret;
303} 308}
304 309
@@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba)
491 phba->work_ha = 0; 496 phba->work_ha = 0;
492 spin_unlock_irq(&phba->hbalock); 497 spin_unlock_irq(&phba->hbalock);
493 498
499 /* First, try to post the next mailbox command to the SLI4 device */
500 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
501 lpfc_sli4_post_async_mbox(phba);
502
494 if (ha_copy & HA_ERATT) 503 if (ha_copy & HA_ERATT)
495 /* Handle the error attention event */ 504 /* Handle the error attention event */
496 lpfc_handle_eratt(phba); 505 lpfc_handle_eratt(phba);
@@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba)
501 if (ha_copy & HA_LATT) 510 if (ha_copy & HA_LATT)
502 lpfc_handle_latt(phba); 511 lpfc_handle_latt(phba);
503 512
513 /* Process SLI4 events */
514 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
515 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
516 lpfc_sli4_fcp_xri_abort_event_proc(phba);
517 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
518 lpfc_sli4_els_xri_abort_event_proc(phba);
519 if (phba->hba_flag & ASYNC_EVENT)
520 lpfc_sli4_async_event_proc(phba);
521 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
522 spin_lock_irq(&phba->hbalock);
523 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
524 spin_unlock_irq(&phba->hbalock);
525 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
526 }
527 if (phba->hba_flag & HBA_RECEIVE_BUFFER)
528 lpfc_sli4_handle_received_buffer(phba);
529 }
530
504 vports = lpfc_create_vport_work_array(phba); 531 vports = lpfc_create_vport_work_array(phba);
505 if (vports != NULL) 532 if (vports != NULL)
506 for(i = 0; i <= phba->max_vpi; i++) { 533 for (i = 0; i <= phba->max_vports; i++) {
507 /* 534 /*
508 * We could have no vports in array if unloading, so if 535 * We could have no vports in array if unloading, so if
509 * this happens then just use the pport 536 * this happens then just use the pport
@@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba)
555 /* 582 /*
556 * Turn on Ring interrupts 583 * Turn on Ring interrupts
557 */ 584 */
558 spin_lock_irq(&phba->hbalock); 585 if (phba->sli_rev <= LPFC_SLI_REV3) {
559 control = readl(phba->HCregaddr); 586 spin_lock_irq(&phba->hbalock);
560 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 587 control = readl(phba->HCregaddr);
561 lpfc_debugfs_slow_ring_trc(phba, 588 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
562 "WRK Enable ring: cntl:x%x hacopy:x%x", 589 lpfc_debugfs_slow_ring_trc(phba,
563 control, ha_copy, 0); 590 "WRK Enable ring: cntl:x%x hacopy:x%x",
564 591 control, ha_copy, 0);
565 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 592
566 writel(control, phba->HCregaddr); 593 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
567 readl(phba->HCregaddr); /* flush */ 594 writel(control, phba->HCregaddr);
568 } 595 readl(phba->HCregaddr); /* flush */
569 else { 596 } else {
570 lpfc_debugfs_slow_ring_trc(phba, 597 lpfc_debugfs_slow_ring_trc(phba,
571 "WRK Ring ok: cntl:x%x hacopy:x%x", 598 "WRK Ring ok: cntl:x%x hacopy:x%x",
572 control, ha_copy, 0); 599 control, ha_copy, 0);
600 }
601 spin_unlock_irq(&phba->hbalock);
573 } 602 }
574 spin_unlock_irq(&phba->hbalock);
575 } 603 }
576 lpfc_work_list_done(phba); 604 lpfc_work_list_done(phba);
577} 605}
@@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
689 lpfc_can_disctmo(vport); 717 lpfc_can_disctmo(vport);
690} 718}
691 719
692static void 720void
693lpfc_linkdown_port(struct lpfc_vport *vport) 721lpfc_linkdown_port(struct lpfc_vport *vport)
694{ 722{
695 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
716 if (phba->link_state == LPFC_LINK_DOWN) 744 if (phba->link_state == LPFC_LINK_DOWN)
717 return 0; 745 return 0;
718 spin_lock_irq(&phba->hbalock); 746 spin_lock_irq(&phba->hbalock);
747 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
719 if (phba->link_state > LPFC_LINK_DOWN) { 748 if (phba->link_state > LPFC_LINK_DOWN) {
720 phba->link_state = LPFC_LINK_DOWN; 749 phba->link_state = LPFC_LINK_DOWN;
721 phba->pport->fc_flag &= ~FC_LBIT; 750 phba->pport->fc_flag &= ~FC_LBIT;
@@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
723 spin_unlock_irq(&phba->hbalock); 752 spin_unlock_irq(&phba->hbalock);
724 vports = lpfc_create_vport_work_array(phba); 753 vports = lpfc_create_vport_work_array(phba);
725 if (vports != NULL) 754 if (vports != NULL)
726 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 755 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
727 /* Issue a LINK DOWN event to all nodes */ 756 /* Issue a LINK DOWN event to all nodes */
728 lpfc_linkdown_port(vports[i]); 757 lpfc_linkdown_port(vports[i]);
729 } 758 }
@@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
833 862
834 vports = lpfc_create_vport_work_array(phba); 863 vports = lpfc_create_vport_work_array(phba);
835 if (vports != NULL) 864 if (vports != NULL)
836 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
837 lpfc_linkup_port(vports[i]); 866 lpfc_linkup_port(vports[i]);
838 lpfc_destroy_vport_work_array(phba, vports); 867 lpfc_destroy_vport_work_array(phba, vports);
839 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 868 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
869 (phba->sli_rev < LPFC_SLI_REV4))
840 lpfc_issue_clear_la(phba, phba->pport); 870 lpfc_issue_clear_la(phba, phba->pport);
841 871
842 return 0; 872 return 0;
@@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
854 struct lpfc_vport *vport = pmb->vport; 884 struct lpfc_vport *vport = pmb->vport;
855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 885 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
856 struct lpfc_sli *psli = &phba->sli; 886 struct lpfc_sli *psli = &phba->sli;
857 MAILBOX_t *mb = &pmb->mb; 887 MAILBOX_t *mb = &pmb->u.mb;
858 uint32_t control; 888 uint32_t control;
859 889
860 /* Since we don't do discovery right now, turn these off here */ 890 /* Since we don't do discovery right now, turn these off here */
@@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
917{ 947{
918 struct lpfc_vport *vport = pmb->vport; 948 struct lpfc_vport *vport = pmb->vport;
919 949
920 if (pmb->mb.mbxStatus) 950 if (pmb->u.mb.mbxStatus)
921 goto out; 951 goto out;
922 952
923 mempool_free(pmb, phba->mbox_mem_pool); 953 mempool_free(pmb, phba->mbox_mem_pool);
@@ -945,7 +975,7 @@ out:
945 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 975 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
946 "0306 CONFIG_LINK mbxStatus error x%x " 976 "0306 CONFIG_LINK mbxStatus error x%x "
947 "HBA state x%x\n", 977 "HBA state x%x\n",
948 pmb->mb.mbxStatus, vport->port_state); 978 pmb->u.mb.mbxStatus, vport->port_state);
949 mempool_free(pmb, phba->mbox_mem_pool); 979 mempool_free(pmb, phba->mbox_mem_pool);
950 980
951 lpfc_linkdown(phba); 981 lpfc_linkdown(phba);
@@ -959,9 +989,592 @@ out:
959} 989}
960 990
961static void 991static void
992lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
993{
994 struct lpfc_vport *vport = mboxq->vport;
995 unsigned long flags;
996
997 if (mboxq->u.mb.mbxStatus) {
998 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
999 "2017 REG_FCFI mbxStatus error x%x "
1000 "HBA state x%x\n",
1001 mboxq->u.mb.mbxStatus, vport->port_state);
1002 mempool_free(mboxq, phba->mbox_mem_pool);
1003 return;
1004 }
1005
1006 /* Start FCoE discovery by sending a FLOGI. */
1007 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1008 /* Set the FCFI registered flag */
1009 spin_lock_irqsave(&phba->hbalock, flags);
1010 phba->fcf.fcf_flag |= FCF_REGISTERED;
1011 spin_unlock_irqrestore(&phba->hbalock, flags);
1012 if (vport->port_state != LPFC_FLOGI) {
1013 spin_lock_irqsave(&phba->hbalock, flags);
1014 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1015 spin_unlock_irqrestore(&phba->hbalock, flags);
1016 lpfc_initial_flogi(vport);
1017 }
1018
1019 mempool_free(mboxq, phba->mbox_mem_pool);
1020 return;
1021}
1022
1023/**
1024 * lpfc_fab_name_match - Check if the fcf fabric names match.
1025 * @fab_name: pointer to fabric name.
1026 * @new_fcf_record: pointer to fcf record.
1027 *
1028 * This routine compares the fcf record's fabric name with the provided
1029 * fabric name. If the fabric names are identical, this function
1030 * returns 1, else it returns 0.
1031 **/
1032static uint32_t
1033lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1034{
1035 if ((fab_name[0] ==
1036 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
1037 (fab_name[1] ==
1038 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
1039 (fab_name[2] ==
1040 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
1041 (fab_name[3] ==
1042 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
1043 (fab_name[4] ==
1044 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
1045 (fab_name[5] ==
1046 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
1047 (fab_name[6] ==
1048 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
1049 (fab_name[7] ==
1050 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1051 return 1;
1052 else
1053 return 0;
1054}
1055
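
lpfc_fab_name_match() and the routines below lean heavily on bf_get(), which comes from the new lpfc_hw4.h (not shown in this excerpt). As a hedged sketch, it is assumed to be the conventional token-pasting shift/mask accessor:

/* Assumed form of the lpfc_hw4.h bit-field accessors: each field name
 * expands to a _WORD member plus _SHIFT/_MASK constants.
 */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))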
1056/**
1057 * lpfc_mac_addr_match - Check if the fcf mac addresses match.
1058 * @phba: pointer to lpfc hba data structure.
1059 * @new_fcf_record: pointer to fcf record.
1060 *
1061 * This routine compares the fcf record's mac address with the HBA's
1062 * FCF mac address. If the mac addresses are identical, this function
1063 * returns 1, else it returns 0.
1064 **/
1065static uint32_t
1066lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1067{
1068 if ((phba->fcf.mac_addr[0] ==
1069 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
1070 (phba->fcf.mac_addr[1] ==
1071 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
1072 (phba->fcf.mac_addr[2] ==
1073 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1074 (phba->fcf.mac_addr[3] ==
1075 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1076 (phba->fcf.mac_addr[4] ==
1077 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1078 (phba->fcf.mac_addr[5] ==
1079 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1080 return 1;
1081 else
1082 return 0;
1083}
1084
1085/**
1086 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1087 * @phba: pointer to lpfc hba data structure.
1088 * @new_fcf_record: pointer to fcf record.
1089 *
1090 * This routine copies the FCF information from the FCF
1091 * record to the lpfc_hba data structure.
1092 **/
1093static void
1094lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1095{
1096 phba->fcf.fabric_name[0] =
1097 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1098 phba->fcf.fabric_name[1] =
1099 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1100 phba->fcf.fabric_name[2] =
1101 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1102 phba->fcf.fabric_name[3] =
1103 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1104 phba->fcf.fabric_name[4] =
1105 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1106 phba->fcf.fabric_name[5] =
1107 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1108 phba->fcf.fabric_name[6] =
1109 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1110 phba->fcf.fabric_name[7] =
1111 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1112 phba->fcf.mac_addr[0] =
1113 bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1114 phba->fcf.mac_addr[1] =
1115 bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1116 phba->fcf.mac_addr[2] =
1117 bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1118 phba->fcf.mac_addr[3] =
1119 bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1120 phba->fcf.mac_addr[4] =
1121 bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1122 phba->fcf.mac_addr[5] =
1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1125 phba->fcf.priority = new_fcf_record->fip_priority;
1126}
1127
1128/**
1129 * lpfc_register_fcf - Register the FCF with the hba.
1130 * @phba: pointer to lpfc hba data structure.
1131 *
1132 * This routine issues a register fcfi mailbox command to register
1133 * the fcf with HBA.
1134 **/
1135static void
1136lpfc_register_fcf(struct lpfc_hba *phba)
1137{
1138 LPFC_MBOXQ_t *fcf_mbxq;
1139 int rc;
1140 unsigned long flags;
1141
1142 spin_lock_irqsave(&phba->hbalock, flags);
1143
1144	/* If the FCF is not available, do nothing. */
1145 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1146 spin_unlock_irqrestore(&phba->hbalock, flags);
1147 return;
1148 }
1149
1150 /* The FCF is already registered, start discovery */
1151 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1152 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1153 spin_unlock_irqrestore(&phba->hbalock, flags);
1154 if (phba->pport->port_state != LPFC_FLOGI)
1155 lpfc_initial_flogi(phba->pport);
1156 return;
1157 }
1158 spin_unlock_irqrestore(&phba->hbalock, flags);
1159
1160 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1161 GFP_KERNEL);
1162 if (!fcf_mbxq)
1163 return;
1164
1165 lpfc_reg_fcfi(phba, fcf_mbxq);
1166 fcf_mbxq->vport = phba->pport;
1167 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1168 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1169 if (rc == MBX_NOT_FINISHED)
1170 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1171
1172 return;
1173}
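
lpfc_register_fcf() follows the driver's usual asynchronous-mailbox discipline: flag checks happen under hbalock, the command is allocated from the mailbox mempool outside the lock, a completion handler is attached, and the caller frees the command only when lpfc_sli_issue_mbox() returns MBX_NOT_FINISHED; otherwise ownership passes to the completion path. A standalone sketch of that ownership rule (all demo_* names are hypothetical, and the MBX_* values are demo stand-ins, not the driver's constants):

#include <stdio.h>
#include <stdlib.h>

#define MBX_NOWAIT        0
#define MBX_SUCCESS       0
#define MBX_NOT_FINISHED  255

struct demo_mbox {
	void (*cmpl)(struct demo_mbox *);
};

/* Pretend queue: accept the command and invoke its completion. */
static int demo_issue_mbox(struct demo_mbox *m, int flag, int fail)
{
	(void)flag;			/* queueing mode not modeled here */
	if (fail)
		return MBX_NOT_FINISHED;	/* caller keeps ownership */
	m->cmpl(m);				/* handler now owns/frees m */
	return MBX_SUCCESS;
}

static void demo_cmpl(struct demo_mbox *m)
{
	puts("completion ran; freeing command here");
	free(m);
}

int main(void)
{
	struct demo_mbox *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	m->cmpl = demo_cmpl;
	if (demo_issue_mbox(m, MBX_NOWAIT, 0) == MBX_NOT_FINISHED)
		free(m);	/* only free on refusal, as in the driver */
	return 0;
}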
1174
1175/**
1176 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1177 * @phba: pointer to lpfc hba data structure.
1178 * @new_fcf_record: pointer to fcf record.
1179 * @boot_flag: Indicates if this record is used by the boot bios.
1180 * @addr_mode: The address mode to be used by this FCF
1181 *
1182 * This routine compares the fcf record with the connect list obtained from the
1183 * config region to decide if this FCF can be used for SAN discovery. It returns
1184 * 1 if this record can be used for SAN discovery, else it returns zero. If this
1185 * FCF record can be used for SAN discovery, boot_flag will indicate if this FCF
1186 * is used by the boot bios and addr_mode will indicate the addressing mode to
1187 * be used for this FCF when the function returns.
1188 * If the FCF record needs to be used with a particular vlan id, the vlan is
1189 * set in vlan_id on return from the function. If no VLAN tagging needs to
1190 * be used with the FCF, vlan_id will be set to 0xFFFF.
1191 **/
1192static int
1193lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1194 struct fcf_record *new_fcf_record,
1195 uint32_t *boot_flag, uint32_t *addr_mode,
1196 uint16_t *vlan_id)
1197{
1198 struct lpfc_fcf_conn_entry *conn_entry;
1199
1200 if (!phba->cfg_enable_fip) {
1201 *boot_flag = 0;
1202 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1203 new_fcf_record);
1204 if (phba->valid_vlan)
1205 *vlan_id = phba->vlan_id;
1206 else
1207 *vlan_id = 0xFFFF;
1208 return 1;
1209 }
1210
1211 /*
1212	 * If there are no FCF connection table entries, the driver connects
1213	 * to all FCFs.
1214 */
1215 if (list_empty(&phba->fcf_conn_rec_list)) {
1216 *boot_flag = 0;
1217 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1218 new_fcf_record);
1219 *vlan_id = 0xFFFF;
1220 return 1;
1221 }
1222
1223 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
1224 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1225 continue;
1226
1227 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1228 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1229 new_fcf_record))
1230 continue;
1231
1232 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1233 /*
1234 * If the vlan bit map does not have the bit set for the
1235 * vlan id to be used, then it is not a match.
1236 */
1237 if (!(new_fcf_record->vlan_bitmap
1238 [conn_entry->conn_rec.vlan_tag / 8] &
1239 (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1240 continue;
1241 }
1242
1243 /*
1244 * Check if the connection record specifies a required
1245 * addressing mode.
1246 */
1247 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1248 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1249
1250 /*
1251			 * If SPMA is required but the FCF does not support it, continue.
1252 */
1253 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1254 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1255 new_fcf_record) & LPFC_FCF_SPMA))
1256 continue;
1257
1258 /*
1259			 * If FPMA is required but the FCF does not support it, continue.
1260 */
1261 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1262 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1263 new_fcf_record) & LPFC_FCF_FPMA))
1264 continue;
1265 }
1266
1267 /*
1268 * This fcf record matches filtering criteria.
1269 */
1270 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1271 *boot_flag = 1;
1272 else
1273 *boot_flag = 0;
1274
1275 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1276 new_fcf_record);
1277 /*
1278 * If the user specified a required address mode, assign that
1279 * address mode
1280 */
1281 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1282 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1283 *addr_mode = (conn_entry->conn_rec.flags &
1284 FCFCNCT_AM_SPMA) ?
1285 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1286 /*
1287		 * If the user specified a preferred address mode, use that
1288		 * mode only if the FCF supports it.
1289 */
1290 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1291 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1292 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1293 (*addr_mode & LPFC_FCF_SPMA))
1294 *addr_mode = LPFC_FCF_SPMA;
1295 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1296 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1297 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1298 (*addr_mode & LPFC_FCF_FPMA))
1299 *addr_mode = LPFC_FCF_FPMA;
1300 /*
1301		 * If the user did not specify any addressing mode, use FPMA if
1302		 * possible, else use SPMA.
1303 */
1304 else if (*addr_mode & LPFC_FCF_FPMA)
1305 *addr_mode = LPFC_FCF_FPMA;
1306
1307 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1308 *vlan_id = conn_entry->conn_rec.vlan_tag;
1309 else
1310 *vlan_id = 0xFFFF;
1311
1312 return 1;
1313 }
1314
1315 return 0;
1316}
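
The VLAN filter near the top of the loop treats vlan_bitmap as a packed bit array, one bit per VLAN ID: byte index vlan_tag / 8, bit index vlan_tag % 8. A runnable restatement of that check; the 512-byte size is an assumption matching 4096 VLAN IDs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VLAN_BITMAP_BYTES 512	/* 4096 VLAN IDs, one bit each */

static int vlan_allowed(const uint8_t *bitmap, uint16_t tag)
{
	return !!(bitmap[tag / 8] & (1 << (tag % 8)));
}

int main(void)
{
	uint8_t bitmap[VLAN_BITMAP_BYTES];
	uint16_t tag = 100;

	memset(bitmap, 0, sizeof(bitmap));
	bitmap[tag / 8] |= 1 << (tag % 8);	/* FCF advertises VLAN 100 */

	printf("vlan 100 allowed: %d\n", vlan_allowed(bitmap, 100));
	printf("vlan 101 allowed: %d\n", vlan_allowed(bitmap, 101));
	return 0;
}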
1317
1318/**
1319 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1320 * @phba: pointer to lpfc hba data structure.
1321 * @mboxq: pointer to mailbox object.
1322 *
1323 * This function iterates through all the fcf records available in the
1324 * HBA and chooses the optimal FCF record for discovery. After finding
1325 * the FCF for discovery it registers the FCF record and kick starts
1326 * discovery.
1327 * If the FCF_IN_USE flag is set in the currently used FCF, the routine
1328 * tries to use an FCF record which matches the fabric name and mac
1329 * address of the currently used FCF record.
1330 * If the driver supports only one FCF, it will try to use the FCF record
1331 * used by the BOOT_BIOS.
1332 */
1333void
1334lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1335{
1336 void *virt_addr;
1337 dma_addr_t phys_addr;
1338 uint8_t *bytep;
1339 struct lpfc_mbx_sge sge;
1340 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1341 uint32_t shdr_status, shdr_add_status;
1342 union lpfc_sli4_cfg_shdr *shdr;
1343 struct fcf_record *new_fcf_record;
1344 int rc;
1345 uint32_t boot_flag, addr_mode;
1346 uint32_t next_fcf_index;
1347 unsigned long flags;
1348 uint16_t vlan_id;
1349
1350 /* Get the first SGE entry from the non-embedded DMA memory. This
1351 * routine only uses a single SGE.
1352 */
1353 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1354 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1355 if (unlikely(!mboxq->sge_array)) {
1356 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1357 "2524 Failed to get the non-embedded SGE "
1358 "virtual address\n");
1359 goto out;
1360 }
1361 virt_addr = mboxq->sge_array->addr[0];
1362
1363 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1364 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1365 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1366 &shdr->response);
1367 /*
1368 * The FCF Record was read and there is no reason for the driver
1369	 * to maintain the FCF record data or memory. Instead, it only
1370	 * needs to keep track of which FCFIs can be used.
1371 */
1372 if (shdr_status || shdr_add_status) {
1373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1374 "2521 READ_FCF_RECORD mailbox failed "
1375 "with status x%x add_status x%x, mbx\n",
1376 shdr_status, shdr_add_status);
1377 goto out;
1378 }
1379 /* Interpreting the returned information of FCF records */
1380 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1381 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1382 sizeof(struct lpfc_mbx_read_fcf_tbl));
1383 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1384
1385 new_fcf_record = (struct fcf_record *)(virt_addr +
1386 sizeof(struct lpfc_mbx_read_fcf_tbl));
1387 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1388 sizeof(struct fcf_record));
1389 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1390
1391 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
1392 &boot_flag, &addr_mode,
1393 &vlan_id);
1394 /*
1395	 * If the fcf record does not match the connect list entries,
1396	 * read the next entry.
1397 */
1398 if (!rc)
1399 goto read_next_fcf;
1400 /*
1401	 * If this is not the first FCF discovery of the HBA, use the
1402	 * last FCF record for the discovery.
1403 */
1404 spin_lock_irqsave(&phba->hbalock, flags);
1405 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1406 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1407 new_fcf_record) &&
1408 lpfc_mac_addr_match(phba, new_fcf_record)) {
1409 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1410 spin_unlock_irqrestore(&phba->hbalock, flags);
1411 goto out;
1412 }
1413 spin_unlock_irqrestore(&phba->hbalock, flags);
1414 goto read_next_fcf;
1415 }
1416 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1417 /*
1418 * If the current FCF record does not have boot flag
1419 * set and new fcf record has boot flag set, use the
1420 * new fcf record.
1421 */
1422 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1423 /* Use this FCF record */
1424 lpfc_copy_fcf_record(phba, new_fcf_record);
1425 phba->fcf.addr_mode = addr_mode;
1426 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1427 if (vlan_id != 0xFFFF) {
1428 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1429 phba->fcf.vlan_id = vlan_id;
1430 }
1431 spin_unlock_irqrestore(&phba->hbalock, flags);
1432 goto read_next_fcf;
1433 }
1434 /*
1435 * If the current FCF record has boot flag set and the
1436 * new FCF record does not have boot flag, read the next
1437 * FCF record.
1438 */
1439 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1440 spin_unlock_irqrestore(&phba->hbalock, flags);
1441 goto read_next_fcf;
1442 }
1443 /*
1444 * If there is a record with lower priority value for
1445 * the current FCF, use that record.
1446 */
1447 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
1448 && (new_fcf_record->fip_priority <
1449 phba->fcf.priority)) {
1450 /* Use this FCF record */
1451 lpfc_copy_fcf_record(phba, new_fcf_record);
1452 phba->fcf.addr_mode = addr_mode;
1453 if (vlan_id != 0xFFFF) {
1454 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1455 phba->fcf.vlan_id = vlan_id;
1456 }
1457 spin_unlock_irqrestore(&phba->hbalock, flags);
1458 goto read_next_fcf;
1459 }
1460 spin_unlock_irqrestore(&phba->hbalock, flags);
1461 goto read_next_fcf;
1462 }
1463 /*
1464 * This is the first available FCF record, use this
1465 * record.
1466 */
1467 lpfc_copy_fcf_record(phba, new_fcf_record);
1468 phba->fcf.addr_mode = addr_mode;
1469 if (boot_flag)
1470 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1471 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1472 if (vlan_id != 0xFFFF) {
1473 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1474 phba->fcf.vlan_id = vlan_id;
1475 }
1476 spin_unlock_irqrestore(&phba->hbalock, flags);
1477 goto read_next_fcf;
1478
1479read_next_fcf:
1480 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1481 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
1482 lpfc_register_fcf(phba);
1483 else
1484 lpfc_sli4_read_fcf_record(phba, next_fcf_index);
1485 return;
1486
1487out:
1488 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1489 lpfc_register_fcf(phba);
1490
1491 return;
1492}
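
Stripped of locking and the in-use fast path, the handler's record-selection order is: an already in-use FCF wins if it reappears; otherwise a boot-BIOS record beats a non-boot record; otherwise, among records on the same fabric, the lower fip_priority wins. A condensed sketch of that ordering (the same-fabric check and the in-use case are deliberately omitted for brevity):

#include <stdint.h>
#include <stdio.h>

struct demo_fcf {
	int boot;		/* record flagged for boot BIOS */
	uint8_t priority;	/* lower value wins, as with fip_priority */
};

/* Return 1 if 'cand' should replace 'cur' under the handler's policy. */
static int demo_prefer(const struct demo_fcf *cur, const struct demo_fcf *cand)
{
	if (cand->boot && !cur->boot)
		return 1;		/* boot records always win */
	if (!cand->boot && cur->boot)
		return 0;
	return cand->priority < cur->priority;
}

int main(void)
{
	struct demo_fcf cur = { 0, 128 }, cand = { 0, 2 };

	printf("replace=%d\n", demo_prefer(&cur, &cand));	/* prints 1 */
	return 0;
}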
1493
1494/**
1495 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
1496 * @phba: pointer to lpfc hba data structure.
1497 *
1498 * This function loops through the list of vports on the @phba and issues an
1499 * FDISC if possible.
1500 */
1501void
1502lpfc_start_fdiscs(struct lpfc_hba *phba)
1503{
1504 struct lpfc_vport **vports;
1505 int i;
1506
1507 vports = lpfc_create_vport_work_array(phba);
1508 if (vports != NULL) {
1509 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1510 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1511 continue;
1512			/* There is no vpi available for this vport */
1513 if (vports[i]->vpi > phba->max_vpi) {
1514 lpfc_vport_set_state(vports[i],
1515 FC_VPORT_FAILED);
1516 continue;
1517 }
1518 if (phba->fc_topology == TOPOLOGY_LOOP) {
1519 lpfc_vport_set_state(vports[i],
1520 FC_VPORT_LINKDOWN);
1521 continue;
1522 }
1523 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1524 lpfc_initial_fdisc(vports[i]);
1525 else {
1526 lpfc_vport_set_state(vports[i],
1527 FC_VPORT_NO_FABRIC_SUPP);
1528 lpfc_printf_vlog(vports[i], KERN_ERR,
1529 LOG_ELS,
1530 "0259 No NPIV "
1531 "Fabric support\n");
1532 }
1533 }
1534 }
1535 lpfc_destroy_vport_work_array(phba, vports);
1536}
1537
1538void
1539lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1540{
1541 struct lpfc_dmabuf *dmabuf = mboxq->context1;
1542 struct lpfc_vport *vport = mboxq->vport;
1543
1544 if (mboxq->u.mb.mbxStatus) {
1545 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1546 "2018 REG_VFI mbxStatus error x%x "
1547 "HBA state x%x\n",
1548 mboxq->u.mb.mbxStatus, vport->port_state);
1549 if (phba->fc_topology == TOPOLOGY_LOOP) {
1550 /* FLOGI failed, use loop map to make discovery list */
1551 lpfc_disc_list_loopmap(vport);
1552 /* Start discovery */
1553 lpfc_disc_start(vport);
1554 goto fail_free_mem;
1555 }
1556 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1557 goto fail_free_mem;
1558 }
1559	/* Mark the vport as registered with its VFI */
1560 vport->vfi_state |= LPFC_VFI_REGISTERED;
1561
1562 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1563 lpfc_start_fdiscs(phba);
1564 lpfc_do_scr_ns_plogi(phba, vport);
1565 }
1566
1567fail_free_mem:
1568 mempool_free(mboxq, phba->mbox_mem_pool);
1569 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1570 kfree(dmabuf);
1571 return;
1572}
1573
1574static void
962lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1575lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
963{ 1576{
964 MAILBOX_t *mb = &pmb->mb; 1577 MAILBOX_t *mb = &pmb->u.mb;
965 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1578 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
966 struct lpfc_vport *vport = pmb->vport; 1579 struct lpfc_vport *vport = pmb->vport;
967 1580
@@ -1012,13 +1625,13 @@ static void
1012lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 1625lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1013{ 1626{
1014 struct lpfc_vport *vport = phba->pport; 1627 struct lpfc_vport *vport = phba->pport;
1015 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 1628 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
1016 int i; 1629 int i;
1017 struct lpfc_dmabuf *mp; 1630 struct lpfc_dmabuf *mp;
1018 int rc; 1631 int rc;
1632 struct fcf_record *fcf_record;
1019 1633
1020 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1634 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1021 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1022 1635
1023 spin_lock_irq(&phba->hbalock); 1636 spin_lock_irq(&phba->hbalock);
1024 switch (la->UlnkSpeed) { 1637 switch (la->UlnkSpeed) {
@@ -1034,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1034 case LA_8GHZ_LINK: 1647 case LA_8GHZ_LINK:
1035 phba->fc_linkspeed = LA_8GHZ_LINK; 1648 phba->fc_linkspeed = LA_8GHZ_LINK;
1036 break; 1649 break;
1650 case LA_10GHZ_LINK:
1651 phba->fc_linkspeed = LA_10GHZ_LINK;
1652 break;
1037 default: 1653 default:
1038 phba->fc_linkspeed = LA_UNKNW_LINK; 1654 phba->fc_linkspeed = LA_UNKNW_LINK;
1039 break; 1655 break;
@@ -1115,22 +1731,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1115 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1731 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1116 kfree(mp); 1732 kfree(mp);
1117 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1733 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1118 if (cfglink_mbox)
1119 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1120 goto out; 1734 goto out;
1121 } 1735 }
1122 } 1736 }
1123 1737
1124 if (cfglink_mbox) { 1738 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
1739 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1740 if (!cfglink_mbox)
1741 goto out;
1125 vport->port_state = LPFC_LOCAL_CFG_LINK; 1742 vport->port_state = LPFC_LOCAL_CFG_LINK;
1126 lpfc_config_link(phba, cfglink_mbox); 1743 lpfc_config_link(phba, cfglink_mbox);
1127 cfglink_mbox->vport = vport; 1744 cfglink_mbox->vport = vport;
1128 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1745 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1129 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 1746 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1130 if (rc != MBX_NOT_FINISHED) 1747 if (rc == MBX_NOT_FINISHED) {
1131 return; 1748 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1132 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1749 goto out;
1750 }
1751 } else {
1752 /*
1753 * Add the driver's default FCF record at FCF index 0 now. This
1754 * is phase 1 implementation that support FCF index 0 and driver
1755 * defaults.
1756 */
1757 if (phba->cfg_enable_fip == 0) {
1758 fcf_record = kzalloc(sizeof(struct fcf_record),
1759 GFP_KERNEL);
1760 if (unlikely(!fcf_record)) {
1761 lpfc_printf_log(phba, KERN_ERR,
1762 LOG_MBOX | LOG_SLI,
1763 "2554 Could not allocate memmory for "
1764 "fcf record\n");
1765 rc = -ENODEV;
1766 goto out;
1767 }
1768
1769 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
1770 LPFC_FCOE_FCF_DEF_INDEX);
1771 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
1772 if (unlikely(rc)) {
1773 lpfc_printf_log(phba, KERN_ERR,
1774 LOG_MBOX | LOG_SLI,
1775 "2013 Could not manually add FCF "
1776 "record 0, status %d\n", rc);
1777 rc = -ENODEV;
1778 kfree(fcf_record);
1779 goto out;
1780 }
1781 kfree(fcf_record);
1782 }
1783 /*
1784 * The driver is expected to do FIP/FCF. Call the port
1785 * and get the FCF Table.
1786 */
1787 rc = lpfc_sli4_read_fcf_record(phba,
1788 LPFC_FCOE_FCF_GET_FIRST);
1789 if (rc)
1790 goto out;
1133 } 1791 }
1792
1793 return;
1134out: 1794out:
1135 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1795 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1136 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1796 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1147,10 +1807,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
1147 struct lpfc_sli *psli = &phba->sli; 1807 struct lpfc_sli *psli = &phba->sli;
1148 spin_lock_irq(&phba->hbalock); 1808 spin_lock_irq(&phba->hbalock);
1149 psli->sli_flag |= LPFC_PROCESS_LA; 1809 psli->sli_flag |= LPFC_PROCESS_LA;
1150 control = readl(phba->HCregaddr); 1810 if (phba->sli_rev <= LPFC_SLI_REV3) {
1151 control |= HC_LAINT_ENA; 1811 control = readl(phba->HCregaddr);
1152 writel(control, phba->HCregaddr); 1812 control |= HC_LAINT_ENA;
1153 readl(phba->HCregaddr); /* flush */ 1813 writel(control, phba->HCregaddr);
1814 readl(phba->HCregaddr); /* flush */
1815 }
1154 spin_unlock_irq(&phba->hbalock); 1816 spin_unlock_irq(&phba->hbalock);
1155} 1817}
1156 1818
@@ -1159,6 +1821,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1159{ 1821{
1160 lpfc_linkdown(phba); 1822 lpfc_linkdown(phba);
1161 lpfc_enable_la(phba); 1823 lpfc_enable_la(phba);
1824 lpfc_unregister_unused_fcf(phba);
1162 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1825 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1163} 1826}
1164 1827
@@ -1175,7 +1838,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1175 struct lpfc_vport *vport = pmb->vport; 1838 struct lpfc_vport *vport = pmb->vport;
1176 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1839 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1177 READ_LA_VAR *la; 1840 READ_LA_VAR *la;
1178 MAILBOX_t *mb = &pmb->mb; 1841 MAILBOX_t *mb = &pmb->u.mb;
1179 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1842 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1180 1843
1181 /* Unblock ELS traffic */ 1844 /* Unblock ELS traffic */
@@ -1190,7 +1853,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1190 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1853 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1191 } 1854 }
1192 1855
1193 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; 1856 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
1194 1857
1195 memcpy(&phba->alpa_map[0], mp->virt, 128); 1858 memcpy(&phba->alpa_map[0], mp->virt, 128);
1196 1859
@@ -1328,7 +1991,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1328static void 1991static void
1329lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1992lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1330{ 1993{
1331 MAILBOX_t *mb = &pmb->mb; 1994 MAILBOX_t *mb = &pmb->u.mb;
1332 struct lpfc_vport *vport = pmb->vport; 1995 struct lpfc_vport *vport = pmb->vport;
1333 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1996 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1334 1997
@@ -1381,7 +2044,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1381{ 2044{
1382 struct lpfc_vport *vport = pmb->vport; 2045 struct lpfc_vport *vport = pmb->vport;
1383 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2046 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1384 MAILBOX_t *mb = &pmb->mb; 2047 MAILBOX_t *mb = &pmb->u.mb;
1385 2048
1386 switch (mb->mbxStatus) { 2049 switch (mb->mbxStatus) {
1387 case 0x0011: 2050 case 0x0011:
@@ -1416,6 +2079,128 @@ out:
1416 return; 2079 return;
1417} 2080}
1418 2081
2082/**
2083 * lpfc_create_static_vport - Read HBA config region to create static vports.
2084 * @phba: pointer to lpfc hba data structure.
2085 *
2086 * This routine issues a DUMP mailbox command for config region 22 to get
2087 * the list of static vports to be created. The function creates vports
2088 * based on the information returned from the HBA.
2089 **/
2090void
2091lpfc_create_static_vport(struct lpfc_hba *phba)
2092{
2093 LPFC_MBOXQ_t *pmb = NULL;
2094 MAILBOX_t *mb;
2095 struct static_vport_info *vport_info;
2096 int rc, i;
2097 struct fc_vport_identifiers vport_id;
2098 struct fc_vport *new_fc_vport;
2099 struct Scsi_Host *shost;
2100 struct lpfc_vport *vport;
2101 uint16_t offset = 0;
2102 uint8_t *vport_buff;
2103
2104 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2105 if (!pmb) {
2106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2107 "0542 lpfc_create_static_vport failed to"
2108 " allocate mailbox memory\n");
2109 return;
2110 }
2111
2112 mb = &pmb->u.mb;
2113
2114 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
2115 if (!vport_info) {
2116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2117 "0543 lpfc_create_static_vport failed to"
2118 " allocate vport_info\n");
2119 mempool_free(pmb, phba->mbox_mem_pool);
2120 return;
2121 }
2122
2123 vport_buff = (uint8_t *) vport_info;
2124 do {
2125 lpfc_dump_static_vport(phba, pmb, offset);
2126 pmb->vport = phba->pport;
2127 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
2128
2129 if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
2130 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2131 "0544 lpfc_create_static_vport failed to"
2132 " issue dump mailbox command ret 0x%x "
2133 "status 0x%x\n",
2134 rc, mb->mbxStatus);
2135 goto out;
2136 }
2137
2138 if (mb->un.varDmp.word_cnt >
2139 sizeof(struct static_vport_info) - offset)
2140 mb->un.varDmp.word_cnt =
2141 sizeof(struct static_vport_info) - offset;
2142
2143 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
2144 vport_buff + offset,
2145 mb->un.varDmp.word_cnt);
2146 offset += mb->un.varDmp.word_cnt;
2147
2148 } while (mb->un.varDmp.word_cnt &&
2149 offset < sizeof(struct static_vport_info));
2150
2151
2152 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
2153 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
2154 != VPORT_INFO_REV)) {
2155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2156 "0545 lpfc_create_static_vport bad"
2157 " information header 0x%x 0x%x\n",
2158 le32_to_cpu(vport_info->signature),
2159 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
2160
2161 goto out;
2162 }
2163
2164 shost = lpfc_shost_from_vport(phba->pport);
2165
2166 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
2167 memset(&vport_id, 0, sizeof(vport_id));
2168 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
2169 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
2170 if (!vport_id.port_name || !vport_id.node_name)
2171 continue;
2172
2173 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
2174 vport_id.vport_type = FC_PORTTYPE_NPIV;
2175 vport_id.disable = false;
2176 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
2177
2178 if (!new_fc_vport) {
2179 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2180 "0546 lpfc_create_static_vport failed to"
2181 " create vport \n");
2182 continue;
2183 }
2184
2185 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
2186 vport->vport_flag |= STATIC_VPORT;
2187 }
2188
2189out:
2190 /*
2191	 * If this is a timed-out command, setting context2 to NULL tells
2192	 * the SLI layer not to use this buffer.
2193 */
2194 spin_lock_irq(&phba->hbalock);
2195 pmb->context2 = NULL;
2196 spin_unlock_irq(&phba->hbalock);
2197 kfree(vport_info);
2198 if (rc != MBX_TIMEOUT)
2199 mempool_free(pmb, phba->mbox_mem_pool);
2200
2201 return;
2202}
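
The DUMP loop above rebuilds static_vport_info from repeated mailbox responses: each pass clamps the returned count so the copy cannot run past the end of the structure, then advances offset until either the region or the structure is exhausted. The same control flow in a standalone form; CHUNK and TOTAL are arbitrary demo sizes, not the driver's values:

#include <stdio.h>
#include <string.h>

#define CHUNK   27	/* what one "mailbox response" returns */
#define TOTAL  100	/* size of the structure being rebuilt */

int main(void)
{
	char src[TOTAL], dst[TOTAL];
	unsigned int offset = 0, cnt;

	memset(src, 'v', sizeof(src));
	do {
		cnt = CHUNK;
		if (cnt > TOTAL - offset)
			cnt = TOTAL - offset;	/* clamp, as the driver does */
		memcpy(dst + offset, src + offset, cnt);
		offset += cnt;
	} while (cnt && offset < TOTAL);

	printf("copied %u bytes in chunks of %d\n", offset, CHUNK);
	return 0;
}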
2203
1419/* 2204/*
1420 * This routine handles processing a Fabric REG_LOGIN mailbox 2205 * This routine handles processing a Fabric REG_LOGIN mailbox
1421 * command upon completion. It is setup in the LPFC_MBOXQ 2206 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1426,16 +2211,17 @@ void
1426lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2211lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1427{ 2212{
1428 struct lpfc_vport *vport = pmb->vport; 2213 struct lpfc_vport *vport = pmb->vport;
1429 MAILBOX_t *mb = &pmb->mb; 2214 MAILBOX_t *mb = &pmb->u.mb;
1430 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2215 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1431 struct lpfc_nodelist *ndlp; 2216 struct lpfc_nodelist *ndlp;
1432 struct lpfc_vport **vports;
1433 int i;
1434 2217
1435 ndlp = (struct lpfc_nodelist *) pmb->context2; 2218 ndlp = (struct lpfc_nodelist *) pmb->context2;
1436 pmb->context1 = NULL; 2219 pmb->context1 = NULL;
1437 pmb->context2 = NULL; 2220 pmb->context2 = NULL;
1438 if (mb->mbxStatus) { 2221 if (mb->mbxStatus) {
2222 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2223 "0258 Register Fabric login error: 0x%x\n",
2224 mb->mbxStatus);
1439 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2225 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1440 kfree(mp); 2226 kfree(mp);
1441 mempool_free(pmb, phba->mbox_mem_pool); 2227 mempool_free(pmb, phba->mbox_mem_pool);
@@ -1454,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1454 } 2240 }
1455 2241
1456 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2242 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1457 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1458 "0258 Register Fabric login error: 0x%x\n",
1459 mb->mbxStatus);
1460 /* Decrement the reference count to ndlp after the reference 2243 /* Decrement the reference count to ndlp after the reference
1461 * to the ndlp are done. 2244 * to the ndlp are done.
1462 */ 2245 */
@@ -1465,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1465 } 2248 }
1466 2249
1467 ndlp->nlp_rpi = mb->un.varWords[0]; 2250 ndlp->nlp_rpi = mb->un.varWords[0];
2251 ndlp->nlp_flag |= NLP_RPI_VALID;
1468 ndlp->nlp_type |= NLP_FABRIC; 2252 ndlp->nlp_type |= NLP_FABRIC;
1469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2253 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1470 2254
1471 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2255 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1472 vports = lpfc_create_vport_work_array(phba); 2256 lpfc_start_fdiscs(phba);
1473 if (vports != NULL)
1474 for(i = 0;
1475 i <= phba->max_vpi && vports[i] != NULL;
1476 i++) {
1477 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1478 continue;
1479 if (phba->fc_topology == TOPOLOGY_LOOP) {
1480 lpfc_vport_set_state(vports[i],
1481 FC_VPORT_LINKDOWN);
1482 continue;
1483 }
1484 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1485 lpfc_initial_fdisc(vports[i]);
1486 else {
1487 lpfc_vport_set_state(vports[i],
1488 FC_VPORT_NO_FABRIC_SUPP);
1489 lpfc_printf_vlog(vport, KERN_ERR,
1490 LOG_ELS,
1491 "0259 No NPIV "
1492 "Fabric support\n");
1493 }
1494 }
1495 lpfc_destroy_vport_work_array(phba, vports);
1496 lpfc_do_scr_ns_plogi(phba, vport); 2257 lpfc_do_scr_ns_plogi(phba, vport);
1497 } 2258 }
1498 2259
@@ -1516,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1516void 2277void
1517lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2278lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1518{ 2279{
1519 MAILBOX_t *mb = &pmb->mb; 2280 MAILBOX_t *mb = &pmb->u.mb;
1520 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2281 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1521 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2282 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1522 struct lpfc_vport *vport = pmb->vport; 2283 struct lpfc_vport *vport = pmb->vport;
1523 2284
1524 if (mb->mbxStatus) { 2285 if (mb->mbxStatus) {
1525out: 2286out:
2287 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2288 "0260 Register NameServer error: 0x%x\n",
2289 mb->mbxStatus);
1526 /* decrement the node reference count held for this 2290 /* decrement the node reference count held for this
1527 * callback function. 2291 * callback function.
1528 */ 2292 */
@@ -1546,15 +2310,13 @@ out:
1546 return; 2310 return;
1547 } 2311 }
1548 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2312 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1549 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1550 "0260 Register NameServer error: 0x%x\n",
1551 mb->mbxStatus);
1552 return; 2313 return;
1553 } 2314 }
1554 2315
1555 pmb->context1 = NULL; 2316 pmb->context1 = NULL;
1556 2317
1557 ndlp->nlp_rpi = mb->un.varWords[0]; 2318 ndlp->nlp_rpi = mb->un.varWords[0];
2319 ndlp->nlp_flag |= NLP_RPI_VALID;
1558 ndlp->nlp_type |= NLP_FABRIC; 2320 ndlp->nlp_type |= NLP_FABRIC;
1559 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2321 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1560 2322
@@ -2055,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
2055 if (pring->ringno == LPFC_ELS_RING) { 2817 if (pring->ringno == LPFC_ELS_RING) {
2056 switch (icmd->ulpCommand) { 2818 switch (icmd->ulpCommand) {
2057 case CMD_GEN_REQUEST64_CR: 2819 case CMD_GEN_REQUEST64_CR:
2058 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) 2820 if (iocb->context_un.ndlp == ndlp)
2059 return 1; 2821 return 1;
2060 case CMD_ELS_REQUEST64_CR: 2822 case CMD_ELS_REQUEST64_CR:
2061 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) 2823 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2102,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2102 */ 2864 */
2103 psli = &phba->sli; 2865 psli = &phba->sli;
2104 rpi = ndlp->nlp_rpi; 2866 rpi = ndlp->nlp_rpi;
2105 if (rpi) { 2867 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2106 /* Now process each ring */ 2868 /* Now process each ring */
2107 for (i = 0; i < psli->num_rings; i++) { 2869 for (i = 0; i < psli->num_rings; i++) {
2108 pring = &psli->ring[i]; 2870 pring = &psli->ring[i];
@@ -2150,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2150 LPFC_MBOXQ_t *mbox; 2912 LPFC_MBOXQ_t *mbox;
2151 int rc; 2913 int rc;
2152 2914
2153 if (ndlp->nlp_rpi) { 2915 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2154 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2916 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2155 if (mbox) { 2917 if (mbox) {
2156 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 2918 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2162,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2162 } 2924 }
2163 lpfc_no_rpi(phba, ndlp); 2925 lpfc_no_rpi(phba, ndlp);
2164 ndlp->nlp_rpi = 0; 2926 ndlp->nlp_rpi = 0;
2927 ndlp->nlp_flag &= ~NLP_RPI_VALID;
2165 return 1; 2928 return 1;
2166 } 2929 }
2167 return 0; 2930 return 0;
@@ -2252,7 +3015,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2252 3015
2253 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 3016 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
2254 if ((mb = phba->sli.mbox_active)) { 3017 if ((mb = phba->sli.mbox_active)) {
2255 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3018 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2256 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3019 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2257 mb->context2 = NULL; 3020 mb->context2 = NULL;
2258 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3021 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2261,7 +3024,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2261 3024
2262 spin_lock_irq(&phba->hbalock); 3025 spin_lock_irq(&phba->hbalock);
2263 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 3026 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
2264 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3027 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2265 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3028 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2266 mp = (struct lpfc_dmabuf *) (mb->context1); 3029 mp = (struct lpfc_dmabuf *) (mb->context1);
2267 if (mp) { 3030 if (mp) {
@@ -2309,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2309 int rc; 3072 int rc;
2310 3073
2311 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3074 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2312 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { 3075 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
3076 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
2313 /* For this case we need to cleanup the default rpi 3077 /* For this case we need to cleanup the default rpi
2314 * allocated by the firmware. 3078 * allocated by the firmware.
2315 */ 3079 */
2316 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 3080 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2317 != NULL) { 3081 != NULL) {
2318 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, 3082 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
2319 (uint8_t *) &vport->fc_sparam, mbox, 0); 3083 (uint8_t *) &vport->fc_sparam, mbox, 0);
2320 if (rc) { 3084 if (rc) {
2321 mempool_free(mbox, phba->mbox_mem_pool); 3085 mempool_free(mbox, phba->mbox_mem_pool);
@@ -2553,7 +3317,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2553 * clear_la then don't send it. 3317 * clear_la then don't send it.
2554 */ 3318 */
2555 if ((phba->link_state >= LPFC_CLEAR_LA) || 3319 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2556 (vport->port_type != LPFC_PHYSICAL_PORT)) 3320 (vport->port_type != LPFC_PHYSICAL_PORT) ||
3321 (phba->sli_rev == LPFC_SLI_REV4))
2557 return; 3322 return;
2558 3323
2559 /* Link up discovery */ 3324 /* Link up discovery */
@@ -2582,7 +3347,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2582 3347
2583 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3348 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2584 if (regvpimbox) { 3349 if (regvpimbox) {
2585 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); 3350 lpfc_reg_vpi(vport, regvpimbox);
2586 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 3351 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2587 regvpimbox->vport = vport; 3352 regvpimbox->vport = vport;
2588 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 3353 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2642,7 +3407,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
2642 */ 3407 */
2643 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3408 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2644 !(vport->fc_flag & FC_PT2PT) && 3409 !(vport->fc_flag & FC_PT2PT) &&
2645 !(vport->fc_flag & FC_RSCN_MODE)) { 3410 !(vport->fc_flag & FC_RSCN_MODE) &&
3411 (phba->sli_rev < LPFC_SLI_REV4)) {
2646 lpfc_issue_reg_vpi(phba, vport); 3412 lpfc_issue_reg_vpi(phba, vport);
2647 return; 3413 return;
2648 } 3414 }
@@ -2919,11 +3685,13 @@ restart_disc:
2919 * set port_state to PORT_READY if SLI2. 3685 * set port_state to PORT_READY if SLI2.
2920 * cmpl_reg_vpi will set port_state to READY for SLI3. 3686 * cmpl_reg_vpi will set port_state to READY for SLI3.
2921 */ 3687 */
2922 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3688 if (phba->sli_rev < LPFC_SLI_REV4) {
2923 lpfc_issue_reg_vpi(phba, vport); 3689 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2924 else { /* NPIV Not enabled */ 3690 lpfc_issue_reg_vpi(phba, vport);
2925 lpfc_issue_clear_la(phba, vport); 3691 else { /* NPIV Not enabled */
2926 vport->port_state = LPFC_VPORT_READY; 3692 lpfc_issue_clear_la(phba, vport);
3693 vport->port_state = LPFC_VPORT_READY;
3694 }
2927 } 3695 }
2928 3696
2929 /* Setup and issue mailbox INITIALIZE LINK command */ 3697 /* Setup and issue mailbox INITIALIZE LINK command */
@@ -2939,7 +3707,7 @@ restart_disc:
2939 lpfc_linkdown(phba); 3707 lpfc_linkdown(phba);
2940 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 3708 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2941 phba->cfg_link_speed); 3709 phba->cfg_link_speed);
2942 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3710 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
2943 initlinkmbox->vport = vport; 3711 initlinkmbox->vport = vport;
2944 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3712 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2945 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 3713 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -2959,11 +3727,13 @@ restart_disc:
2959 * set port_state to PORT_READY if SLI2. 3727 * set port_state to PORT_READY if SLI2.
2960 * cmpl_reg_vpi will set port_state to READY for SLI3. 3728 * cmpl_reg_vpi will set port_state to READY for SLI3.
2961 */ 3729 */
2962 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3730 if (phba->sli_rev < LPFC_SLI_REV4) {
2963 lpfc_issue_reg_vpi(phba, vport); 3731 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2964 else { /* NPIV Not enabled */ 3732 lpfc_issue_reg_vpi(phba, vport);
2965 lpfc_issue_clear_la(phba, vport); 3733 else { /* NPIV Not enabled */
2966 vport->port_state = LPFC_VPORT_READY; 3734 lpfc_issue_clear_la(phba, vport);
3735 vport->port_state = LPFC_VPORT_READY;
3736 }
2967 } 3737 }
2968 break; 3738 break;
2969 3739
@@ -3036,7 +3806,7 @@ restart_disc:
3036void 3806void
3037lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3807lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3038{ 3808{
3039 MAILBOX_t *mb = &pmb->mb; 3809 MAILBOX_t *mb = &pmb->u.mb;
3040 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3810 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3041 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3811 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3042 struct lpfc_vport *vport = pmb->vport; 3812 struct lpfc_vport *vport = pmb->vport;
@@ -3044,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3044 pmb->context1 = NULL; 3814 pmb->context1 = NULL;
3045 3815
3046 ndlp->nlp_rpi = mb->un.varWords[0]; 3816 ndlp->nlp_rpi = mb->un.varWords[0];
3817 ndlp->nlp_flag |= NLP_RPI_VALID;
3047 ndlp->nlp_type |= NLP_FABRIC; 3818 ndlp->nlp_type |= NLP_FABRIC;
3048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3819 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3049 3820
@@ -3297,3 +4068,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
3297 return 1; 4068 return 1;
3298 return 0; 4069 return 0;
3299} 4070}
4071
4072/**
4073 * lpfc_fcf_inuse - Check if FCF can be unregistered.
4074 * @phba: Pointer to hba context object.
4075 *
4076 * This function iterates through all FC nodes associated
4077 * with all vports to check if there is any node with an
4078 * fc_rport associated with it. If there is an fc_rport
4079 * associated with the node, then the node is either in
4080 * discovered state or its devloss_timer is pending.
4081 */
4082static int
4083lpfc_fcf_inuse(struct lpfc_hba *phba)
4084{
4085 struct lpfc_vport **vports;
4086 int i, ret = 0;
4087 struct lpfc_nodelist *ndlp;
4088 struct Scsi_Host *shost;
4089
4090 vports = lpfc_create_vport_work_array(phba);
4091
4092 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4093 shost = lpfc_shost_from_vport(vports[i]);
4094 spin_lock_irq(shost->host_lock);
4095 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4096 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
4097 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
4098 ret = 1;
4099 spin_unlock_irq(shost->host_lock);
4100 goto out;
4101 }
4102 }
4103 spin_unlock_irq(shost->host_lock);
4104 }
4105out:
4106 lpfc_destroy_vport_work_array(phba, vports);
4107 return ret;
4108}
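
lpfc_fcf_inuse() uses the common kernel idiom for bailing out of a nested, lock-protected scan: drop the inner lock at the point of the hit and jump to one cleanup label instead of duplicating the teardown. A lock-free standalone reduction of that control flow (demo_* names hypothetical):

#include <stdio.h>

#define NVPORTS 3
#define NNODES  4

/* 1 if any node on any vport is still "in use" (demo predicate). */
static int demo_fcf_inuse(int nodes[NVPORTS][NNODES])
{
	int i, j, ret = 0;

	for (i = 0; i < NVPORTS; i++) {
		/* lock(vport[i]) would go here */
		for (j = 0; j < NNODES; j++) {
			if (nodes[i][j]) {
				ret = 1;
				/* unlock(vport[i]) would go here */
				goto out;
			}
		}
		/* unlock(vport[i]) would go here */
	}
out:
	return ret;
}

int main(void)
{
	int nodes[NVPORTS][NNODES] = { { 0 }, { 0, 0, 1, 0 }, { 0 } };

	printf("fcf in use: %d\n", demo_fcf_inuse(nodes));
	return 0;
}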
4109
4110/**
4111 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
4112 * @phba: Pointer to hba context object.
4113 * @mboxq: Pointer to mailbox object.
4114 *
4115 * This function frees memory associated with the mailbox command.
4116 */
4117static void
4118lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4119{
4120 struct lpfc_vport *vport = mboxq->vport;
4121
4122 if (mboxq->u.mb.mbxStatus) {
4123 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4124 "2555 UNREG_VFI mbxStatus error x%x "
4125 "HBA state x%x\n",
4126 mboxq->u.mb.mbxStatus, vport->port_state);
4127 }
4128 mempool_free(mboxq, phba->mbox_mem_pool);
4129 return;
4130}
4131
4132/**
4133 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
4134 * @phba: Pointer to hba context object.
4135 * @mboxq: Pointer to mailbox object.
4136 *
4137 * This function frees memory associated with the mailbox command.
4138 */
4139static void
4140lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4141{
4142 struct lpfc_vport *vport = mboxq->vport;
4143
4144 if (mboxq->u.mb.mbxStatus) {
4145 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4146 "2550 UNREG_FCFI mbxStatus error x%x "
4147 "HBA state x%x\n",
4148 mboxq->u.mb.mbxStatus, vport->port_state);
4149 }
4150 mempool_free(mboxq, phba->mbox_mem_pool);
4151 return;
4152}
4153
4154/**
4155 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
4156 * @phba: Pointer to hba context object.
4157 *
4158 * This function checks if there are any connected remote ports for the FCF
4159 * and, if all the devices are disconnected, unregisters the FCFI.
4160 * This function also tries to use another FCF for discovery.
4161 */
4162void
4163lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4164{
4165 LPFC_MBOXQ_t *mbox;
4166 int rc;
4167 struct lpfc_vport **vports;
4168 int i;
4169
4170 spin_lock_irq(&phba->hbalock);
4171 /*
4172	 * If the HBA is not running in FIP mode, or
4173	 * does not support FCoE, or
4174	 * the FCF is not registered,
4175	 * do nothing.
4176 */
4177 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4178 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4179 (phba->cfg_enable_fip == 0)) {
4180 spin_unlock_irq(&phba->hbalock);
4181 return;
4182 }
4183 spin_unlock_irq(&phba->hbalock);
4184
4185 if (lpfc_fcf_inuse(phba))
4186 return;
4187
4188
4189 /* Unregister VPIs */
4190 vports = lpfc_create_vport_work_array(phba);
4191 if (vports &&
4192 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4193 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4194 lpfc_mbx_unreg_vpi(vports[i]);
4195 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4196 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
4197 }
4198 lpfc_destroy_vport_work_array(phba, vports);
4199
4200 /* Unregister VFI */
4201 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4202 if (!mbox) {
4203 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4204 "2556 UNREG_VFI mbox allocation failed"
4205 "HBA state x%x\n",
4206 phba->pport->port_state);
4207 return;
4208 }
4209
4210 lpfc_unreg_vfi(mbox, phba->pport->vfi);
4211 mbox->vport = phba->pport;
4212 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4213
4214 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4215 if (rc == MBX_NOT_FINISHED) {
4216 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4217 "2557 UNREG_VFI issue mbox failed rc x%x "
4218 "HBA state x%x\n",
4219 rc, phba->pport->port_state);
4220 mempool_free(mbox, phba->mbox_mem_pool);
4221 return;
4222 }
4223
4224 /* Unregister FCF */
4225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4226 if (!mbox) {
4227 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4228 "2551 UNREG_FCFI mbox allocation failed"
4229 "HBA state x%x\n",
4230 phba->pport->port_state);
4231 return;
4232 }
4233
4234 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4235 mbox->vport = phba->pport;
4236 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4237 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4238
4239 if (rc == MBX_NOT_FINISHED) {
4240 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4241 "2552 UNREG_FCFI issue mbox failed rc x%x "
4242 "HBA state x%x\n",
4243 rc, phba->pport->port_state);
4244 mempool_free(mbox, phba->mbox_mem_pool);
4245 return;
4246 }
4247
4248 spin_lock_irq(&phba->hbalock);
4249 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
4250 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
4251 FCF_VALID_VLAN);
4252 spin_unlock_irq(&phba->hbalock);
4253
4254 /*
4255 * If driver is not unloading, check if there is any other
4256 * FCF record that can be used for discovery.
4257 */
4258 if ((phba->pport->load_flag & FC_UNLOADING) ||
4259 (phba->link_state < LPFC_LINK_UP))
4260 return;
4261
4262 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
4263
4264 if (rc)
4265 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4266 "2553 lpfc_unregister_unused_fcf failed to read FCF"
4267 " record HBA state x%x\n",
4268 phba->pport->port_state);
4269}
4270
4271/**
4272 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
4273 * @phba: Pointer to hba context object.
4274 * @buff: Buffer containing the FCF connection table as in the config
4275 * region.
4276 * This function creates the driver data structure for the FCF connection
4277 * record table read from config region 23.
4278 */
4279static void
4280lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4281 uint8_t *buff)
4282{
4283 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4284 struct lpfc_fcf_conn_hdr *conn_hdr;
4285 struct lpfc_fcf_conn_rec *conn_rec;
4286 uint32_t record_count;
4287 int i;
4288
4289 /* Free the current connect table */
4290 list_for_each_entry_safe(conn_entry, next_conn_entry,
4291 &phba->fcf_conn_rec_list, list)
4292 kfree(conn_entry);
4293
4294 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4295 record_count = conn_hdr->length * sizeof(uint32_t)/
4296 sizeof(struct lpfc_fcf_conn_rec);
4297
4298 conn_rec = (struct lpfc_fcf_conn_rec *)
4299 (buff + sizeof(struct lpfc_fcf_conn_hdr));
4300
4301 for (i = 0; i < record_count; i++) {
4302 if (!(conn_rec[i].flags & FCFCNCT_VALID))
4303 continue;
4304 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
4305 GFP_KERNEL);
4306 if (!conn_entry) {
4307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4308 "2566 Failed to allocate connection"
4309 " table entry\n");
4310 return;
4311 }
4312
4313 memcpy(&conn_entry->conn_rec, &conn_rec[i],
4314 sizeof(struct lpfc_fcf_conn_rec));
4315 conn_entry->conn_rec.vlan_tag =
4316 le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
4317 conn_entry->conn_rec.flags =
4318 le16_to_cpu(conn_entry->conn_rec.flags);
4319 list_add_tail(&conn_entry->list,
4320 &phba->fcf_conn_rec_list);
4321 }
4322}
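
The connection-table header stores its length in 32-bit words, so the record count computed above is length * 4 divided by the fixed record size, and each copied record's vlan_tag is masked down to its 12 valid bits. A small worked restatement; demo_conn_rec is a hypothetical layout, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct demo_conn_rec {
	uint16_t flags, vlan_tag;
	uint8_t fabric_name[8];
};	/* 12 bytes */

int main(void)
{
	uint8_t hdr_length_words = 12;	/* length field from the TLV header */
	uint32_t record_count =
		hdr_length_words * sizeof(uint32_t) / sizeof(struct demo_conn_rec);
	uint16_t raw_tag = 0xF064;

	printf("records: %u\n", record_count);		/* 48 / 12 = 4 */
	printf("vlan id: %u\n", raw_tag & 0xFFF);	/* 12-bit mask */
	return 0;
}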
4323
4324/**
4325 * lpfc_read_fcoe_param - Read FCoE parameters from config region.
4326 * @phba: Pointer to hba context object.
4327 * @buff: Buffer containing the FCoE parameter data structure.
4328 *
4329 * This function updates the driver data structure with config
4330 * parameters read from config region 23.
4331 */
4332static void
4333lpfc_read_fcoe_param(struct lpfc_hba *phba,
4334 uint8_t *buff)
4335{
4336 struct lpfc_fip_param_hdr *fcoe_param_hdr;
4337 struct lpfc_fcoe_params *fcoe_param;
4338
4339 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4340 buff;
4341 fcoe_param = (struct lpfc_fcoe_params *)
4342		(buff + sizeof(struct lpfc_fip_param_hdr));
4343
4344 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4345 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4346 return;
4347
4348 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4349 FIPP_MODE_ON)
4350 phba->cfg_enable_fip = 1;
4351
4352 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4353 FIPP_MODE_OFF)
4354 phba->cfg_enable_fip = 0;
4355
4356 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4357 phba->valid_vlan = 1;
4358 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
4359 0xFFF;
4360 }
4361
4362 phba->fc_map[0] = fcoe_param->fc_map[0];
4363 phba->fc_map[1] = fcoe_param->fc_map[1];
4364 phba->fc_map[2] = fcoe_param->fc_map[2];
4365 return;
4366}
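
One detail worth calling out in the function above: the parameter block pointer must be computed as a byte offset before the cast, i.e. (struct lpfc_fcoe_params *)(buff + sizeof(hdr)); casting first and then adding would scale the addition by the target struct size. A short illustration of the precedence trap, with hypothetical 8- and 16-byte structs:

#include <stdint.h>
#include <stdio.h>

struct hdr  { uint32_t w[2]; };	/* 8 bytes */
struct body { uint32_t w[4]; };	/* 16 bytes */

int main(void)
{
	uint8_t buff[64] = { 0 };
	struct body *right = (struct body *)(buff + sizeof(struct hdr));
	struct body *wrong = (struct body *)buff + sizeof(struct hdr);

	/* right lands just past the header; wrong skips 8 whole structs */
	printf("right offset: %ld\n", (long)((uint8_t *)right - buff));	/* 8 */
	printf("wrong offset: %ld\n", (long)((uint8_t *)wrong - buff));	/* 128 */
	return 0;
}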
4367
4368/**
4369 * lpfc_get_rec_conf23 - Get a record type in config region data.
4370 * @buff: Buffer containing config region 23 data.
4371 * @size: Size of the data buffer.
4372 * @rec_type: Record type to be searched.
4373 *
4374 * This function searches config region data to find the beginning
4375 * of the record specified by record_type. If the record is found, this
4376 * function returns a pointer to the record, else it returns NULL.
4377 */
4378static uint8_t *
4379lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
4380{
4381 uint32_t offset = 0, rec_length;
4382
4383 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
4384 (size < sizeof(uint32_t)))
4385 return NULL;
4386
4387 rec_length = buff[offset + 1];
4388
4389 /*
4390	 * One TLV record has a one-word header and the number of data words
4391	 * specified in the rec_length field of the record header.
4392 */
4393 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
4394 <= size) {
4395 if (buff[offset] == rec_type)
4396 return &buff[offset];
4397
4398 if (buff[offset] == LPFC_REGION23_LAST_REC)
4399 return NULL;
4400
4401 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
4402 rec_length = buff[offset + 1];
4403 }
4404 return NULL;
4405}
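
Region 23 is a flat sequence of TLV records: byte 0 of each header word is the type, byte 1 is the payload length in words, and the data words follow. lpfc_get_rec_conf23() walks that layout until it finds the requested type, hits LPFC_REGION23_LAST_REC, or would step past the buffer. A standalone walker under the same assumed layout, with the bounds check done slightly more defensively than the driver's loop:

#include <stdint.h>
#include <stdio.h>

#define LAST_REC 0xFF	/* stand-in for LPFC_REGION23_LAST_REC */

static uint8_t *find_rec(uint8_t *buf, uint32_t size, uint8_t type)
{
	uint32_t off = 0;

	while (off + 4 <= size) {
		uint32_t len_words = buf[off + 1];

		if (buf[off] == type)
			return &buf[off];
		if (buf[off] == LAST_REC)
			return NULL;
		if (off + len_words * 4 + 4 > size)
			return NULL;	/* record would overrun the buffer */
		off += len_words * 4 + 4;	/* header word + data words */
	}
	return NULL;
}

int main(void)
{
	/* type 0xA0 with 1 data word, then type 0x05, then the end marker */
	uint8_t buf[16] = { 0xA0, 1, 0, 0,  0xDE, 0xAD, 0xBE, 0xEF,
			    0x05, 0, 0, 0,  LAST_REC, 0, 0, 0 };

	printf("found 0x05 at offset %ld\n",
	       (long)(find_rec(buf, sizeof(buf), 0x05) - buf));	/* 8 */
	return 0;
}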
4406
4407/**
4408 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
4409 * @phba: Pointer to lpfc_hba data structure.
4410 * @buff: Buffer containing config region 23 data.
4411 * @size: Size of the data buffer.
4412 *
4413 * This function parses the FCoE config parameters in config region 23 and
4414 * populates the driver data structure with the parameters.
4415 */
4416void
4417lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
4418 uint8_t *buff,
4419 uint32_t size)
4420{
4421 uint32_t offset = 0, rec_length;
4422 uint8_t *rec_ptr;
4423
4424 /*
4425	 * If the data size is less than 2 words, the signature and version
4426	 * cannot be verified.
4427 */
4428 if (size < 2*sizeof(uint32_t))
4429 return;
4430
4431 /* Check the region signature first */
4432 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
4433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4434 "2567 Config region 23 has bad signature\n");
4435 return;
4436 }
4437
4438 offset += 4;
4439
4440 /* Check the data structure version */
4441 if (buff[offset] != LPFC_REGION23_VERSION) {
4442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4443 "2568 Config region 23 has bad version\n");
4444 return;
4445 }
4446 offset += 4;
4447
4448 rec_length = buff[offset + 1];
4449
4450 /* Read FCoE param record */
4451 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4452 size - offset, FCOE_PARAM_TYPE);
4453 if (rec_ptr)
4454 lpfc_read_fcoe_param(phba, rec_ptr);
4455
4456 /* Read FCF connection table */
4457 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4458 size - offset, FCOE_CONN_TBL_TYPE);
4459 if (rec_ptr)
4460 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
4461
4462}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b8..02aa016b93e9 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */
471}; 471};
472 472
473/* 473/*
474 * Virtual Fabric Tagging Header
475 */
476struct fc_vft_header {
477 uint32_t word0;
478#define fc_vft_hdr_r_ctl_SHIFT 24
479#define fc_vft_hdr_r_ctl_MASK 0xFF
480#define fc_vft_hdr_r_ctl_WORD word0
481#define fc_vft_hdr_ver_SHIFT 22
482#define fc_vft_hdr_ver_MASK 0x3
483#define fc_vft_hdr_ver_WORD word0
484#define fc_vft_hdr_type_SHIFT 18
485#define fc_vft_hdr_type_MASK 0xF
486#define fc_vft_hdr_type_WORD word0
487#define fc_vft_hdr_e_SHIFT 16
488#define fc_vft_hdr_e_MASK 0x1
489#define fc_vft_hdr_e_WORD word0
490#define fc_vft_hdr_priority_SHIFT 13
491#define fc_vft_hdr_priority_MASK 0x7
492#define fc_vft_hdr_priority_WORD word0
493#define fc_vft_hdr_vf_id_SHIFT 1
494#define fc_vft_hdr_vf_id_MASK 0xFFF
495#define fc_vft_hdr_vf_id_WORD word0
496 uint32_t word1;
497#define fc_vft_hdr_hopct_SHIFT 24
498#define fc_vft_hdr_hopct_MASK 0xFF
499#define fc_vft_hdr_hopct_WORD word1
500};
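
Each *_SHIFT/*_MASK/*_WORD triple above exists to feed the driver's token-pasting bit-field accessors, bf_get() and bf_set() from lpfc_hw4.h. A self-contained restatement of that pattern; the macro bodies are believed to match the driver's canonical form but are re-stated here as an illustration, and the demo_* names are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct demo_vft {
	uint32_t word0;
};
#define demo_vf_id_SHIFT 1
#define demo_vf_id_MASK  0xFFF
#define demo_vf_id_WORD  word0

int main(void)
{
	struct demo_vft v = { 0 };

	bf_set(demo_vf_id, &v, 42);	/* pack VF_ID into bits 12:1 */
	printf("vf_id=%u word0=0x%08x\n", bf_get(demo_vf_id, &v), v.word0);
	return 0;
}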
501
502/*
474 * Extended Link Service LS_COMMAND codes (Payload Word 0) 503 * Extended Link Service LS_COMMAND codes (Payload Word 0)
475 */ 504 */
476#ifdef __BIG_ENDIAN_BITFIELD 505#ifdef __BIG_ENDIAN_BITFIELD
@@ -1152,6 +1181,9 @@ typedef struct {
1152#define PCI_DEVICE_ID_HORNET 0xfe05 1181#define PCI_DEVICE_ID_HORNET 0xfe05
1153#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1182#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1154#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1186#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
1155 1187
1156#define JEDEC_ID_ADDRESS 0x0080001c 1188#define JEDEC_ID_ADDRESS 0x0080001c
1157#define FIREFLY_JEDEC_ID 0x1ACC 1189#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */
1342#define MBX_READ_LA64 0x95 1374#define MBX_READ_LA64 0x95
1343#define MBX_REG_VPI 0x96 1375#define MBX_REG_VPI 0x96
1344#define MBX_UNREG_VPI 0x97 1376#define MBX_UNREG_VPI 0x97
1345#define MBX_REG_VNPID 0x96
1346#define MBX_UNREG_VNPID 0x97
1347 1377
1348#define MBX_WRITE_WWN 0x98 1378#define MBX_WRITE_WWN 0x98
1349#define MBX_SET_DEBUG 0x99 1379#define MBX_SET_DEBUG 0x99
1350#define MBX_LOAD_EXP_ROM 0x9C 1380#define MBX_LOAD_EXP_ROM 0x9C
1351 1381#define MBX_SLI4_CONFIG 0x9B
1352#define MBX_MAX_CMDS 0x9D 1382#define MBX_SLI4_REQ_FTRS 0x9D
1383#define MBX_MAX_CMDS 0x9E
1384#define MBX_RESUME_RPI 0x9E
1353#define MBX_SLI2_CMD_MASK 0x80 1385#define MBX_SLI2_CMD_MASK 0x80
1386#define MBX_REG_VFI 0x9F
1387#define MBX_REG_FCFI 0xA0
1388#define MBX_UNREG_VFI 0xA1
1389#define MBX_UNREG_FCFI 0xA2
1390#define MBX_INIT_VFI 0xA3
1391#define MBX_INIT_VPI 0xA4
1354 1392
1355/* IOCB Commands */ 1393/* IOCB Commands */
1356 1394
@@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */
1440#define CMD_IOCB_LOGENTRY_CN 0x94 1478#define CMD_IOCB_LOGENTRY_CN 0x94
1441#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1479#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1442 1480
1481/* Unhandled Data Security SLI Commands */
1482#define DSSCMD_IWRITE64_CR 0xD8
1483#define DSSCMD_IWRITE64_CX 0xD9
1484#define DSSCMD_IREAD64_CR 0xDA
1485#define DSSCMD_IREAD64_CX 0xDB
1486#define DSSCMD_INVALIDATE_DEK 0xDC
1487#define DSSCMD_SET_KEK 0xDD
1488#define DSSCMD_GET_KEK_ID 0xDE
1489#define DSSCMD_GEN_XFER 0xDF
1490
1443#define CMD_MAX_IOCB_CMD 0xE6 1491#define CMD_MAX_IOCB_CMD 0xE6
1444#define CMD_IOCB_MASK 0xff 1492#define CMD_IOCB_MASK 0xff
1445 1493
@@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */
1466#define MBXERR_BAD_RCV_LENGTH 14 1514#define MBXERR_BAD_RCV_LENGTH 14
1467#define MBXERR_DMA_ERROR 15 1515#define MBXERR_DMA_ERROR 15
1468#define MBXERR_ERROR 16 1516#define MBXERR_ERROR 16
1517#define MBXERR_LINK_DOWN 0x33
1469#define MBX_NOT_FINISHED 255 1518#define MBX_NOT_FINISHED 255
1470 1519
1471#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ 1520#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1553,6 @@ struct ulp_bde {
1504#endif 1553#endif
1505}; 1554};
1506 1555
1507struct ulp_bde64 { /* SLI-2 */
1508 union ULP_BDE_TUS {
1509 uint32_t w;
1510 struct {
1511#ifdef __BIG_ENDIAN_BITFIELD
1512 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1513 VALUE !! */
1514 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1515#else /* __LITTLE_ENDIAN_BITFIELD */
1516 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1517 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1518 VALUE !! */
1519#endif
1520#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
1521#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
1522#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
1523#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
1524#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
1525#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
1526#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
1527 } f;
1528 } tus;
1529 uint32_t addrLow;
1530 uint32_t addrHigh;
1531};
1532
1533typedef struct ULP_BDL { /* SLI-2 */ 1556typedef struct ULP_BDL { /* SLI-2 */
1534#ifdef __BIG_ENDIAN_BITFIELD 1557#ifdef __BIG_ENDIAN_BITFIELD
1535 uint32_t bdeFlags:8; /* BDL Flags */ 1558 uint32_t bdeFlags:8; /* BDL Flags */
@@ -2287,7 +2310,7 @@ typedef struct {
2287 uint32_t rsvd3; 2310 uint32_t rsvd3;
2288 uint32_t rsvd4; 2311 uint32_t rsvd4;
2289 uint32_t rsvd5; 2312 uint32_t rsvd5;
2290 uint16_t rsvd6; 2313 uint16_t vfi;
2291 uint16_t vpi; 2314 uint16_t vpi;
2292#else /* __LITTLE_ENDIAN */ 2315#else /* __LITTLE_ENDIAN */
2293 uint32_t rsvd1; 2316 uint32_t rsvd1;
@@ -2297,7 +2320,7 @@ typedef struct {
2297 uint32_t rsvd4; 2320 uint32_t rsvd4;
2298 uint32_t rsvd5; 2321 uint32_t rsvd5;
2299 uint16_t vpi; 2322 uint16_t vpi;
2300 uint16_t rsvd6; 2323 uint16_t vfi;
2301#endif 2324#endif
2302} REG_VPI_VAR; 2325} REG_VPI_VAR;
2303 2326
@@ -2457,7 +2480,7 @@ typedef struct {
2457 uint32_t entry_index:16; 2480 uint32_t entry_index:16;
2458#endif 2481#endif
2459 2482
2460 uint32_t rsvd1; 2483 uint32_t sli4_length;
2461 uint32_t word_cnt; 2484 uint32_t word_cnt;
2462 uint32_t resp_offset; 2485 uint32_t resp_offset;
2463} DUMP_VAR; 2486} DUMP_VAR;
@@ -2470,9 +2493,32 @@ typedef struct {
2470#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2493#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2471#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2494#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2472 2495
2496#define DMP_REGION_VPORT 0x16 /* VPort info region */
2497#define DMP_VPORT_REGION_SIZE 0x200
2498#define DMP_MBOX_OFFSET_WORD 0x5
2499
2500#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
2501#define DMP_FCOEPARAM_RGN_SIZE 0x400
2502
2473#define WAKE_UP_PARMS_REGION_ID 4 2503#define WAKE_UP_PARMS_REGION_ID 4
2474#define WAKE_UP_PARMS_WORD_SIZE 15 2504#define WAKE_UP_PARMS_WORD_SIZE 15
2475 2505
2506struct vport_rec {
2507 uint8_t wwpn[8];
2508 uint8_t wwnn[8];
2509};
2510
2511#define VPORT_INFO_SIG 0x32324752
2512#define VPORT_INFO_REV_MASK 0xff
2513#define VPORT_INFO_REV 0x1
2514#define MAX_STATIC_VPORT_COUNT 16
2515struct static_vport_info {
2516 uint32_t signature;
2517 uint32_t rev;
2518 struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
2519 uint32_t resvd[66];
2520};
2521
2476/* Option rom version structure */ 2522/* Option rom version structure */
2477struct prog_id { 2523struct prog_id {
2478#ifdef __BIG_ENDIAN_BITFIELD 2524#ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2743,9 @@ typedef struct {
2697#endif 2743#endif
2698 2744
2699#ifdef __BIG_ENDIAN_BITFIELD 2745#ifdef __BIG_ENDIAN_BITFIELD
2700 uint32_t rsvd1 : 23; /* Reserved */ 2746 uint32_t rsvd1 : 19; /* Reserved */
2747 uint32_t cdss : 1; /* Configure Data Security SLI */
2748 uint32_t rsvd2 : 3; /* Reserved */
2701 uint32_t cbg : 1; /* Configure BlockGuard */ 2749 uint32_t cbg : 1; /* Configure BlockGuard */
2702 uint32_t cmv : 1; /* Configure Max VPIs */ 2750 uint32_t cmv : 1; /* Configure Max VPIs */
2703 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2751 uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2717,10 +2765,14 @@ typedef struct {
2717 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2765 uint32_t ccrp : 1; /* Config Command Ring Polling */
2718 uint32_t cmv : 1; /* Configure Max VPIs */ 2766 uint32_t cmv : 1; /* Configure Max VPIs */
2719 uint32_t cbg : 1; /* Configure BlockGuard */ 2767 uint32_t cbg : 1; /* Configure BlockGuard */
2720 uint32_t rsvd1 : 23; /* Reserved */ 2768 uint32_t rsvd2 : 3; /* Reserved */
2769 uint32_t cdss : 1; /* Configure Data Security SLI */
2770 uint32_t rsvd1 : 19; /* Reserved */
2721#endif 2771#endif
2722#ifdef __BIG_ENDIAN_BITFIELD 2772#ifdef __BIG_ENDIAN_BITFIELD
2723 uint32_t rsvd2 : 23; /* Reserved */ 2773 uint32_t rsvd3 : 19; /* Reserved */
 2774 uint32_t gdss : 1; /* Grant Data Security SLI */
2775 uint32_t rsvd4 : 3; /* Reserved */
2724 uint32_t gbg : 1; /* Grant BlockGuard */ 2776 uint32_t gbg : 1; /* Grant BlockGuard */
2725 uint32_t gmv : 1; /* Grant Max VPIs */ 2777 uint32_t gmv : 1; /* Grant Max VPIs */
2726 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2778 uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2740,7 +2792,9 @@ typedef struct {
2740 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2792 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2741 uint32_t gmv : 1; /* Grant Max VPIs */ 2793 uint32_t gmv : 1; /* Grant Max VPIs */
2742 uint32_t gbg : 1; /* Grant BlockGuard */ 2794 uint32_t gbg : 1; /* Grant BlockGuard */
2743 uint32_t rsvd2 : 23; /* Reserved */ 2795 uint32_t rsvd4 : 3; /* Reserved */
 2796 uint32_t gdss : 1; /* Grant Data Security SLI */
2797 uint32_t rsvd3 : 19; /* Reserved */
2744#endif 2798#endif
2745 2799
2746#ifdef __BIG_ENDIAN_BITFIELD 2800#ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2807,20 @@ typedef struct {
2753 2807
2754#ifdef __BIG_ENDIAN_BITFIELD 2808#ifdef __BIG_ENDIAN_BITFIELD
2755 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2809 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
 2756 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2810 uint32_t rsvd5 : 16; /* Reserved */
2757#else /* __LITTLE_ENDIAN */ 2811#else /* __LITTLE_ENDIAN */
 2758 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2812 uint32_t rsvd5 : 16; /* Reserved */
2759 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2813 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2760#endif 2814#endif
2761 2815
2762 uint32_t rsvd4; /* Reserved */ 2816 uint32_t rsvd6; /* Reserved */
2763 2817
2764#ifdef __BIG_ENDIAN_BITFIELD 2818#ifdef __BIG_ENDIAN_BITFIELD
2765 uint32_t rsvd5 : 16; /* Reserved */ 2819 uint32_t rsvd7 : 16; /* Reserved */
2766 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2820 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2767#else /* __LITTLE_ENDIAN */ 2821#else /* __LITTLE_ENDIAN */
2768 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2822 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2769 uint32_t rsvd5 : 16; /* Reserved */ 2823 uint32_t rsvd7 : 16; /* Reserved */
2770#endif 2824#endif
2771 2825
2772} CONFIG_PORT_VAR; 2826} CONFIG_PORT_VAR;
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
3666#define MENLO_TIMEOUT 30 3720#define MENLO_TIMEOUT 30
3667#define SETVAR_MLOMNT 0x103107 3721#define SETVAR_MLOMNT 0x103107
3668#define SETVAR_MLORST 0x103007 3722#define SETVAR_MLORST 0x103007
3723
3724#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 000000000000..39c34b3ad29d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21/* Macros to deal with bit fields. Each bit field must have 3 #defines
22 * associated with it (_SHIFT, _MASK, and _WORD).
 23 * E.g., for a bit field that starts at bit 7 of the "field4" member of a
 24 * structure and is 2 bits wide, the following #defines must exist:
25 * struct temp {
26 * uint32_t field1;
27 * uint32_t field2;
28 * uint32_t field3;
29 * uint32_t field4;
30 * #define example_bit_field_SHIFT 7
31 * #define example_bit_field_MASK 0x03
32 * #define example_bit_field_WORD field4
33 * uint32_t field5;
34 * };
35 * Then the macros below may be used to get or set the value of that field.
 36 * E.g., to get the value of the bit field from the above example:
37 * struct temp t1;
38 * value = bf_get(example_bit_field, &t1);
39 * And then to set that bit field:
40 * bf_set(example_bit_field, &t1, 2);
41 * Or clear that bit field:
42 * bf_set(example_bit_field, &t1, 0);
43 */
44#define bf_get(name, ptr) \
45 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
46#define bf_set(name, ptr, value) \
47 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
48 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
49
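As a concrete sketch of how the accessors expand, the helper below polls the valid bit of an event queue entry (struct lpfc_eqe, defined further down in this file) and consumes it. The function is illustrative, not part of the driver.

/* bf_get(lpfc_eqe_valid, eqe) expands to
 * (((eqe)->word0 >> lpfc_eqe_valid_SHIFT) & lpfc_eqe_valid_MASK). */
static inline int example_eqe_consume(struct lpfc_eqe *eqe)
{
	if (!bf_get(lpfc_eqe_valid, eqe))
		return 0;			/* port has not written it yet */
	bf_set(lpfc_eqe_valid, eqe, 0);		/* don't process it twice */
	return 1;
}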
50struct dma_address {
51 uint32_t addr_lo;
52 uint32_t addr_hi;
53};
54
55#define LPFC_SLI4_BAR0 1
56#define LPFC_SLI4_BAR1 2
57#define LPFC_SLI4_BAR2 4
58
59#define LPFC_SLI4_MBX_EMBED true
60#define LPFC_SLI4_MBX_NEMBED false
61
62#define LPFC_SLI4_MB_WORD_COUNT 64
63#define LPFC_MAX_MQ_PAGE 8
64#define LPFC_MAX_WQ_PAGE 8
65#define LPFC_MAX_CQ_PAGE 4
66#define LPFC_MAX_EQ_PAGE 8
67
68#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */
69#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */
70#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */
71
72/* Define SLI4 Alignment requirements. */
73#define LPFC_ALIGN_16_BYTE 16
74#define LPFC_ALIGN_64_BYTE 64
75
76/* Define SLI4 specific definitions. */
77#define LPFC_MQ_CQE_BYTE_OFFSET 256
78#define LPFC_MBX_CMD_HDR_LENGTH 16
79#define LPFC_MBX_ERROR_RANGE 0x4000
80#define LPFC_BMBX_BIT1_ADDR_HI 0x2
81#define LPFC_BMBX_BIT1_ADDR_LO 0
82#define LPFC_RPI_HDR_COUNT 64
83#define LPFC_HDR_TEMPLATE_SIZE 4096
84#define LPFC_RPI_ALLOC_ERROR 0xFFFF
85#define LPFC_FCF_RECORD_WD_CNT 132
86#define LPFC_ENTIRE_FCF_DATABASE 0
87#define LPFC_DFLT_FCF_INDEX 0
88
89/* Virtual function numbers */
90#define LPFC_VF0 0
91#define LPFC_VF1 1
92#define LPFC_VF2 2
93#define LPFC_VF3 3
94#define LPFC_VF4 4
95#define LPFC_VF5 5
96#define LPFC_VF6 6
97#define LPFC_VF7 7
98#define LPFC_VF8 8
99#define LPFC_VF9 9
100#define LPFC_VF10 10
101#define LPFC_VF11 11
102#define LPFC_VF12 12
103#define LPFC_VF13 13
104#define LPFC_VF14 14
105#define LPFC_VF15 15
106#define LPFC_VF16 16
107#define LPFC_VF17 17
108#define LPFC_VF18 18
109#define LPFC_VF19 19
110#define LPFC_VF20 20
111#define LPFC_VF21 21
112#define LPFC_VF22 22
113#define LPFC_VF23 23
114#define LPFC_VF24 24
115#define LPFC_VF25 25
116#define LPFC_VF26 26
117#define LPFC_VF27 27
118#define LPFC_VF28 28
119#define LPFC_VF29 29
120#define LPFC_VF30 30
121#define LPFC_VF31 31
122
123/* PCI function numbers */
124#define LPFC_PCI_FUNC0 0
125#define LPFC_PCI_FUNC1 1
126#define LPFC_PCI_FUNC2 2
127#define LPFC_PCI_FUNC3 3
128#define LPFC_PCI_FUNC4 4
129
130/* Active interrupt test count */
131#define LPFC_ACT_INTR_CNT 4
132
133/* Delay Multiplier constant */
134#define LPFC_DMULT_CONST 651042
135#define LPFC_MIM_IMAX 636
136#define LPFC_FP_DEF_IMAX 10000
137#define LPFC_SP_DEF_IMAX 10000
138
139struct ulp_bde64 {
140 union ULP_BDE_TUS {
141 uint32_t w;
142 struct {
143#ifdef __BIG_ENDIAN_BITFIELD
144 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
145 VALUE !! */
146 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
147#else /* __LITTLE_ENDIAN_BITFIELD */
148 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
149 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
150 VALUE !! */
151#endif
152#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
153#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
154#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
155#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
156#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
157#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
158#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
159 } f;
160 } tus;
161 uint32_t addrLow;
162 uint32_t addrHigh;
163};
164
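A minimal sketch of filling one 64-bit BDE for a host-resident buffer; the raw casts stand in for the driver's usual address helpers, and the function name is hypothetical.

static inline void example_fill_bde(struct ulp_bde64 *bde,
				    uint64_t paddr, uint32_t len)
{
	bde->addrLow        = (uint32_t)(paddr & 0xffffffffULL);
	bde->addrHigh       = (uint32_t)(paddr >> 32);
	bde->tus.f.bdeSize  = len;			/* 24-bit byte count */
	bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;		/* host-resident BDE */
}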
165struct lpfc_sli4_flags {
166 uint32_t word0;
167#define lpfc_fip_flag_SHIFT 0
168#define lpfc_fip_flag_MASK 0x00000001
169#define lpfc_fip_flag_WORD word0
170};
171
172/* event queue entry structure */
173struct lpfc_eqe {
174 uint32_t word0;
175#define lpfc_eqe_resource_id_SHIFT 16
176#define lpfc_eqe_resource_id_MASK 0x000000FF
177#define lpfc_eqe_resource_id_WORD word0
178#define lpfc_eqe_minor_code_SHIFT 4
179#define lpfc_eqe_minor_code_MASK 0x00000FFF
180#define lpfc_eqe_minor_code_WORD word0
181#define lpfc_eqe_major_code_SHIFT 1
182#define lpfc_eqe_major_code_MASK 0x00000007
183#define lpfc_eqe_major_code_WORD word0
184#define lpfc_eqe_valid_SHIFT 0
185#define lpfc_eqe_valid_MASK 0x00000001
186#define lpfc_eqe_valid_WORD word0
187};
188
189/* completion queue entry structure (common fields for all cqe types) */
190struct lpfc_cqe {
191 uint32_t reserved0;
192 uint32_t reserved1;
193 uint32_t reserved2;
194 uint32_t word3;
195#define lpfc_cqe_valid_SHIFT 31
196#define lpfc_cqe_valid_MASK 0x00000001
197#define lpfc_cqe_valid_WORD word3
198#define lpfc_cqe_code_SHIFT 16
199#define lpfc_cqe_code_MASK 0x000000FF
200#define lpfc_cqe_code_WORD word3
201};
202
203/* Completion Queue Entry Status Codes */
204#define CQE_STATUS_SUCCESS 0x0
205#define CQE_STATUS_FCP_RSP_FAILURE 0x1
206#define CQE_STATUS_REMOTE_STOP 0x2
207#define CQE_STATUS_LOCAL_REJECT 0x3
208#define CQE_STATUS_NPORT_RJT 0x4
209#define CQE_STATUS_FABRIC_RJT 0x5
210#define CQE_STATUS_NPORT_BSY 0x6
211#define CQE_STATUS_FABRIC_BSY 0x7
212#define CQE_STATUS_INTERMED_RSP 0x8
213#define CQE_STATUS_LS_RJT 0x9
214#define CQE_STATUS_CMD_REJECT 0xb
215#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
216#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
217
218/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
219#define CQE_HW_STATUS_NO_ERR 0x0
220#define CQE_HW_STATUS_UNDERRUN 0x1
221#define CQE_HW_STATUS_OVERRUN 0x2
222
223/* Completion Queue Entry Codes */
224#define CQE_CODE_COMPL_WQE 0x1
225#define CQE_CODE_RELEASE_WQE 0x2
226#define CQE_CODE_RECEIVE 0x4
227#define CQE_CODE_XRI_ABORTED 0x5
228
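Because every CQE type shares the valid bit and code field of struct lpfc_cqe, a consumer can dispatch on the common header before casting to the specific entry type. A hedged sketch, with the handlers left as comments:

static void example_handle_cqe(struct lpfc_cqe *cqe)
{
	if (!bf_get(lpfc_cqe_valid, cqe))
		return;				/* entry not yet valid */
	switch (bf_get(lpfc_cqe_code, cqe)) {
	case CQE_CODE_COMPL_WQE:	/* cast to struct lpfc_wcqe_complete */
	case CQE_CODE_RELEASE_WQE:	/* cast to struct lpfc_wcqe_release */
	case CQE_CODE_RECEIVE:		/* cast to struct lpfc_rcqe */
	case CQE_CODE_XRI_ABORTED:	/* cast to struct sli4_wcqe_xri_aborted */
	default:
		break;			/* handlers omitted in this sketch */
	}
}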
229/* completion queue entry for wqe completions */
230struct lpfc_wcqe_complete {
231 uint32_t word0;
232#define lpfc_wcqe_c_request_tag_SHIFT 16
233#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF
234#define lpfc_wcqe_c_request_tag_WORD word0
235#define lpfc_wcqe_c_status_SHIFT 8
236#define lpfc_wcqe_c_status_MASK 0x000000FF
237#define lpfc_wcqe_c_status_WORD word0
238#define lpfc_wcqe_c_hw_status_SHIFT 0
239#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
240#define lpfc_wcqe_c_hw_status_WORD word0
241 uint32_t total_data_placed;
242 uint32_t parameter;
243 uint32_t word3;
244#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
245#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
246#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD
247#define lpfc_wcqe_c_xb_SHIFT 28
248#define lpfc_wcqe_c_xb_MASK 0x00000001
249#define lpfc_wcqe_c_xb_WORD word3
250#define lpfc_wcqe_c_pv_SHIFT 27
251#define lpfc_wcqe_c_pv_MASK 0x00000001
252#define lpfc_wcqe_c_pv_WORD word3
253#define lpfc_wcqe_c_priority_SHIFT 24
254#define lpfc_wcqe_c_priority_MASK 0x00000007
255#define lpfc_wcqe_c_priority_WORD word3
256#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
257#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
258#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
259};
260
261/* completion queue entry for wqe release */
262struct lpfc_wcqe_release {
263 uint32_t reserved0;
264 uint32_t reserved1;
265 uint32_t word2;
266#define lpfc_wcqe_r_wq_id_SHIFT 16
267#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF
268#define lpfc_wcqe_r_wq_id_WORD word2
269#define lpfc_wcqe_r_wqe_index_SHIFT 0
270#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF
271#define lpfc_wcqe_r_wqe_index_WORD word2
272 uint32_t word3;
273#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT
274#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK
275#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD
276#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT
277#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK
278#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD
279};
280
281struct sli4_wcqe_xri_aborted {
282 uint32_t word0;
283#define lpfc_wcqe_xa_status_SHIFT 8
284#define lpfc_wcqe_xa_status_MASK 0x000000FF
285#define lpfc_wcqe_xa_status_WORD word0
286 uint32_t parameter;
287 uint32_t word2;
288#define lpfc_wcqe_xa_remote_xid_SHIFT 16
289#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF
290#define lpfc_wcqe_xa_remote_xid_WORD word2
291#define lpfc_wcqe_xa_xri_SHIFT 0
292#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF
293#define lpfc_wcqe_xa_xri_WORD word2
294 uint32_t word3;
295#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT
296#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK
297#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD
298#define lpfc_wcqe_xa_ia_SHIFT 30
299#define lpfc_wcqe_xa_ia_MASK 0x00000001
300#define lpfc_wcqe_xa_ia_WORD word3
301#define CQE_XRI_ABORTED_IA_REMOTE 0
302#define CQE_XRI_ABORTED_IA_LOCAL 1
303#define lpfc_wcqe_xa_br_SHIFT 29
304#define lpfc_wcqe_xa_br_MASK 0x00000001
305#define lpfc_wcqe_xa_br_WORD word3
306#define CQE_XRI_ABORTED_BR_BA_ACC 0
307#define CQE_XRI_ABORTED_BR_BA_RJT 1
308#define lpfc_wcqe_xa_eo_SHIFT 28
309#define lpfc_wcqe_xa_eo_MASK 0x00000001
310#define lpfc_wcqe_xa_eo_WORD word3
311#define CQE_XRI_ABORTED_EO_REMOTE 0
312#define CQE_XRI_ABORTED_EO_LOCAL 1
313#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT
314#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK
315#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD
316};
317
318/* completion queue entry structure for rqe completion */
319struct lpfc_rcqe {
320 uint32_t word0;
321#define lpfc_rcqe_bindex_SHIFT 16
 322#define lpfc_rcqe_bindex_MASK 0x00000FFF
323#define lpfc_rcqe_bindex_WORD word0
324#define lpfc_rcqe_status_SHIFT 8
325#define lpfc_rcqe_status_MASK 0x000000FF
326#define lpfc_rcqe_status_WORD word0
327#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */
328#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
329#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
330#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
331 uint32_t reserved1;
332 uint32_t word2;
333#define lpfc_rcqe_length_SHIFT 16
334#define lpfc_rcqe_length_MASK 0x0000FFFF
335#define lpfc_rcqe_length_WORD word2
336#define lpfc_rcqe_rq_id_SHIFT 6
337#define lpfc_rcqe_rq_id_MASK 0x000003FF
338#define lpfc_rcqe_rq_id_WORD word2
339#define lpfc_rcqe_fcf_id_SHIFT 0
340#define lpfc_rcqe_fcf_id_MASK 0x0000003F
341#define lpfc_rcqe_fcf_id_WORD word2
342 uint32_t word3;
343#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
344#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
345#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD
346#define lpfc_rcqe_port_SHIFT 30
347#define lpfc_rcqe_port_MASK 0x00000001
348#define lpfc_rcqe_port_WORD word3
349#define lpfc_rcqe_hdr_length_SHIFT 24
350#define lpfc_rcqe_hdr_length_MASK 0x0000001F
351#define lpfc_rcqe_hdr_length_WORD word3
352#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT
353#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK
354#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD
355#define lpfc_rcqe_eof_SHIFT 8
356#define lpfc_rcqe_eof_MASK 0x000000FF
357#define lpfc_rcqe_eof_WORD word3
358#define FCOE_EOFn 0x41
359#define FCOE_EOFt 0x42
360#define FCOE_EOFni 0x49
361#define FCOE_EOFa 0x50
362#define lpfc_rcqe_sof_SHIFT 0
363#define lpfc_rcqe_sof_MASK 0x000000FF
364#define lpfc_rcqe_sof_WORD word3
365#define FCOE_SOFi2 0x2d
366#define FCOE_SOFi3 0x2e
367#define FCOE_SOFn2 0x35
368#define FCOE_SOFn3 0x36
369};
370
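A sketch of acting on the FC_STATUS_* codes above: only RQ_SUCCESS means a frame actually landed in the receive queue, while the other codes ask the host to repost buffers or accept that the frame was dropped. The function is illustrative.

static int example_rcqe_frame_ok(struct lpfc_rcqe *rcqe)
{
	switch (bf_get(lpfc_rcqe_status, rcqe)) {
	case FC_STATUS_RQ_SUCCESS:
		return 1;				/* frame received */
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:		/* payload truncated */
	case FC_STATUS_INSUFF_BUF_NEED_BUF:		/* repost RQEs */
	case FC_STATUS_INSUFF_BUF_FRM_DISC:		/* frame discarded */
	default:
		return 0;
	}
}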
 371struct lpfc_wqe_generic {
372 struct ulp_bde64 bde;
373 uint32_t word3;
374 uint32_t word4;
375 uint32_t word5;
376 uint32_t word6;
377#define lpfc_wqe_gen_context_SHIFT 16
378#define lpfc_wqe_gen_context_MASK 0x0000FFFF
379#define lpfc_wqe_gen_context_WORD word6
380#define lpfc_wqe_gen_xri_SHIFT 0
381#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
382#define lpfc_wqe_gen_xri_WORD word6
383 uint32_t word7;
384#define lpfc_wqe_gen_lnk_SHIFT 23
385#define lpfc_wqe_gen_lnk_MASK 0x00000001
386#define lpfc_wqe_gen_lnk_WORD word7
387#define lpfc_wqe_gen_erp_SHIFT 22
388#define lpfc_wqe_gen_erp_MASK 0x00000001
389#define lpfc_wqe_gen_erp_WORD word7
390#define lpfc_wqe_gen_pu_SHIFT 20
391#define lpfc_wqe_gen_pu_MASK 0x00000003
392#define lpfc_wqe_gen_pu_WORD word7
393#define lpfc_wqe_gen_class_SHIFT 16
394#define lpfc_wqe_gen_class_MASK 0x00000007
395#define lpfc_wqe_gen_class_WORD word7
396#define lpfc_wqe_gen_command_SHIFT 8
397#define lpfc_wqe_gen_command_MASK 0x000000FF
398#define lpfc_wqe_gen_command_WORD word7
399#define lpfc_wqe_gen_status_SHIFT 4
400#define lpfc_wqe_gen_status_MASK 0x0000000F
401#define lpfc_wqe_gen_status_WORD word7
402#define lpfc_wqe_gen_ct_SHIFT 2
403#define lpfc_wqe_gen_ct_MASK 0x00000007
404#define lpfc_wqe_gen_ct_WORD word7
405 uint32_t abort_tag;
406 uint32_t word9;
407#define lpfc_wqe_gen_request_tag_SHIFT 0
408#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
409#define lpfc_wqe_gen_request_tag_WORD word9
410 uint32_t word10;
411#define lpfc_wqe_gen_ccp_SHIFT 24
412#define lpfc_wqe_gen_ccp_MASK 0x000000FF
413#define lpfc_wqe_gen_ccp_WORD word10
414#define lpfc_wqe_gen_ccpe_SHIFT 23
415#define lpfc_wqe_gen_ccpe_MASK 0x00000001
416#define lpfc_wqe_gen_ccpe_WORD word10
417#define lpfc_wqe_gen_pv_SHIFT 19
418#define lpfc_wqe_gen_pv_MASK 0x00000001
419#define lpfc_wqe_gen_pv_WORD word10
420#define lpfc_wqe_gen_pri_SHIFT 16
421#define lpfc_wqe_gen_pri_MASK 0x00000007
422#define lpfc_wqe_gen_pri_WORD word10
423 uint32_t word11;
424#define lpfc_wqe_gen_cq_id_SHIFT 16
425#define lpfc_wqe_gen_cq_id_MASK 0x000003FF
426#define lpfc_wqe_gen_cq_id_WORD word11
427#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
428#define lpfc_wqe_gen_wqec_SHIFT 7
429#define lpfc_wqe_gen_wqec_MASK 0x00000001
430#define lpfc_wqe_gen_wqec_WORD word11
431#define lpfc_wqe_gen_cmd_type_SHIFT 0
432#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
433#define lpfc_wqe_gen_cmd_type_WORD word11
434 uint32_t payload[4];
435};
436
437struct lpfc_rqe {
438 uint32_t address_hi;
439 uint32_t address_lo;
440};
441
442/* buffer descriptors */
443struct lpfc_bde4 {
444 uint32_t addr_hi;
445 uint32_t addr_lo;
446 uint32_t word2;
447#define lpfc_bde4_last_SHIFT 31
448#define lpfc_bde4_last_MASK 0x00000001
449#define lpfc_bde4_last_WORD word2
450#define lpfc_bde4_sge_offset_SHIFT 0
451#define lpfc_bde4_sge_offset_MASK 0x000003FF
452#define lpfc_bde4_sge_offset_WORD word2
453 uint32_t word3;
454#define lpfc_bde4_length_SHIFT 0
455#define lpfc_bde4_length_MASK 0x000000FF
456#define lpfc_bde4_length_WORD word3
457};
458
459struct lpfc_register {
460 uint32_t word0;
461};
462
463#define LPFC_UERR_STATUS_HI 0x00A4
464#define LPFC_UERR_STATUS_LO 0x00A0
465#define LPFC_ONLINE0 0x00B0
466#define LPFC_ONLINE1 0x00B4
467#define LPFC_SCRATCHPAD 0x0058
468
469/* BAR0 Registers */
470#define LPFC_HST_STATE 0x00AC
471#define lpfc_hst_state_perr_SHIFT 31
472#define lpfc_hst_state_perr_MASK 0x1
473#define lpfc_hst_state_perr_WORD word0
474#define lpfc_hst_state_sfi_SHIFT 30
475#define lpfc_hst_state_sfi_MASK 0x1
476#define lpfc_hst_state_sfi_WORD word0
477#define lpfc_hst_state_nip_SHIFT 29
478#define lpfc_hst_state_nip_MASK 0x1
479#define lpfc_hst_state_nip_WORD word0
480#define lpfc_hst_state_ipc_SHIFT 28
481#define lpfc_hst_state_ipc_MASK 0x1
482#define lpfc_hst_state_ipc_WORD word0
483#define lpfc_hst_state_xrom_SHIFT 27
484#define lpfc_hst_state_xrom_MASK 0x1
485#define lpfc_hst_state_xrom_WORD word0
486#define lpfc_hst_state_dl_SHIFT 26
487#define lpfc_hst_state_dl_MASK 0x1
488#define lpfc_hst_state_dl_WORD word0
489#define lpfc_hst_state_port_status_SHIFT 0
490#define lpfc_hst_state_port_status_MASK 0xFFFF
491#define lpfc_hst_state_port_status_WORD word0
492
493#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
494#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
495#define LPFC_POST_STAGE_HOST_RDY 0x0002
496#define LPFC_POST_STAGE_BE_RESET 0x0003
497#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100
498#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101
499#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200
500#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201
501#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300
502#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301
503#define LPFC_POST_STAGE_DDR_TEST_START 0x0400
504#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401
505#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600
506#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601
507#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700
508#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701
509#define LPFC_POST_STAGE_ARMFW_START 0x0800
510#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900
511#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901
512#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00
513#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01
514#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00
515#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01
516#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02
517#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03
518#define LPFC_POST_STAGE_PARSE_XML 0x0B04
519#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05
520#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06
521#define LPFC_POST_STAGE_RC_DONE 0x0B07
522#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
523#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
524#define LPFC_POST_STAGE_ARMFW_READY 0xC000
525#define LPFC_POST_STAGE_ARMFW_UE 0xF000
526
527#define lpfc_scratchpad_slirev_SHIFT 4
528#define lpfc_scratchpad_slirev_MASK 0xF
529#define lpfc_scratchpad_slirev_WORD word0
530#define lpfc_scratchpad_chiptype_SHIFT 8
531#define lpfc_scratchpad_chiptype_MASK 0xFF
532#define lpfc_scratchpad_chiptype_WORD word0
533#define lpfc_scratchpad_featurelevel1_SHIFT 16
534#define lpfc_scratchpad_featurelevel1_MASK 0xFF
535#define lpfc_scratchpad_featurelevel1_WORD word0
536#define lpfc_scratchpad_featurelevel2_SHIFT 24
537#define lpfc_scratchpad_featurelevel2_MASK 0xFF
538#define lpfc_scratchpad_featurelevel2_WORD word0
539
540/* BAR1 Registers */
541#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
542#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
543
544#define LPFC_HST_ISR0 0x0C18
545#define LPFC_HST_ISR1 0x0C1C
546#define LPFC_HST_ISR2 0x0C20
547#define LPFC_HST_ISR3 0x0C24
548#define LPFC_HST_ISR4 0x0C28
549
550#define LPFC_HST_IMR0 0x0C48
551#define LPFC_HST_IMR1 0x0C4C
552#define LPFC_HST_IMR2 0x0C50
553#define LPFC_HST_IMR3 0x0C54
554#define LPFC_HST_IMR4 0x0C58
555
556#define LPFC_HST_ISCR0 0x0C78
557#define LPFC_HST_ISCR1 0x0C7C
558#define LPFC_HST_ISCR2 0x0C80
559#define LPFC_HST_ISCR3 0x0C84
560#define LPFC_HST_ISCR4 0x0C88
561
562#define LPFC_SLI4_INTR0 BIT0
563#define LPFC_SLI4_INTR1 BIT1
564#define LPFC_SLI4_INTR2 BIT2
565#define LPFC_SLI4_INTR3 BIT3
566#define LPFC_SLI4_INTR4 BIT4
567#define LPFC_SLI4_INTR5 BIT5
568#define LPFC_SLI4_INTR6 BIT6
569#define LPFC_SLI4_INTR7 BIT7
570#define LPFC_SLI4_INTR8 BIT8
571#define LPFC_SLI4_INTR9 BIT9
572#define LPFC_SLI4_INTR10 BIT10
573#define LPFC_SLI4_INTR11 BIT11
574#define LPFC_SLI4_INTR12 BIT12
575#define LPFC_SLI4_INTR13 BIT13
576#define LPFC_SLI4_INTR14 BIT14
577#define LPFC_SLI4_INTR15 BIT15
578#define LPFC_SLI4_INTR16 BIT16
579#define LPFC_SLI4_INTR17 BIT17
580#define LPFC_SLI4_INTR18 BIT18
581#define LPFC_SLI4_INTR19 BIT19
582#define LPFC_SLI4_INTR20 BIT20
583#define LPFC_SLI4_INTR21 BIT21
584#define LPFC_SLI4_INTR22 BIT22
585#define LPFC_SLI4_INTR23 BIT23
586#define LPFC_SLI4_INTR24 BIT24
587#define LPFC_SLI4_INTR25 BIT25
588#define LPFC_SLI4_INTR26 BIT26
589#define LPFC_SLI4_INTR27 BIT27
590#define LPFC_SLI4_INTR28 BIT28
591#define LPFC_SLI4_INTR29 BIT29
592#define LPFC_SLI4_INTR30 BIT30
593#define LPFC_SLI4_INTR31 BIT31
594
595/* BAR2 Registers */
596#define LPFC_RQ_DOORBELL 0x00A0
597#define lpfc_rq_doorbell_num_posted_SHIFT 16
598#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
599#define lpfc_rq_doorbell_num_posted_WORD word0
600#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
601#define lpfc_rq_doorbell_id_SHIFT 0
602#define lpfc_rq_doorbell_id_MASK 0x03FF
603#define lpfc_rq_doorbell_id_WORD word0
604
605#define LPFC_WQ_DOORBELL 0x0040
606#define lpfc_wq_doorbell_num_posted_SHIFT 24
607#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
608#define lpfc_wq_doorbell_num_posted_WORD word0
609#define lpfc_wq_doorbell_index_SHIFT 16
610#define lpfc_wq_doorbell_index_MASK 0x00FF
611#define lpfc_wq_doorbell_index_WORD word0
612#define lpfc_wq_doorbell_id_SHIFT 0
613#define lpfc_wq_doorbell_id_MASK 0xFFFF
614#define lpfc_wq_doorbell_id_WORD word0
615
616#define LPFC_EQCQ_DOORBELL 0x0120
617#define lpfc_eqcq_doorbell_arm_SHIFT 29
618#define lpfc_eqcq_doorbell_arm_MASK 0x0001
619#define lpfc_eqcq_doorbell_arm_WORD word0
620#define lpfc_eqcq_doorbell_num_released_SHIFT 16
621#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF
622#define lpfc_eqcq_doorbell_num_released_WORD word0
623#define lpfc_eqcq_doorbell_qt_SHIFT 10
624#define lpfc_eqcq_doorbell_qt_MASK 0x0001
625#define lpfc_eqcq_doorbell_qt_WORD word0
626#define LPFC_QUEUE_TYPE_COMPLETION 0
627#define LPFC_QUEUE_TYPE_EVENT 1
628#define lpfc_eqcq_doorbell_eqci_SHIFT 9
629#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
630#define lpfc_eqcq_doorbell_eqci_WORD word0
631#define lpfc_eqcq_doorbell_cqid_SHIFT 0
632#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
633#define lpfc_eqcq_doorbell_cqid_WORD word0
634#define lpfc_eqcq_doorbell_eqid_SHIFT 0
635#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
636#define lpfc_eqcq_doorbell_eqid_WORD word0
637
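A sketch of composing the doorbell word that releases consumed EQEs and re-arms an event queue; the caller would write the result to BAR2 at offset LPFC_EQCQ_DOORBELL. The helper name is hypothetical.

static uint32_t example_eq_doorbell(uint16_t eqid, uint16_t released)
{
	struct lpfc_register db;

	db.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &db, 1);	/* clear interrupt */
	bf_set(lpfc_eqcq_doorbell_num_released, &db, released);
	bf_set(lpfc_eqcq_doorbell_arm, &db, 1);		/* re-arm the EQ */
	bf_set(lpfc_eqcq_doorbell_qt, &db, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &db, eqid);
	return db.word0;
}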
638#define LPFC_BMBX 0x0160
639#define lpfc_bmbx_addr_SHIFT 2
640#define lpfc_bmbx_addr_MASK 0x3FFFFFFF
641#define lpfc_bmbx_addr_WORD word0
642#define lpfc_bmbx_hi_SHIFT 1
643#define lpfc_bmbx_hi_MASK 0x0001
644#define lpfc_bmbx_hi_WORD word0
645#define lpfc_bmbx_rdy_SHIFT 0
646#define lpfc_bmbx_rdy_MASK 0x0001
647#define lpfc_bmbx_rdy_WORD word0
648
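The bootstrap mailbox register takes the 64-bit, 16-byte-aligned mailbox address as two writes: the upper bits first, then the lower, each shifted into the addr field (bits 31:2) with bit 1 telling the port which half arrived (the rdy bit is read back, never written). A sketch of that split, assuming a suitably aligned physical address and the bits-63:34 / bits-33:4 partition used by this patch's setup code:

static void example_bmbx_split(uint64_t phys, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(((phys >> 34) & lpfc_bmbx_addr_MASK)
			 << lpfc_bmbx_addr_SHIFT) | LPFC_BMBX_BIT1_ADDR_HI;
	*lo = (uint32_t)(((phys >> 4) & lpfc_bmbx_addr_MASK)
			 << lpfc_bmbx_addr_SHIFT) | LPFC_BMBX_BIT1_ADDR_LO;
}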
649#define LPFC_MQ_DOORBELL 0x0140
650#define lpfc_mq_doorbell_num_posted_SHIFT 16
651#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
652#define lpfc_mq_doorbell_num_posted_WORD word0
653#define lpfc_mq_doorbell_id_SHIFT 0
654#define lpfc_mq_doorbell_id_MASK 0x03FF
655#define lpfc_mq_doorbell_id_WORD word0
656
657struct lpfc_sli4_cfg_mhdr {
658 uint32_t word1;
659#define lpfc_mbox_hdr_emb_SHIFT 0
660#define lpfc_mbox_hdr_emb_MASK 0x00000001
661#define lpfc_mbox_hdr_emb_WORD word1
662#define lpfc_mbox_hdr_sge_cnt_SHIFT 3
663#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F
664#define lpfc_mbox_hdr_sge_cnt_WORD word1
665 uint32_t payload_length;
666 uint32_t tag_lo;
667 uint32_t tag_hi;
668 uint32_t reserved5;
669};
670
671union lpfc_sli4_cfg_shdr {
672 struct {
673 uint32_t word6;
674#define lpfc_mbox_hdr_opcode_SHIFT 0
675#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
676#define lpfc_mbox_hdr_opcode_WORD word6
677#define lpfc_mbox_hdr_subsystem_SHIFT 8
678#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
679#define lpfc_mbox_hdr_subsystem_WORD word6
680#define lpfc_mbox_hdr_port_number_SHIFT 16
681#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
682#define lpfc_mbox_hdr_port_number_WORD word6
683#define lpfc_mbox_hdr_domain_SHIFT 24
684#define lpfc_mbox_hdr_domain_MASK 0x000000FF
685#define lpfc_mbox_hdr_domain_WORD word6
686 uint32_t timeout;
687 uint32_t request_length;
688 uint32_t reserved9;
689 } request;
690 struct {
691 uint32_t word6;
692#define lpfc_mbox_hdr_opcode_SHIFT 0
693#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
694#define lpfc_mbox_hdr_opcode_WORD word6
695#define lpfc_mbox_hdr_subsystem_SHIFT 8
696#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
697#define lpfc_mbox_hdr_subsystem_WORD word6
698#define lpfc_mbox_hdr_domain_SHIFT 24
699#define lpfc_mbox_hdr_domain_MASK 0x000000FF
700#define lpfc_mbox_hdr_domain_WORD word6
701 uint32_t word7;
702#define lpfc_mbox_hdr_status_SHIFT 0
703#define lpfc_mbox_hdr_status_MASK 0x000000FF
704#define lpfc_mbox_hdr_status_WORD word7
705#define lpfc_mbox_hdr_add_status_SHIFT 8
706#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
707#define lpfc_mbox_hdr_add_status_WORD word7
708 uint32_t response_length;
709 uint32_t actual_response_length;
710 } response;
711};
712
713/* Mailbox structures */
714struct mbox_header {
715 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
716 union lpfc_sli4_cfg_shdr cfg_shdr;
717};
718
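Completion checking for an SLI_CONFIG command then reduces to reading the status pair out of the response view of the shared header; a minimal sketch (the function and return convention are illustrative):

static int example_cfg_shdr_ok(struct mbox_header *hdr)
{
	uint32_t status     = bf_get(lpfc_mbox_hdr_status,
				     &hdr->cfg_shdr.response);
	uint32_t add_status = bf_get(lpfc_mbox_hdr_add_status,
				     &hdr->cfg_shdr.response);

	return (status == 0 && add_status == 0);  /* 0 == STATUS_SUCCESS */
}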
719/* Subsystem Definitions */
720#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
721#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
722
723/* Device Specific Definitions */
724
725/* The HOST ENDIAN defines are in Big Endian format. */
726#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF
727#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
728
729/* Common Opcodes */
730#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
731#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
732#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
733#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
734#define LPFC_MBOX_OPCODE_NOP 0x21
735#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
736#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
737#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
738#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
739
740/* FCoE Opcodes */
741#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
742#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02
743#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03
744#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04
745#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05
746#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06
747#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08
748#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
749#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
750#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
751
752/* Mailbox command structures */
753struct eq_context {
754 uint32_t word0;
755#define lpfc_eq_context_size_SHIFT 31
756#define lpfc_eq_context_size_MASK 0x00000001
757#define lpfc_eq_context_size_WORD word0
758#define LPFC_EQE_SIZE_4 0x0
759#define LPFC_EQE_SIZE_16 0x1
760#define lpfc_eq_context_valid_SHIFT 29
761#define lpfc_eq_context_valid_MASK 0x00000001
762#define lpfc_eq_context_valid_WORD word0
763 uint32_t word1;
764#define lpfc_eq_context_count_SHIFT 26
765#define lpfc_eq_context_count_MASK 0x00000003
766#define lpfc_eq_context_count_WORD word1
767#define LPFC_EQ_CNT_256 0x0
768#define LPFC_EQ_CNT_512 0x1
769#define LPFC_EQ_CNT_1024 0x2
770#define LPFC_EQ_CNT_2048 0x3
771#define LPFC_EQ_CNT_4096 0x4
772 uint32_t word2;
773#define lpfc_eq_context_delay_multi_SHIFT 13
774#define lpfc_eq_context_delay_multi_MASK 0x000003FF
775#define lpfc_eq_context_delay_multi_WORD word2
776 uint32_t reserved3;
777};
778
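A sketch of programming a zero-filled EQ context for an EQ_CREATE request: mark it valid, pick an entry-count encoding, and set the interrupt-coalescing delay multiplier (how dmult is derived from LPFC_DMULT_CONST is left to the setup code).

static void example_init_eq_context(struct eq_context *ctx, uint32_t dmult)
{
	bf_set(lpfc_eq_context_size, ctx, LPFC_EQE_SIZE_4);	/* 4-byte EQEs */
	bf_set(lpfc_eq_context_valid, ctx, 1);
	bf_set(lpfc_eq_context_count, ctx, LPFC_EQ_CNT_1024);	/* 1024 entries */
	bf_set(lpfc_eq_context_delay_multi, ctx, dmult);
}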
779struct sgl_page_pairs {
780 uint32_t sgl_pg0_addr_lo;
781 uint32_t sgl_pg0_addr_hi;
782 uint32_t sgl_pg1_addr_lo;
783 uint32_t sgl_pg1_addr_hi;
784};
785
786struct lpfc_mbx_post_sgl_pages {
787 struct mbox_header header;
788 uint32_t word0;
789#define lpfc_post_sgl_pages_xri_SHIFT 0
790#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF
791#define lpfc_post_sgl_pages_xri_WORD word0
792#define lpfc_post_sgl_pages_xricnt_SHIFT 16
793#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF
794#define lpfc_post_sgl_pages_xricnt_WORD word0
795 struct sgl_page_pairs sgl_pg_pairs[1];
796};
797
798/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
799struct lpfc_mbx_post_uembed_sgl_page1 {
800 union lpfc_sli4_cfg_shdr cfg_shdr;
801 uint32_t word0;
802 struct sgl_page_pairs sgl_pg_pairs;
803};
804
805struct lpfc_mbx_sge {
806 uint32_t pa_lo;
807 uint32_t pa_hi;
808 uint32_t length;
809};
810
811struct lpfc_mbx_nembed_cmd {
812 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
813#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
814 struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
815};
816
817struct lpfc_mbx_nembed_sge_virt {
818 void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
819};
820
821struct lpfc_mbx_eq_create {
822 struct mbox_header header;
823 union {
824 struct {
825 uint32_t word0;
826#define lpfc_mbx_eq_create_num_pages_SHIFT 0
827#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF
828#define lpfc_mbx_eq_create_num_pages_WORD word0
829 struct eq_context context;
830 struct dma_address page[LPFC_MAX_EQ_PAGE];
831 } request;
832 struct {
833 uint32_t word0;
834#define lpfc_mbx_eq_create_q_id_SHIFT 0
835#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF
836#define lpfc_mbx_eq_create_q_id_WORD word0
837 } response;
838 } u;
839};
840
841struct lpfc_mbx_eq_destroy {
842 struct mbox_header header;
843 union {
844 struct {
845 uint32_t word0;
846#define lpfc_mbx_eq_destroy_q_id_SHIFT 0
847#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF
848#define lpfc_mbx_eq_destroy_q_id_WORD word0
849 } request;
850 struct {
851 uint32_t word0;
852 } response;
853 } u;
854};
855
856struct lpfc_mbx_nop {
857 struct mbox_header header;
858 uint32_t context[2];
859};
860
861struct cq_context {
862 uint32_t word0;
863#define lpfc_cq_context_event_SHIFT 31
864#define lpfc_cq_context_event_MASK 0x00000001
865#define lpfc_cq_context_event_WORD word0
866#define lpfc_cq_context_valid_SHIFT 29
867#define lpfc_cq_context_valid_MASK 0x00000001
868#define lpfc_cq_context_valid_WORD word0
869#define lpfc_cq_context_count_SHIFT 27
870#define lpfc_cq_context_count_MASK 0x00000003
871#define lpfc_cq_context_count_WORD word0
872#define LPFC_CQ_CNT_256 0x0
873#define LPFC_CQ_CNT_512 0x1
874#define LPFC_CQ_CNT_1024 0x2
875 uint32_t word1;
876#define lpfc_cq_eq_id_SHIFT 22
877#define lpfc_cq_eq_id_MASK 0x000000FF
878#define lpfc_cq_eq_id_WORD word1
879 uint32_t reserved0;
880 uint32_t reserved1;
881};
882
883struct lpfc_mbx_cq_create {
884 struct mbox_header header;
885 union {
886 struct {
887 uint32_t word0;
888#define lpfc_mbx_cq_create_num_pages_SHIFT 0
889#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
890#define lpfc_mbx_cq_create_num_pages_WORD word0
891 struct cq_context context;
892 struct dma_address page[LPFC_MAX_CQ_PAGE];
893 } request;
894 struct {
895 uint32_t word0;
896#define lpfc_mbx_cq_create_q_id_SHIFT 0
897#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF
898#define lpfc_mbx_cq_create_q_id_WORD word0
899 } response;
900 } u;
901};
902
903struct lpfc_mbx_cq_destroy {
904 struct mbox_header header;
905 union {
906 struct {
907 uint32_t word0;
908#define lpfc_mbx_cq_destroy_q_id_SHIFT 0
909#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF
910#define lpfc_mbx_cq_destroy_q_id_WORD word0
911 } request;
912 struct {
913 uint32_t word0;
914 } response;
915 } u;
916};
917
918struct wq_context {
919 uint32_t reserved0;
920 uint32_t reserved1;
921 uint32_t reserved2;
922 uint32_t reserved3;
923};
924
925struct lpfc_mbx_wq_create {
926 struct mbox_header header;
927 union {
928 struct {
929 uint32_t word0;
930#define lpfc_mbx_wq_create_num_pages_SHIFT 0
931#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
932#define lpfc_mbx_wq_create_num_pages_WORD word0
933#define lpfc_mbx_wq_create_cq_id_SHIFT 16
934#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
935#define lpfc_mbx_wq_create_cq_id_WORD word0
936 struct dma_address page[LPFC_MAX_WQ_PAGE];
937 } request;
938 struct {
939 uint32_t word0;
940#define lpfc_mbx_wq_create_q_id_SHIFT 0
941#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
942#define lpfc_mbx_wq_create_q_id_WORD word0
943 } response;
944 } u;
945};
946
947struct lpfc_mbx_wq_destroy {
948 struct mbox_header header;
949 union {
950 struct {
951 uint32_t word0;
952#define lpfc_mbx_wq_destroy_q_id_SHIFT 0
953#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF
954#define lpfc_mbx_wq_destroy_q_id_WORD word0
955 } request;
956 struct {
957 uint32_t word0;
958 } response;
959 } u;
960};
961
962#define LPFC_HDR_BUF_SIZE 128
963#define LPFC_DATA_BUF_SIZE 4096
964struct rq_context {
965 uint32_t word0;
966#define lpfc_rq_context_rq_size_SHIFT 16
967#define lpfc_rq_context_rq_size_MASK 0x0000000F
968#define lpfc_rq_context_rq_size_WORD word0
969#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
970#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
971#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
972#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
973 uint32_t reserved1;
974 uint32_t word2;
975#define lpfc_rq_context_cq_id_SHIFT 16
976#define lpfc_rq_context_cq_id_MASK 0x000003FF
977#define lpfc_rq_context_cq_id_WORD word2
978#define lpfc_rq_context_buf_size_SHIFT 0
979#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
980#define lpfc_rq_context_buf_size_WORD word2
981 uint32_t reserved3;
982};
983
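The rq_size field takes the log2 of the entry count (hence 9 for 512 entries). A sketch of filling a zeroed context for the header receive queue, with the completion queue id an assumed parameter:

static void example_init_hdr_rq_context(struct rq_context *ctx,
					uint16_t cq_id)
{
	bf_set(lpfc_rq_context_rq_size, ctx, LPFC_RQ_RING_SIZE_512); /* 2^9 */
	bf_set(lpfc_rq_context_cq_id, ctx, cq_id);
	bf_set(lpfc_rq_context_buf_size, ctx, LPFC_HDR_BUF_SIZE);    /* 128B */
}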
984struct lpfc_mbx_rq_create {
985 struct mbox_header header;
986 union {
987 struct {
988 uint32_t word0;
989#define lpfc_mbx_rq_create_num_pages_SHIFT 0
990#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
991#define lpfc_mbx_rq_create_num_pages_WORD word0
992 struct rq_context context;
993 struct dma_address page[LPFC_MAX_WQ_PAGE];
994 } request;
995 struct {
996 uint32_t word0;
997#define lpfc_mbx_rq_create_q_id_SHIFT 0
998#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
999#define lpfc_mbx_rq_create_q_id_WORD word0
1000 } response;
1001 } u;
1002};
1003
1004struct lpfc_mbx_rq_destroy {
1005 struct mbox_header header;
1006 union {
1007 struct {
1008 uint32_t word0;
1009#define lpfc_mbx_rq_destroy_q_id_SHIFT 0
1010#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF
1011#define lpfc_mbx_rq_destroy_q_id_WORD word0
1012 } request;
1013 struct {
1014 uint32_t word0;
1015 } response;
1016 } u;
1017};
1018
1019struct mq_context {
1020 uint32_t word0;
1021#define lpfc_mq_context_cq_id_SHIFT 22
1022#define lpfc_mq_context_cq_id_MASK 0x000003FF
1023#define lpfc_mq_context_cq_id_WORD word0
1024#define lpfc_mq_context_count_SHIFT 16
1025#define lpfc_mq_context_count_MASK 0x0000000F
1026#define lpfc_mq_context_count_WORD word0
1027#define LPFC_MQ_CNT_16 0x5
1028#define LPFC_MQ_CNT_32 0x6
1029#define LPFC_MQ_CNT_64 0x7
1030#define LPFC_MQ_CNT_128 0x8
1031 uint32_t word1;
1032#define lpfc_mq_context_valid_SHIFT 31
1033#define lpfc_mq_context_valid_MASK 0x00000001
1034#define lpfc_mq_context_valid_WORD word1
1035 uint32_t reserved2;
1036 uint32_t reserved3;
1037};
1038
1039struct lpfc_mbx_mq_create {
1040 struct mbox_header header;
1041 union {
1042 struct {
1043 uint32_t word0;
1044#define lpfc_mbx_mq_create_num_pages_SHIFT 0
1045#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF
1046#define lpfc_mbx_mq_create_num_pages_WORD word0
1047 struct mq_context context;
1048 struct dma_address page[LPFC_MAX_MQ_PAGE];
1049 } request;
1050 struct {
1051 uint32_t word0;
1052#define lpfc_mbx_mq_create_q_id_SHIFT 0
1053#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
1054#define lpfc_mbx_mq_create_q_id_WORD word0
1055 } response;
1056 } u;
1057};
1058
1059struct lpfc_mbx_mq_destroy {
1060 struct mbox_header header;
1061 union {
1062 struct {
1063 uint32_t word0;
1064#define lpfc_mbx_mq_destroy_q_id_SHIFT 0
1065#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF
1066#define lpfc_mbx_mq_destroy_q_id_WORD word0
1067 } request;
1068 struct {
1069 uint32_t word0;
1070 } response;
1071 } u;
1072};
1073
1074struct lpfc_mbx_post_hdr_tmpl {
1075 struct mbox_header header;
1076 uint32_t word10;
1077#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0
1078#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF
1079#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10
1080#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16
1081#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF
1082#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10
1083 uint32_t rpi_paddr_lo;
1084 uint32_t rpi_paddr_hi;
1085};
1086
1087struct sli4_sge { /* SLI-4 */
1088 uint32_t addr_hi;
1089 uint32_t addr_lo;
1090
1091 uint32_t word2;
 1092#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used */
1093#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
1094#define lpfc_sli4_sge_offset_WORD word2
1095#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
1096 this flag !! */
1097#define lpfc_sli4_sge_last_MASK 0x00000001
1098#define lpfc_sli4_sge_last_WORD word2
1099 uint32_t word3;
1100#define lpfc_sli4_sge_len_SHIFT 0
1101#define lpfc_sli4_sge_len_MASK 0x0001FFFF
1102#define lpfc_sli4_sge_len_WORD word3
1103};
1104
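A minimal sketch of filling one SGE; endian conversion of the words is left out, and the helper name is illustrative.

static void example_fill_sge(struct sli4_sge *sge, uint64_t paddr,
			     uint32_t len, int last)
{
	sge->addr_hi = (uint32_t)(paddr >> 32);
	sge->addr_lo = (uint32_t)(paddr & 0xffffffffULL);
	sge->word2 = 0;
	bf_set(lpfc_sli4_sge_last, sge, last ? 1 : 0);	/* ends the SGL */
	sge->word3 = 0;
	bf_set(lpfc_sli4_sge_len, sge, len);		/* 17-bit byte count */
}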
1105struct fcf_record {
1106 uint32_t max_rcv_size;
1107 uint32_t fka_adv_period;
1108 uint32_t fip_priority;
1109 uint32_t word3;
1110#define lpfc_fcf_record_mac_0_SHIFT 0
1111#define lpfc_fcf_record_mac_0_MASK 0x000000FF
1112#define lpfc_fcf_record_mac_0_WORD word3
1113#define lpfc_fcf_record_mac_1_SHIFT 8
1114#define lpfc_fcf_record_mac_1_MASK 0x000000FF
1115#define lpfc_fcf_record_mac_1_WORD word3
1116#define lpfc_fcf_record_mac_2_SHIFT 16
1117#define lpfc_fcf_record_mac_2_MASK 0x000000FF
1118#define lpfc_fcf_record_mac_2_WORD word3
1119#define lpfc_fcf_record_mac_3_SHIFT 24
1120#define lpfc_fcf_record_mac_3_MASK 0x000000FF
1121#define lpfc_fcf_record_mac_3_WORD word3
1122 uint32_t word4;
1123#define lpfc_fcf_record_mac_4_SHIFT 0
1124#define lpfc_fcf_record_mac_4_MASK 0x000000FF
1125#define lpfc_fcf_record_mac_4_WORD word4
1126#define lpfc_fcf_record_mac_5_SHIFT 8
1127#define lpfc_fcf_record_mac_5_MASK 0x000000FF
1128#define lpfc_fcf_record_mac_5_WORD word4
1129#define lpfc_fcf_record_fcf_avail_SHIFT 16
1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
 1131#define lpfc_fcf_record_fcf_avail_WORD word4
1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
1134#define lpfc_fcf_record_mac_addr_prov_WORD word4
1135#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */
1136#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */
1137 uint32_t word5;
1138#define lpfc_fcf_record_fab_name_0_SHIFT 0
1139#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF
1140#define lpfc_fcf_record_fab_name_0_WORD word5
1141#define lpfc_fcf_record_fab_name_1_SHIFT 8
1142#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF
1143#define lpfc_fcf_record_fab_name_1_WORD word5
1144#define lpfc_fcf_record_fab_name_2_SHIFT 16
1145#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF
1146#define lpfc_fcf_record_fab_name_2_WORD word5
1147#define lpfc_fcf_record_fab_name_3_SHIFT 24
1148#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF
1149#define lpfc_fcf_record_fab_name_3_WORD word5
1150 uint32_t word6;
1151#define lpfc_fcf_record_fab_name_4_SHIFT 0
1152#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF
1153#define lpfc_fcf_record_fab_name_4_WORD word6
1154#define lpfc_fcf_record_fab_name_5_SHIFT 8
1155#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF
1156#define lpfc_fcf_record_fab_name_5_WORD word6
1157#define lpfc_fcf_record_fab_name_6_SHIFT 16
1158#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF
1159#define lpfc_fcf_record_fab_name_6_WORD word6
1160#define lpfc_fcf_record_fab_name_7_SHIFT 24
1161#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF
1162#define lpfc_fcf_record_fab_name_7_WORD word6
1163 uint32_t word7;
1164#define lpfc_fcf_record_fc_map_0_SHIFT 0
1165#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF
1166#define lpfc_fcf_record_fc_map_0_WORD word7
1167#define lpfc_fcf_record_fc_map_1_SHIFT 8
1168#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF
1169#define lpfc_fcf_record_fc_map_1_WORD word7
1170#define lpfc_fcf_record_fc_map_2_SHIFT 16
1171#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
1172#define lpfc_fcf_record_fc_map_2_WORD word7
1173#define lpfc_fcf_record_fcf_valid_SHIFT 24
1174#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF
1175#define lpfc_fcf_record_fcf_valid_WORD word7
1176 uint32_t word8;
1177#define lpfc_fcf_record_fcf_index_SHIFT 0
1178#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
1179#define lpfc_fcf_record_fcf_index_WORD word8
1180#define lpfc_fcf_record_fcf_state_SHIFT 16
1181#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
1182#define lpfc_fcf_record_fcf_state_WORD word8
1183 uint8_t vlan_bitmap[512];
1184};
1185
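Reassembling the FCF MAC address from its per-byte fields is mechanical; an illustrative helper:

static void example_fcf_mac(struct fcf_record *rec, uint8_t mac[6])
{
	mac[0] = bf_get(lpfc_fcf_record_mac_0, rec);
	mac[1] = bf_get(lpfc_fcf_record_mac_1, rec);
	mac[2] = bf_get(lpfc_fcf_record_mac_2, rec);
	mac[3] = bf_get(lpfc_fcf_record_mac_3, rec);
	mac[4] = bf_get(lpfc_fcf_record_mac_4, rec);
	mac[5] = bf_get(lpfc_fcf_record_mac_5, rec);
}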
1186struct lpfc_mbx_read_fcf_tbl {
1187 union lpfc_sli4_cfg_shdr cfg_shdr;
1188 union {
1189 struct {
1190 uint32_t word10;
1191#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0
1192#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF
1193#define lpfc_mbx_read_fcf_tbl_indx_WORD word10
1194 } request;
1195 struct {
1196 uint32_t eventag;
1197 } response;
1198 } u;
1199 uint32_t word11;
1200#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0
1201#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF
1202#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11
1203};
1204
1205struct lpfc_mbx_add_fcf_tbl_entry {
1206 union lpfc_sli4_cfg_shdr cfg_shdr;
1207 uint32_t word10;
1208#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0
1209#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF
1210#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10
1211 struct lpfc_mbx_sge fcf_sge;
1212};
1213
1214struct lpfc_mbx_del_fcf_tbl_entry {
1215 struct mbox_header header;
1216 uint32_t word10;
1217#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0
1218#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF
1219#define lpfc_mbx_del_fcf_tbl_count_WORD word10
1220#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16
1221#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF
1222#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1223};
1224
1225/* Status field for embedded SLI_CONFIG mailbox command */
1226#define STATUS_SUCCESS 0x0
1227#define STATUS_FAILED 0x1
1228#define STATUS_ILLEGAL_REQUEST 0x2
1229#define STATUS_ILLEGAL_FIELD 0x3
1230#define STATUS_INSUFFICIENT_BUFFER 0x4
1231#define STATUS_UNAUTHORIZED_REQUEST 0x5
1232#define STATUS_FLASHROM_SAVE_FAILED 0x17
1233#define STATUS_FLASHROM_RESTORE_FAILED 0x18
1234#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a
1235#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b
1236#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c
1237#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d
1238#define STATUS_ASSERT_FAILED 0x1e
1239#define STATUS_INVALID_SESSION 0x1f
1240#define STATUS_INVALID_CONNECTION 0x20
1241#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21
1242#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24
1243#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25
1244#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26
1245#define STATUS_FLASHROM_READ_FAILED 0x27
1246#define STATUS_POLL_IOCTL_TIMEOUT 0x28
1247#define STATUS_ERROR_ACITMAIN 0x2a
1248#define STATUS_REBOOT_REQUIRED 0x2c
1249#define STATUS_FCF_IN_USE 0x3a
1250
1251struct lpfc_mbx_sli4_config {
1252 struct mbox_header header;
1253};
1254
1255struct lpfc_mbx_init_vfi {
1256 uint32_t word1;
1257#define lpfc_init_vfi_vr_SHIFT 31
1258#define lpfc_init_vfi_vr_MASK 0x00000001
1259#define lpfc_init_vfi_vr_WORD word1
1260#define lpfc_init_vfi_vt_SHIFT 30
1261#define lpfc_init_vfi_vt_MASK 0x00000001
1262#define lpfc_init_vfi_vt_WORD word1
1263#define lpfc_init_vfi_vf_SHIFT 29
1264#define lpfc_init_vfi_vf_MASK 0x00000001
1265#define lpfc_init_vfi_vf_WORD word1
1266#define lpfc_init_vfi_vfi_SHIFT 0
1267#define lpfc_init_vfi_vfi_MASK 0x0000FFFF
1268#define lpfc_init_vfi_vfi_WORD word1
1269 uint32_t word2;
1270#define lpfc_init_vfi_fcfi_SHIFT 0
1271#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF
1272#define lpfc_init_vfi_fcfi_WORD word2
1273 uint32_t word3;
1274#define lpfc_init_vfi_pri_SHIFT 13
1275#define lpfc_init_vfi_pri_MASK 0x00000007
1276#define lpfc_init_vfi_pri_WORD word3
1277#define lpfc_init_vfi_vf_id_SHIFT 1
1278#define lpfc_init_vfi_vf_id_MASK 0x00000FFF
1279#define lpfc_init_vfi_vf_id_WORD word3
1280 uint32_t word4;
1281#define lpfc_init_vfi_hop_count_SHIFT 24
1282#define lpfc_init_vfi_hop_count_MASK 0x000000FF
1283#define lpfc_init_vfi_hop_count_WORD word4
1284};
1285
1286struct lpfc_mbx_reg_vfi {
1287 uint32_t word1;
1288#define lpfc_reg_vfi_vp_SHIFT 28
1289#define lpfc_reg_vfi_vp_MASK 0x00000001
1290#define lpfc_reg_vfi_vp_WORD word1
1291#define lpfc_reg_vfi_vfi_SHIFT 0
1292#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF
1293#define lpfc_reg_vfi_vfi_WORD word1
1294 uint32_t word2;
1295#define lpfc_reg_vfi_vpi_SHIFT 16
1296#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF
1297#define lpfc_reg_vfi_vpi_WORD word2
1298#define lpfc_reg_vfi_fcfi_SHIFT 0
1299#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
1300#define lpfc_reg_vfi_fcfi_WORD word2
1301 uint32_t word3_rsvd;
1302 uint32_t word4_rsvd;
1303 struct ulp_bde64 bde;
1304 uint32_t word8_rsvd;
1305 uint32_t word9_rsvd;
1306 uint32_t word10;
1307#define lpfc_reg_vfi_nport_id_SHIFT 0
1308#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
1309#define lpfc_reg_vfi_nport_id_WORD word10
1310};
1311
1312struct lpfc_mbx_init_vpi {
1313 uint32_t word1;
1314#define lpfc_init_vpi_vfi_SHIFT 16
1315#define lpfc_init_vpi_vfi_MASK 0x0000FFFF
1316#define lpfc_init_vpi_vfi_WORD word1
1317#define lpfc_init_vpi_vpi_SHIFT 0
1318#define lpfc_init_vpi_vpi_MASK 0x0000FFFF
1319#define lpfc_init_vpi_vpi_WORD word1
1320};
1321
1322struct lpfc_mbx_read_vpi {
1323 uint32_t word1_rsvd;
1324 uint32_t word2;
1325#define lpfc_mbx_read_vpi_vnportid_SHIFT 0
1326#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF
1327#define lpfc_mbx_read_vpi_vnportid_WORD word2
1328 uint32_t word3_rsvd;
1329 uint32_t word4;
1330#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0
1331#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF
1332#define lpfc_mbx_read_vpi_acq_alpa_WORD word4
1333#define lpfc_mbx_read_vpi_pb_SHIFT 15
1334#define lpfc_mbx_read_vpi_pb_MASK 0x00000001
1335#define lpfc_mbx_read_vpi_pb_WORD word4
1336#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16
1337#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF
1338#define lpfc_mbx_read_vpi_spec_alpa_WORD word4
1339#define lpfc_mbx_read_vpi_ns_SHIFT 30
1340#define lpfc_mbx_read_vpi_ns_MASK 0x00000001
1341#define lpfc_mbx_read_vpi_ns_WORD word4
1342#define lpfc_mbx_read_vpi_hl_SHIFT 31
1343#define lpfc_mbx_read_vpi_hl_MASK 0x00000001
1344#define lpfc_mbx_read_vpi_hl_WORD word4
1345 uint32_t word5_rsvd;
1346 uint32_t word6;
1347#define lpfc_mbx_read_vpi_vpi_SHIFT 0
1348#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF
1349#define lpfc_mbx_read_vpi_vpi_WORD word6
1350 uint32_t word7;
1351#define lpfc_mbx_read_vpi_mac_0_SHIFT 0
1352#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF
1353#define lpfc_mbx_read_vpi_mac_0_WORD word7
1354#define lpfc_mbx_read_vpi_mac_1_SHIFT 8
1355#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF
1356#define lpfc_mbx_read_vpi_mac_1_WORD word7
1357#define lpfc_mbx_read_vpi_mac_2_SHIFT 16
1358#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF
1359#define lpfc_mbx_read_vpi_mac_2_WORD word7
1360#define lpfc_mbx_read_vpi_mac_3_SHIFT 24
1361#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF
1362#define lpfc_mbx_read_vpi_mac_3_WORD word7
1363 uint32_t word8;
1364#define lpfc_mbx_read_vpi_mac_4_SHIFT 0
1365#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF
1366#define lpfc_mbx_read_vpi_mac_4_WORD word8
1367#define lpfc_mbx_read_vpi_mac_5_SHIFT 8
1368#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF
1369#define lpfc_mbx_read_vpi_mac_5_WORD word8
1370#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16
1371#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF
1372#define lpfc_mbx_read_vpi_vlan_tag_WORD word8
1373#define lpfc_mbx_read_vpi_vv_SHIFT 28
 1374#define lpfc_mbx_read_vpi_vv_MASK 0x00000001
1375#define lpfc_mbx_read_vpi_vv_WORD word8
1376};
1377
1378struct lpfc_mbx_unreg_vfi {
1379 uint32_t word1_rsvd;
1380 uint32_t word2;
1381#define lpfc_unreg_vfi_vfi_SHIFT 0
1382#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF
1383#define lpfc_unreg_vfi_vfi_WORD word2
1384};
1385
1386struct lpfc_mbx_resume_rpi {
1387 uint32_t word1;
1388#define lpfc_resume_rpi_rpi_SHIFT 0
1389#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF
1390#define lpfc_resume_rpi_rpi_WORD word1
1391 uint32_t event_tag;
1392 uint32_t word3_rsvd;
1393 uint32_t word4_rsvd;
1394 uint32_t word5_rsvd;
1395 uint32_t word6;
1396#define lpfc_resume_rpi_vpi_SHIFT 0
1397#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
1398#define lpfc_resume_rpi_vpi_WORD word6
1399#define lpfc_resume_rpi_vfi_SHIFT 16
1400#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
1401#define lpfc_resume_rpi_vfi_WORD word6
1402};
1403
1404#define REG_FCF_INVALID_QID 0xFFFF
1405struct lpfc_mbx_reg_fcfi {
1406 uint32_t word1;
1407#define lpfc_reg_fcfi_info_index_SHIFT 0
1408#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF
1409#define lpfc_reg_fcfi_info_index_WORD word1
1410#define lpfc_reg_fcfi_fcfi_SHIFT 16
1411#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF
1412#define lpfc_reg_fcfi_fcfi_WORD word1
1413 uint32_t word2;
1414#define lpfc_reg_fcfi_rq_id1_SHIFT 0
1415#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF
1416#define lpfc_reg_fcfi_rq_id1_WORD word2
1417#define lpfc_reg_fcfi_rq_id0_SHIFT 16
1418#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF
1419#define lpfc_reg_fcfi_rq_id0_WORD word2
1420 uint32_t word3;
1421#define lpfc_reg_fcfi_rq_id3_SHIFT 0
1422#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF
1423#define lpfc_reg_fcfi_rq_id3_WORD word3
1424#define lpfc_reg_fcfi_rq_id2_SHIFT 16
1425#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF
1426#define lpfc_reg_fcfi_rq_id2_WORD word3
1427 uint32_t word4;
1428#define lpfc_reg_fcfi_type_match0_SHIFT 24
1429#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF
1430#define lpfc_reg_fcfi_type_match0_WORD word4
1431#define lpfc_reg_fcfi_type_mask0_SHIFT 16
1432#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF
1433#define lpfc_reg_fcfi_type_mask0_WORD word4
1434#define lpfc_reg_fcfi_rctl_match0_SHIFT 8
1435#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF
1436#define lpfc_reg_fcfi_rctl_match0_WORD word4
1437#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0
1438#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF
1439#define lpfc_reg_fcfi_rctl_mask0_WORD word4
1440 uint32_t word5;
1441#define lpfc_reg_fcfi_type_match1_SHIFT 24
1442#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF
1443#define lpfc_reg_fcfi_type_match1_WORD word5
1444#define lpfc_reg_fcfi_type_mask1_SHIFT 16
1445#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF
1446#define lpfc_reg_fcfi_type_mask1_WORD word5
1447#define lpfc_reg_fcfi_rctl_match1_SHIFT 8
1448#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF
1449#define lpfc_reg_fcfi_rctl_match1_WORD word5
1450#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0
1451#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF
1452#define lpfc_reg_fcfi_rctl_mask1_WORD word5
1453 uint32_t word6;
1454#define lpfc_reg_fcfi_type_match2_SHIFT 24
1455#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF
1456#define lpfc_reg_fcfi_type_match2_WORD word6
1457#define lpfc_reg_fcfi_type_mask2_SHIFT 16
1458#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF
1459#define lpfc_reg_fcfi_type_mask2_WORD word6
1460#define lpfc_reg_fcfi_rctl_match2_SHIFT 8
1461#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF
1462#define lpfc_reg_fcfi_rctl_match2_WORD word6
1463#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0
1464#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF
1465#define lpfc_reg_fcfi_rctl_mask2_WORD word6
1466 uint32_t word7;
1467#define lpfc_reg_fcfi_type_match3_SHIFT 24
1468#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF
1469#define lpfc_reg_fcfi_type_match3_WORD word7
1470#define lpfc_reg_fcfi_type_mask3_SHIFT 16
1471#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF
1472#define lpfc_reg_fcfi_type_mask3_WORD word7
1473#define lpfc_reg_fcfi_rctl_match3_SHIFT 8
1474#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF
1475#define lpfc_reg_fcfi_rctl_match3_WORD word7
1476#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0
1477#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF
1478#define lpfc_reg_fcfi_rctl_mask3_WORD word7
1479 uint32_t word8;
1480#define lpfc_reg_fcfi_mam_SHIFT 13
1481#define lpfc_reg_fcfi_mam_MASK 0x00000003
1482#define lpfc_reg_fcfi_mam_WORD word8
1483#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */
1484#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */
1485#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */
1486#define lpfc_reg_fcfi_vv_SHIFT 12
1487#define lpfc_reg_fcfi_vv_MASK 0x00000001
1488#define lpfc_reg_fcfi_vv_WORD word8
1489#define lpfc_reg_fcfi_vlan_tag_SHIFT 0
1490#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF
1491#define lpfc_reg_fcfi_vlan_tag_WORD word8
1492};
1493
1494struct lpfc_mbx_unreg_fcfi {
1495 uint32_t word1_rsvd;
1496 uint32_t word2;
1497#define lpfc_unreg_fcfi_SHIFT 0
1498#define lpfc_unreg_fcfi_MASK 0x0000FFFF
1499#define lpfc_unreg_fcfi_WORD word2
1500};
1501
1502struct lpfc_mbx_read_rev {
1503 uint32_t word1;
1504#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16
1505#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F
1506#define lpfc_mbx_rd_rev_sli_lvl_WORD word1
1507#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
1508#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
1509#define lpfc_mbx_rd_rev_fcoe_WORD word1
1510#define lpfc_mbx_rd_rev_vpd_SHIFT 29
1511#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
1512#define lpfc_mbx_rd_rev_vpd_WORD word1
1513 uint32_t first_hw_rev;
1514 uint32_t second_hw_rev;
1515 uint32_t word4_rsvd;
1516 uint32_t third_hw_rev;
1517 uint32_t word6;
1518#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0
1519#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF
1520#define lpfc_mbx_rd_rev_fcph_low_WORD word6
1521#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8
1522#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF
1523#define lpfc_mbx_rd_rev_fcph_high_WORD word6
1524#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16
1525#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF
1526#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6
1527#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24
1528#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF
1529#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6
1530 uint32_t word7_rsvd;
1531 uint32_t fw_id_rev;
1532 uint8_t fw_name[16];
1533 uint32_t ulp_fw_id_rev;
1534 uint8_t ulp_fw_name[16];
1535 uint32_t word18_47_rsvd[30];
1536 uint32_t word48;
1537#define lpfc_mbx_rd_rev_avail_len_SHIFT 0
1538#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF
1539#define lpfc_mbx_rd_rev_avail_len_WORD word48
1540 uint32_t vpd_paddr_low;
1541 uint32_t vpd_paddr_high;
1542 uint32_t avail_vpd_len;
1543 uint32_t rsvd_52_63[12];
1544};
1545
1546struct lpfc_mbx_read_config {
1547 uint32_t word1;
1548#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
1549#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
1550#define lpfc_mbx_rd_conf_max_bbc_WORD word1
1551#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
1552#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
1553#define lpfc_mbx_rd_conf_init_bbc_WORD word1
1554 uint32_t word2;
1555#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
1556#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
1557#define lpfc_mbx_rd_conf_nport_did_WORD word2
1558#define lpfc_mbx_rd_conf_topology_SHIFT 24
1559#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
1560#define lpfc_mbx_rd_conf_topology_WORD word2
1561 uint32_t word3;
1562#define lpfc_mbx_rd_conf_ao_SHIFT 0
1563#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
1564#define lpfc_mbx_rd_conf_ao_WORD word3
1565#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
1566#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
1567#define lpfc_mbx_rd_conf_bb_scn_WORD word3
1568#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
1569#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
1570#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
1571#define lpfc_mbx_rd_conf_mc_SHIFT 29
1572#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
1573#define lpfc_mbx_rd_conf_mc_WORD word3
1574 uint32_t word4;
1575#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
1576#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
1577#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
1578 uint32_t word5;
1579#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
1580#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
1581#define lpfc_mbx_rd_conf_lp_tov_WORD word5
1582 uint32_t word6;
1583#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
1584#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
1585#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
1586 uint32_t word7;
1587#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
1588#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
1589#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
1590 uint32_t word8;
1591#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
1592#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
1593#define lpfc_mbx_rd_conf_al_tov_WORD word8
1594 uint32_t word9;
1595#define lpfc_mbx_rd_conf_lmt_SHIFT 0
1596#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
1597#define lpfc_mbx_rd_conf_lmt_WORD word9
1598 uint32_t word10;
1599#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
1600#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
1601#define lpfc_mbx_rd_conf_max_alpa_WORD word10
1602 uint32_t word11_rsvd;
1603 uint32_t word12;
1604#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
1605#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
1606#define lpfc_mbx_rd_conf_xri_base_WORD word12
1607#define lpfc_mbx_rd_conf_xri_count_SHIFT 16
1608#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF
1609#define lpfc_mbx_rd_conf_xri_count_WORD word12
1610 uint32_t word13;
1611#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0
1612#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF
1613#define lpfc_mbx_rd_conf_rpi_base_WORD word13
1614#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16
1615#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF
1616#define lpfc_mbx_rd_conf_rpi_count_WORD word13
1617 uint32_t word14;
1618#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0
1619#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF
1620#define lpfc_mbx_rd_conf_vpi_base_WORD word14
1621#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16
1622#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF
1623#define lpfc_mbx_rd_conf_vpi_count_WORD word14
1624 uint32_t word15;
1625#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0
1626#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF
1627#define lpfc_mbx_rd_conf_vfi_base_WORD word15
1628#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16
1629#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
1630#define lpfc_mbx_rd_conf_vfi_count_WORD word15
1631 uint32_t word16;
1632#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
1633#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
1634#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
1635#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
1636#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
1637#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
1638 uint32_t word17;
1639#define lpfc_mbx_rd_conf_rq_count_SHIFT 0
1640#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF
1641#define lpfc_mbx_rd_conf_rq_count_WORD word17
1642#define lpfc_mbx_rd_conf_eq_count_SHIFT 16
1643#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF
1644#define lpfc_mbx_rd_conf_eq_count_WORD word17
1645 uint32_t word18;
1646#define lpfc_mbx_rd_conf_wq_count_SHIFT 0
1647#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF
1648#define lpfc_mbx_rd_conf_wq_count_WORD word18
1649#define lpfc_mbx_rd_conf_cq_count_SHIFT 16
1650#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF
1651#define lpfc_mbx_rd_conf_cq_count_WORD word18
1652};
1653
1654struct lpfc_mbx_request_features {
1655 uint32_t word1;
1656#define lpfc_mbx_rq_ftr_qry_SHIFT 0
1657#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001
1658#define lpfc_mbx_rq_ftr_qry_WORD word1
1659 uint32_t word2;
1660#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0
1661#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001
1662#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2
1663#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1
1664#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001
1665#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2
1666#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2
1667#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001
1668#define lpfc_mbx_rq_ftr_rq_dif_WORD word2
1669#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3
1670#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001
1671#define lpfc_mbx_rq_ftr_rq_vf_WORD word2
1672#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4
1673#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001
1674#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2
1675#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5
1676#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001
1677#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2
1678#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6
1679#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001
1680#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2
1681#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
1682#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
1683#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
1684 uint32_t word3;
1685#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
1686#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
1687#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3
1688#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1
1689#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001
1690#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3
1691#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2
1692#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001
1693#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3
1694#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3
1695#define lpfc_mbx_rq_ftr_rsp_vf_MASK 0x00000001
1696#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3
1697#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4
1698#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001
1699#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3
1700#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5
1701#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001
1702#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3
1703#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6
1704#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001
1705#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3
1706#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
1707#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
1708#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
1709};
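
Note the symmetry between word2 and word3: the rq_* bits carry what the driver asks for and the rsp_* bits echo what the port actually granted, so a caller sets its requests and then re-reads the same structure after the mailbox completes. A hedged sketch (ftr is an illustrative pointer into the mailbox payload):

	/* Sketch only: request FCP initiator mode and NPIV, then check grants. */
	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, ftr, 1);
	bf_set(lpfc_mbx_rq_ftr_rq_npiv, ftr, 1);

	/* ... after the REQUEST_FEATURES mailbox completes ... */
	if (!bf_get(lpfc_mbx_rq_ftr_rsp_npiv, ftr)) {
		/* port declined NPIV; run with the physical port only */
	}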
1710
1711/* Mailbox Completion Queue Error Messages */
1712#define MB_CQE_STATUS_SUCCESS 0x0
1713#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
1714#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
1715#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
1716#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
1717#define MB_CQE_STATUS_DMA_FAILED 0x5
1718
1719/* mailbox queue entry structure */
1720struct lpfc_mqe {
1721 uint32_t word0;
1722#define lpfc_mqe_status_SHIFT 16
1723#define lpfc_mqe_status_MASK 0x0000FFFF
1724#define lpfc_mqe_status_WORD word0
1725#define lpfc_mqe_command_SHIFT 8
1726#define lpfc_mqe_command_MASK 0x000000FF
1727#define lpfc_mqe_command_WORD word0
1728 union {
1729 uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
1730 /* sli4 mailbox commands */
1731 struct lpfc_mbx_sli4_config sli4_config;
1732 struct lpfc_mbx_init_vfi init_vfi;
1733 struct lpfc_mbx_reg_vfi reg_vfi;
1734 struct lpfc_mbx_unreg_vfi unreg_vfi;
1735 struct lpfc_mbx_init_vpi init_vpi;
1736 struct lpfc_mbx_resume_rpi resume_rpi;
1737 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
1738 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
1739 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
1740 struct lpfc_mbx_reg_fcfi reg_fcfi;
1741 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
1742 struct lpfc_mbx_mq_create mq_create;
1743 struct lpfc_mbx_eq_create eq_create;
1744 struct lpfc_mbx_cq_create cq_create;
1745 struct lpfc_mbx_wq_create wq_create;
1746 struct lpfc_mbx_rq_create rq_create;
1747 struct lpfc_mbx_mq_destroy mq_destroy;
1748 struct lpfc_mbx_eq_destroy eq_destroy;
1749 struct lpfc_mbx_cq_destroy cq_destroy;
1750 struct lpfc_mbx_wq_destroy wq_destroy;
1751 struct lpfc_mbx_rq_destroy rq_destroy;
1752 struct lpfc_mbx_post_sgl_pages post_sgl_pages;
1753 struct lpfc_mbx_nembed_cmd nembed_cmd;
1754 struct lpfc_mbx_read_rev read_rev;
1755 struct lpfc_mbx_read_vpi read_vpi;
1756 struct lpfc_mbx_read_config rd_config;
1757 struct lpfc_mbx_request_features req_ftrs;
1758 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
1759 struct lpfc_mbx_nop nop;
1760 } un;
1761};
1762
1763struct lpfc_mcqe {
1764 uint32_t word0;
1765#define lpfc_mcqe_status_SHIFT 0
1766#define lpfc_mcqe_status_MASK 0x0000FFFF
1767#define lpfc_mcqe_status_WORD word0
1768#define lpfc_mcqe_ext_status_SHIFT 16
1769#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
1770#define lpfc_mcqe_ext_status_WORD word0
1771 uint32_t mcqe_tag0;
1772 uint32_t mcqe_tag1;
1773 uint32_t trailer;
1774#define lpfc_trailer_valid_SHIFT 31
1775#define lpfc_trailer_valid_MASK 0x00000001
1776#define lpfc_trailer_valid_WORD trailer
1777#define lpfc_trailer_async_SHIFT 30
1778#define lpfc_trailer_async_MASK 0x00000001
1779#define lpfc_trailer_async_WORD trailer
1780#define lpfc_trailer_hpi_SHIFT 29
1781#define lpfc_trailer_hpi_MASK 0x00000001
1782#define lpfc_trailer_hpi_WORD trailer
1783#define lpfc_trailer_completed_SHIFT 28
1784#define lpfc_trailer_completed_MASK 0x00000001
1785#define lpfc_trailer_completed_WORD trailer
1786#define lpfc_trailer_consumed_SHIFT 27
1787#define lpfc_trailer_consumed_MASK 0x00000001
1788#define lpfc_trailer_consumed_WORD trailer
1789#define lpfc_trailer_type_SHIFT 16
1790#define lpfc_trailer_type_MASK 0x000000FF
1791#define lpfc_trailer_type_WORD trailer
1792#define lpfc_trailer_code_SHIFT 8
1793#define lpfc_trailer_code_MASK 0x000000FF
1794#define lpfc_trailer_code_WORD trailer
1795#define LPFC_TRAILER_CODE_LINK 0x1
1796#define LPFC_TRAILER_CODE_FCOE 0x2
1797#define LPFC_TRAILER_CODE_DCBX 0x3
1798};
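
A consumer of the mailbox completion queue first tests the trailer's valid bit, then separates asynchronous events from synchronous mailbox completions and dispatches on the event code. Sketched below with an illustrative mcqe pointer:

	/* Sketch only: classify one MCQE. */
	if (!bf_get(lpfc_trailer_valid, mcqe))
		return;				/* no new entry posted */

	if (bf_get(lpfc_trailer_async, mcqe)) {
		switch (bf_get(lpfc_trailer_code, mcqe)) {
		case LPFC_TRAILER_CODE_LINK:	/* struct lpfc_acqe_link */
			break;
		case LPFC_TRAILER_CODE_FCOE:	/* struct lpfc_acqe_fcoe */
			break;
		case LPFC_TRAILER_CODE_DCBX:	/* struct lpfc_acqe_dcbx */
			break;
		}
	} else if (bf_get(lpfc_trailer_completed, mcqe)) {
		/* synchronous mailbox completion; status lives in word0 */
		uint16_t status = bf_get(lpfc_mcqe_status, mcqe);
	}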
1799
1800struct lpfc_acqe_link {
1801 uint32_t word0;
1802#define lpfc_acqe_link_speed_SHIFT 24
1803#define lpfc_acqe_link_speed_MASK 0x000000FF
1804#define lpfc_acqe_link_speed_WORD word0
1805#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0
1806#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1
1807#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
1808#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
1809#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
1810#define lpfc_acqe_link_duplex_SHIFT 16
1811#define lpfc_acqe_link_duplex_MASK 0x000000FF
1812#define lpfc_acqe_link_duplex_WORD word0
1813#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0
1814#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1
1815#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2
1816#define lpfc_acqe_link_status_SHIFT 8
1817#define lpfc_acqe_link_status_MASK 0x000000FF
1818#define lpfc_acqe_link_status_WORD word0
1819#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0
1820#define LPFC_ASYNC_LINK_STATUS_UP 0x1
1821#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
1822#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
1823#define lpfc_acqe_link_physical_SHIFT 0
1824#define lpfc_acqe_link_physical_MASK 0x000000FF
1825#define lpfc_acqe_link_physical_WORD word0
1826#define LPFC_ASYNC_LINK_PORT_A 0x0
1827#define LPFC_ASYNC_LINK_PORT_B 0x1
1828 uint32_t word1;
1829#define lpfc_acqe_link_fault_SHIFT 0
1830#define lpfc_acqe_link_fault_MASK 0x000000FF
1831#define lpfc_acqe_link_fault_WORD word1
1832#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
1833#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
1834#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
1835 uint32_t event_tag;
1836 uint32_t trailer;
1837};
1838
1839struct lpfc_acqe_fcoe {
1840 uint32_t fcf_index;
1841 uint32_t word1;
1842#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
1843#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
1844#define lpfc_acqe_fcoe_fcf_count_WORD word1
1845#define lpfc_acqe_fcoe_event_type_SHIFT 16
1846#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF
1847#define lpfc_acqe_fcoe_event_type_WORD word1
1848#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
1849#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1850#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
1851 uint32_t event_tag;
1852 uint32_t trailer;
1853};
1854
1855struct lpfc_acqe_dcbx {
1856 uint32_t tlv_ttl;
1857 uint32_t reserved;
1858 uint32_t event_tag;
1859 uint32_t trailer;
1860};
1861
1862/*
1863 * Define the bootstrap mailbox (bmbx) region used to communicate
1864 * mailbox commands between the host and port. The mailbox consists
1865 * of a 256-byte payload area followed by a 16-byte completion
1866 * queue entry.
1867 */
1868struct lpfc_bmbx_create {
1869 struct lpfc_mqe mqe;
1870 struct lpfc_mcqe mcqe;
1871};
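
Given the 256-byte payload and the 16-byte completion entry described above, the bootstrap region is a fixed 272 bytes. A compile-time check along these lines (a sketch, assuming LPFC_SLI4_MB_WORD_COUNT is 64, i.e. word0 plus 63 mb_words) would catch an accidental layout change:

	/* Sketch only: pin down the bootstrap mailbox layout. */
	BUILD_BUG_ON(sizeof(struct lpfc_mqe) != 256);	/* payload area */
	BUILD_BUG_ON(sizeof(struct lpfc_mcqe) != 16);	/* completion entry */
	BUILD_BUG_ON(sizeof(struct lpfc_bmbx_create) != 256 + 16);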
1872
1873#define SGL_ALIGN_SZ 64
1874#define SGL_PAGE_SIZE 4096
1875/* align SGL addr on a size boundary - adjust address up */
1876#define NO_XRI ((uint16_t)-1)
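
The alignment comment above describes rounding a bus address up to the next SGL_ALIGN_SZ boundary, but the header itself does not carry the macro; the usual idiom would look like this (LPFC_SGL_ALIGN_UP is a hypothetical name, not part of this file):

	/* Sketch only: round addr up to the next 64-byte SGL boundary. */
	#define LPFC_SGL_ALIGN_UP(addr) \
		(((addr) + SGL_ALIGN_SZ - 1) & ~(unsigned long)(SGL_ALIGN_SZ - 1))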
1877struct wqe_common {
1878 uint32_t word6;
1879#define wqe_xri_SHIFT 0
1880#define wqe_xri_MASK 0x0000FFFF
1881#define wqe_xri_WORD word6
1882#define wqe_ctxt_tag_SHIFT 16
1883#define wqe_ctxt_tag_MASK 0x0000FFFF
1884#define wqe_ctxt_tag_WORD word6
1885 uint32_t word7;
1886#define wqe_ct_SHIFT 2
1887#define wqe_ct_MASK 0x00000003
1888#define wqe_ct_WORD word7
1889#define wqe_status_SHIFT 4
1890#define wqe_status_MASK 0x0000000f
1891#define wqe_status_WORD word7
1892#define wqe_cmnd_SHIFT 8
1893#define wqe_cmnd_MASK 0x000000ff
1894#define wqe_cmnd_WORD word7
1895#define wqe_class_SHIFT 16
1896#define wqe_class_MASK 0x00000007
1897#define wqe_class_WORD word7
1898#define wqe_pu_SHIFT 20
1899#define wqe_pu_MASK 0x00000003
1900#define wqe_pu_WORD word7
1901#define wqe_erp_SHIFT 22
1902#define wqe_erp_MASK 0x00000001
1903#define wqe_erp_WORD word7
1904#define wqe_lnk_SHIFT 23
1905#define wqe_lnk_MASK 0x00000001
1906#define wqe_lnk_WORD word7
1907#define wqe_tmo_SHIFT 24
1908#define wqe_tmo_MASK 0x000000ff
1909#define wqe_tmo_WORD word7
1910 uint32_t abort_tag; /* word 8 in WQE */
1911 uint32_t word9;
1912#define wqe_reqtag_SHIFT 0
1913#define wqe_reqtag_MASK 0x0000FFFF
1914#define wqe_reqtag_WORD word9
1915#define wqe_rcvoxid_SHIFT 16
1916#define wqe_rcvoxid_MASK 0x0000FFFF
1917#define wqe_rcvoxid_WORD word9
1918 uint32_t word10;
1919#define wqe_pri_SHIFT 16
1920#define wqe_pri_MASK 0x00000007
1921#define wqe_pri_WORD word10
1922#define wqe_pv_SHIFT 19
1923#define wqe_pv_MASK 0x00000001
1924#define wqe_pv_WORD word10
1925#define wqe_xc_SHIFT 21
1926#define wqe_xc_MASK 0x00000001
1927#define wqe_xc_WORD word10
1928#define wqe_ccpe_SHIFT 23
1929#define wqe_ccpe_MASK 0x00000001
1930#define wqe_ccpe_WORD word10
1931#define wqe_ccp_SHIFT 24
1932#define wqe_ccp_MASK 0x000000ff
1933#define wqe_ccp_WORD word10
1934 uint32_t word11;
1935#define wqe_cmd_type_SHIFT 0
1936#define wqe_cmd_type_MASK 0x0000000f
1937#define wqe_cmd_type_WORD word11
1938#define wqe_wqec_SHIFT 7
1939#define wqe_wqec_MASK 0x00000001
1940#define wqe_wqec_WORD word11
1941#define wqe_cqid_SHIFT 16
1942#define wqe_cqid_MASK 0x000003ff
1943#define wqe_cqid_WORD word11
1944};
1945
1946struct wqe_did {
1947 uint32_t word5;
1948#define wqe_els_did_SHIFT 0
1949#define wqe_els_did_MASK 0x00FFFFFF
1950#define wqe_els_did_WORD word5
1951#define wqe_xmit_bls_ar_SHIFT 30
1952#define wqe_xmit_bls_ar_MASK 0x00000001
1953#define wqe_xmit_bls_ar_WORD word5
1954#define wqe_xmit_bls_xo_SHIFT 31
1955#define wqe_xmit_bls_xo_MASK 0x00000001
1956#define wqe_xmit_bls_xo_WORD word5
1957};
1958
1959struct els_request64_wqe {
1960 struct ulp_bde64 bde;
1961 uint32_t payload_len;
1962 uint32_t word4;
1963#define els_req64_sid_SHIFT 0
1964#define els_req64_sid_MASK 0x00FFFFFF
1965#define els_req64_sid_WORD word4
1966#define els_req64_sp_SHIFT 24
1967#define els_req64_sp_MASK 0x00000001
1968#define els_req64_sp_WORD word4
1969#define els_req64_vf_SHIFT 25
1970#define els_req64_vf_MASK 0x00000001
1971#define els_req64_vf_WORD word4
1972 struct wqe_did wqe_dest;
1973 struct wqe_common wqe_com; /* words 6-11 */
1974 uint32_t word12;
1975#define els_req64_vfid_SHIFT 1
1976#define els_req64_vfid_MASK 0x00000FFF
1977#define els_req64_vfid_WORD word12
1978#define els_req64_pri_SHIFT 13
1979#define els_req64_pri_MASK 0x00000007
1980#define els_req64_pri_WORD word12
1981 uint32_t word13;
1982#define els_req64_hopcnt_SHIFT 24
1983#define els_req64_hopcnt_MASK 0x000000ff
1984#define els_req64_hopcnt_WORD word13
1985 uint32_t reserved[2];
1986};
1987
1988struct xmit_els_rsp64_wqe {
1989 struct ulp_bde64 bde;
1990 uint32_t rsvd3;
1991 uint32_t rsvd4;
1992 struct wqe_did wqe_dest;
1993 struct wqe_common wqe_com; /* words 6-11 */
1994 uint32_t rsvd_12_15[4];
1995};
1996
1997struct xmit_bls_rsp64_wqe {
1998 uint32_t payload0;
1999 uint32_t word1;
2000#define xmit_bls_rsp64_rxid_SHIFT 0
2001#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
2002#define xmit_bls_rsp64_rxid_WORD word1
2003#define xmit_bls_rsp64_oxid_SHIFT 16
2004#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
2005#define xmit_bls_rsp64_oxid_WORD word1
2006 uint32_t word2;
2007#define xmit_bls_rsp64_seqcntlo_SHIFT 0
2008#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2009#define xmit_bls_rsp64_seqcntlo_WORD word2
2010#define xmit_bls_rsp64_seqcnthi_SHIFT 16
2011#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
2012#define xmit_bls_rsp64_seqcnthi_WORD word2
2013 uint32_t rsrvd3;
2014 uint32_t rsrvd4;
2015 struct wqe_did wqe_dest;
2016 struct wqe_common wqe_com; /* words 6-11 */
2017 uint32_t rsvd_12_15[4];
2018};
2019struct wqe_rctl_dfctl {
2020 uint32_t word5;
2021#define wqe_si_SHIFT 2
2022#define wqe_si_MASK 0x00000001
2023#define wqe_si_WORD word5
2024#define wqe_la_SHIFT 3
2025#define wqe_la_MASK 0x00000001
2026#define wqe_la_WORD word5
2027#define wqe_ls_SHIFT 7
2028#define wqe_ls_MASK 0x00000001
2029#define wqe_ls_WORD word5
2030#define wqe_dfctl_SHIFT 8
2031#define wqe_dfctl_MASK 0x000000ff
2032#define wqe_dfctl_WORD word5
2033#define wqe_type_SHIFT 16
2034#define wqe_type_MASK 0x000000ff
2035#define wqe_type_WORD word5
2036#define wqe_rctl_SHIFT 24
2037#define wqe_rctl_MASK 0x000000ff
2038#define wqe_rctl_WORD word5
2039};
2040
2041struct xmit_seq64_wqe {
2042 struct ulp_bde64 bde;
2043 uint32_t payload_offset;
2044 uint32_t relative_offset;
2045 struct wqe_rctl_dfctl wqe_ctl;
2046 struct wqe_common wqe_com; /* words 6-11 */
2047 /* Note: word10 different REVISIT */
2048 uint32_t xmit_len;
2049 uint32_t rsvd_12_15[3];
2050};
2051struct xmit_bcast64_wqe {
2052 struct ulp_bde64 bde;
2053 uint32_t payload_len;
2054 uint32_t rsvd4;
2055 struct wqe_rctl_dfctl wqe_ctl; /* word 5 */
2056 struct wqe_common wqe_com; /* words 6-11 */
2057 uint32_t rsvd_12_15[4];
2058};
2059
2060struct gen_req64_wqe {
2061 struct ulp_bde64 bde;
2062 uint32_t command_len;
2063 uint32_t payload_len;
2064 struct wqe_rctl_dfctl wqe_ctl; /* word 5 */
2065 struct wqe_common wqe_com; /* words 6-11 */
2066 uint32_t rsvd_12_15[4];
2067};
2068
2069struct create_xri_wqe {
2070 uint32_t rsrvd[5]; /* words 0-4 */
2071 struct wqe_did wqe_dest; /* word 5 */
2072 struct wqe_common wqe_com; /* words 6-11 */
2073 uint32_t rsvd_12_15[4]; /* word 12-15 */
2074};
2075
2076#define T_REQUEST_TAG 3
2077#define T_XRI_TAG 1
2078
2079struct abort_cmd_wqe {
2080 uint32_t rsrvd[3];
2081 uint32_t word3;
2082#define abort_cmd_ia_SHIFT 0
2083#define abort_cmd_ia_MASK 0x00000001
2084#define abort_cmd_ia_WORD word3
2085#define abort_cmd_criteria_SHIFT 8
2086#define abort_cmd_criteria_MASK 0x000000ff
2087#define abort_cmd_criteria_WORD word3
2088 uint32_t rsrvd4;
2089 uint32_t rsrvd5;
2090 struct wqe_common wqe_com; /* words 6-11 */
2091 uint32_t rsvd_12_15[4]; /* word 12-15 */
2092};
2093
2094struct fcp_iwrite64_wqe {
2095 struct ulp_bde64 bde;
2096 uint32_t payload_len;
2097 uint32_t total_xfer_len;
2098 uint32_t initial_xfer_len;
2099 struct wqe_common wqe_com; /* words 6-11 */
2100 uint32_t rsvd_12_15[4]; /* word 12-15 */
2101};
2102
2103struct fcp_iread64_wqe {
2104 struct ulp_bde64 bde;
2105 uint32_t payload_len; /* word 3 */
2106 uint32_t total_xfer_len; /* word 4 */
2107 uint32_t rsrvd5; /* word 5 */
2108 struct wqe_common wqe_com; /* words 6-11 */
2109 uint32_t rsvd_12_15[4]; /* word 12-15 */
2110};
2111
2112struct fcp_icmnd64_wqe {
2113 struct ulp_bde64 bde; /* words 0-2 */
2114 uint32_t rsrvd[3]; /* words 3-5 */
2115 struct wqe_common wqe_com; /* words 6-11 */
2116 uint32_t rsvd_12_15[4]; /* word 12-15 */
2117};
2118
2119
2120union lpfc_wqe {
2121 uint32_t words[16];
2122 struct lpfc_wqe_generic generic;
2123 struct fcp_icmnd64_wqe fcp_icmd;
2124 struct fcp_iread64_wqe fcp_iread;
2125 struct fcp_iwrite64_wqe fcp_iwrite;
2126 struct abort_cmd_wqe abort_cmd;
2127 struct create_xri_wqe create_xri;
2128 struct xmit_bcast64_wqe xmit_bcast64;
2129 struct xmit_seq64_wqe xmit_sequence;
2130 struct xmit_bls_rsp64_wqe xmit_bls_rsp;
2131 struct xmit_els_rsp64_wqe xmit_els_rsp;
2132 struct els_request64_wqe els_req;
2133 struct gen_req64_wqe gen_req;
2134};
2135
2136#define FCP_COMMAND 0x0
2137#define FCP_COMMAND_DATA_OUT 0x1
2138#define ELS_COMMAND_NON_FIP 0xC
2139#define ELS_COMMAND_FIP 0xD
2140#define OTHER_COMMAND 0x8
2141
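
The WQE layouts above are likewise programmed through bf_set() on the shared wqe_common words. A hedged sketch of filling the common words of an FCP read request (xritag, iotag and cqid are illustrative values; the real submission paths in lpfc_sli.c own the actual field choices):

	/* Sketch only: set the common words of an FCP iread WQE. */
	union lpfc_wqe wqe;
	uint16_t xritag = 0, iotag = 0, cqid = 0;

	memset(&wqe, 0, sizeof(wqe));
	bf_set(wqe_xri, &wqe.fcp_iread.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe.fcp_iread.wqe_com, iotag);
	bf_set(wqe_tmo, &wqe.fcp_iread.wqe_com, 60);	/* timeout, seconds */
	bf_set(wqe_cmd_type, &wqe.fcp_iread.wqe_com, FCP_COMMAND);
	bf_set(wqe_cqid, &wqe.fcp_iread.wqe_com, cqid);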
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86d1bdcbf2d8..2f5907f92eea 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -34,8 +34,10 @@
34#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h> 35#include <scsi/scsi_transport_fc.h>
36 36
37#include "lpfc_hw4.h"
37#include "lpfc_hw.h" 38#include "lpfc_hw.h"
38#include "lpfc_sli.h" 39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
39#include "lpfc_nl.h" 41#include "lpfc_nl.h"
40#include "lpfc_disc.h" 42#include "lpfc_disc.h"
41#include "lpfc_scsi.h" 43#include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
51unsigned long _dump_buf_dif_order; 53unsigned long _dump_buf_dif_order;
52spinlock_t _dump_buf_lock; 54spinlock_t _dump_buf_lock;
53 55
54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
56static int lpfc_post_rcv_buf(struct lpfc_hba *); 57static int lpfc_post_rcv_buf(struct lpfc_hba *);
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
57 73
58static struct scsi_transport_template *lpfc_transport_template = NULL; 74static struct scsi_transport_template *lpfc_transport_template = NULL;
59static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
92 return -ENOMEM; 108 return -ENOMEM;
93 } 109 }
94 110
95 mb = &pmb->mb; 111 mb = &pmb->u.mb;
96 phba->link_state = LPFC_INIT_MBX_CMDS; 112 phba->link_state = LPFC_INIT_MBX_CMDS;
97 113
98 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 114 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
205 mb->mbxCommand, mb->mbxStatus); 221 mb->mbxCommand, mb->mbxStatus);
206 mb->un.varDmp.word_cnt = 0; 222 mb->un.varDmp.word_cnt = 0;
207 } 223 }
224 /* dump mem may return a zero when finished or when we hit a
225 * mailbox error; either way we are done.
226 */
227 if (mb->un.varDmp.word_cnt == 0)
228 break;
208 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 229 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
209 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 230 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
210 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 231 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -233,7 +254,7 @@ out_free_mbox:
233static void 254static void
234lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
235{ 256{
236 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 257 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
237 phba->temp_sensor_support = 1; 258 phba->temp_sensor_support = 1;
238 else 259 else
239 phba->temp_sensor_support = 0; 260 phba->temp_sensor_support = 0;
@@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
260 /* character array used for decoding dist type. */ 281 /* character array used for decoding dist type. */
261 char dist_char[] = "nabx"; 282 char dist_char[] = "nabx";
262 283
263 if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { 284 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
264 mempool_free(pmboxq, phba->mbox_mem_pool); 285 mempool_free(pmboxq, phba->mbox_mem_pool);
265 return; 286 return;
266 } 287 }
@@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
268 prg = (struct prog_id *) &prog_id_word; 289 prg = (struct prog_id *) &prog_id_word;
269 290
270 /* word 7 contain option rom version */ 291 /* word 7 contain option rom version */
271 prog_id_word = pmboxq->mb.un.varWords[7]; 292 prog_id_word = pmboxq->u.mb.un.varWords[7];
272 293
273 /* Decode the Option rom version word to a readable string */ 294 /* Decode the Option rom version word to a readable string */
274 if (prg->dist < 4) 295 if (prg->dist < 4)
@@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
325 phba->link_state = LPFC_HBA_ERROR; 346 phba->link_state = LPFC_HBA_ERROR;
326 return -ENOMEM; 347 return -ENOMEM;
327 } 348 }
328 mb = &pmb->mb; 349 mb = &pmb->u.mb;
329 350
330 /* Get login parameters for NID. */ 351 /* Get login parameters for NID. */
331 lpfc_read_sparam(phba, pmb, 0); 352 lpfc_read_sparam(phba, pmb, 0);
@@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
364 /* Update the fc_host data structures with new wwn. */ 385 /* Update the fc_host data structures with new wwn. */
365 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
366 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388 fc_host_max_npiv_vports(shost) = phba->max_vpi;
367 389
368 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 390 /* If no serial number in VPD data, use low 6 bytes of WWNN */
369 /* This should be consolidated into parse_vpd ? - mr */ 391 /* This should be consolidated into parse_vpd ? - mr */
@@ -460,17 +482,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 482 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
461 "0352 Config MSI mailbox command " 483 "0352 Config MSI mailbox command "
462 "failed, mbxCmd x%x, mbxStatus x%x\n", 484 "failed, mbxCmd x%x, mbxStatus x%x\n",
463 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 485 pmb->u.mb.mbxCommand,
486 pmb->u.mb.mbxStatus);
464 mempool_free(pmb, phba->mbox_mem_pool); 487 mempool_free(pmb, phba->mbox_mem_pool);
465 return -EIO; 488 return -EIO;
466 } 489 }
467 } 490 }
468 491
492 spin_lock_irq(&phba->hbalock);
469 /* Initialize ERATT handling flag */ 493 /* Initialize ERATT handling flag */
470 phba->hba_flag &= ~HBA_ERATT_HANDLED; 494 phba->hba_flag &= ~HBA_ERATT_HANDLED;
471 495
472 /* Enable appropriate host interrupts */ 496 /* Enable appropriate host interrupts */
473 spin_lock_irq(&phba->hbalock);
474 status = readl(phba->HCregaddr); 497 status = readl(phba->HCregaddr);
475 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 498 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
476 if (psli->num_rings > 0) 499 if (psli->num_rings > 0)
@@ -571,16 +594,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
571{ 594{
572 struct lpfc_vport **vports; 595 struct lpfc_vport **vports;
573 int i; 596 int i;
574 /* Disable interrupts */ 597
575 writel(0, phba->HCregaddr); 598 if (phba->sli_rev <= LPFC_SLI_REV3) {
576 readl(phba->HCregaddr); /* flush */ 599 /* Disable interrupts */
600 writel(0, phba->HCregaddr);
601 readl(phba->HCregaddr); /* flush */
602 }
577 603
578 if (phba->pport->load_flag & FC_UNLOADING) 604 if (phba->pport->load_flag & FC_UNLOADING)
579 lpfc_cleanup_discovery_resources(phba->pport); 605 lpfc_cleanup_discovery_resources(phba->pport);
580 else { 606 else {
581 vports = lpfc_create_vport_work_array(phba); 607 vports = lpfc_create_vport_work_array(phba);
582 if (vports != NULL) 608 if (vports != NULL)
583 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 609 for (i = 0; i <= phba->max_vports &&
610 vports[i] != NULL; i++)
584 lpfc_cleanup_discovery_resources(vports[i]); 611 lpfc_cleanup_discovery_resources(vports[i]);
585 lpfc_destroy_vport_work_array(phba, vports); 612 lpfc_destroy_vport_work_array(phba, vports);
586 } 613 }
@@ -588,7 +615,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
588} 615}
589 616
590/** 617/**
591 * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset 618 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
592 * @phba: pointer to lpfc HBA data structure. 619 * @phba: pointer to lpfc HBA data structure.
593 * 620 *
594 * This routine will do uninitialization after the HBA is reset when bring 621 * This routine will do uninitialization after the HBA is reset when bring
@@ -598,8 +625,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
598 * 0 - sucess. 625 * 0 - sucess.
599 * Any other value - error. 626 * Any other value - error.
600 **/ 627 **/
601int 628static int
602lpfc_hba_down_post(struct lpfc_hba *phba) 629lpfc_hba_down_post_s3(struct lpfc_hba *phba)
603{ 630{
604 struct lpfc_sli *psli = &phba->sli; 631 struct lpfc_sli *psli = &phba->sli;
605 struct lpfc_sli_ring *pring; 632 struct lpfc_sli_ring *pring;
@@ -642,6 +669,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
642 669
643 return 0; 670 return 0;
644} 671}
672/**
673 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
674 * @phba: pointer to lpfc HBA data structure.
675 *
676 * This routine will do uninitialization after the HBA is reset when bringing
677 * down the SLI Layer.
678 *
679 * Return codes
680 * 0 - success.
681 * Any other value - error.
682 **/
683static int
684lpfc_hba_down_post_s4(struct lpfc_hba *phba)
685{
686 struct lpfc_scsi_buf *psb, *psb_next;
687 LIST_HEAD(aborts);
688 int ret;
689 unsigned long iflag = 0;
690 ret = lpfc_hba_down_post_s3(phba);
691 if (ret)
692 return ret;
693 /* At this point in time the HBA is either reset or DOA. Either
694 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
695 * on the lpfc_sgl_list so that it can either be freed if the
696 * driver is unloading or reposted if the driver is restarting
697 * the port.
698 */
699 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
700 /* scsi_buf_list */
701 /* abts_sgl_list_lock required because worker thread uses this
702 * list.
703 */
704 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
705 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
706 &phba->sli4_hba.lpfc_sgl_list);
707 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
708 /* abts_scsi_buf_list_lock required because worker thread uses this
709 * list.
710 */
711 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
712 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
713 &aborts);
714 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
715 spin_unlock_irq(&phba->hbalock);
716
717 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
718 psb->pCmd = NULL;
719 psb->status = IOSTAT_SUCCESS;
720 }
721 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
722 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
723 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
724 return 0;
725}
726
727/**
728 * lpfc_hba_down_post - Wrapper func for hba down post routine
729 * @phba: pointer to lpfc HBA data structure.
730 *
731 * This routine wraps the actual SLI3 or SLI4 routine for performing
732 * uninitialization after the HBA is reset when bringing down the SLI Layer.
733 *
734 * Return codes
735 * 0 - success.
736 * Any other value - error.
737 **/
738int
739lpfc_hba_down_post(struct lpfc_hba *phba)
740{
741 return (*phba->lpfc_hba_down_post)(phba);
742}
645 743
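
The wrapper above only indirects through a per-device function pointer. That pointer is bound once at attach time from the PCI device group; the switch below is a sketch of the wiring only (the pointer members match the wrappers added by this patch, while the surrounding setup routine is left out):

	/* Sketch only: bind SLI3 vs SLI4 entry points by device group. */
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:		/* LightPulse: SLI2/SLI3 paths */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		break;
	case LPFC_PCI_DEV_OC:		/* OneConnect: SLI4 paths */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		break;
	default:
		/* unknown device group: fail the attach */
		break;
	}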
646/** 744/**
647 * lpfc_hb_timeout - The HBA-timer timeout handler 745 * lpfc_hb_timeout - The HBA-timer timeout handler
@@ -809,7 +907,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
809 "taking this port offline.\n"); 907 "taking this port offline.\n");
810 908
811 spin_lock_irq(&phba->hbalock); 909 spin_lock_irq(&phba->hbalock);
812 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 910 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
813 spin_unlock_irq(&phba->hbalock); 911 spin_unlock_irq(&phba->hbalock);
814 912
815 lpfc_offline_prep(phba); 913 lpfc_offline_prep(phba);
@@ -834,13 +932,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
834 struct lpfc_sli *psli = &phba->sli; 932 struct lpfc_sli *psli = &phba->sli;
835 933
836 spin_lock_irq(&phba->hbalock); 934 spin_lock_irq(&phba->hbalock);
837 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 935 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
838 spin_unlock_irq(&phba->hbalock); 936 spin_unlock_irq(&phba->hbalock);
839 lpfc_offline_prep(phba); 937 lpfc_offline_prep(phba);
840 938
841 lpfc_offline(phba); 939 lpfc_offline(phba);
842 lpfc_reset_barrier(phba); 940 lpfc_reset_barrier(phba);
941 spin_lock_irq(&phba->hbalock);
843 lpfc_sli_brdreset(phba); 942 lpfc_sli_brdreset(phba);
943 spin_unlock_irq(&phba->hbalock);
844 lpfc_hba_down_post(phba); 944 lpfc_hba_down_post(phba);
845 lpfc_sli_brdready(phba, HS_MBRDY); 945 lpfc_sli_brdready(phba, HS_MBRDY);
846 lpfc_unblock_mgmt_io(phba); 946 lpfc_unblock_mgmt_io(phba);
@@ -849,6 +949,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
849} 949}
850 950
851/** 951/**
952 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
953 * @phba: pointer to lpfc hba data structure.
954 *
955 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
956 * other than Port Error 6 has been detected.
957 **/
958static void
959lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
960{
961 lpfc_offline_prep(phba);
962 lpfc_offline(phba);
963 lpfc_sli4_brdreset(phba);
964 lpfc_hba_down_post(phba);
965 lpfc_sli4_post_status_check(phba);
966 lpfc_unblock_mgmt_io(phba);
967 phba->link_state = LPFC_HBA_ERROR;
968}
969
970/**
852 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 971 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
853 * @phba: pointer to lpfc hba data structure. 972 * @phba: pointer to lpfc hba data structure.
854 * 973 *
@@ -864,6 +983,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
864 struct lpfc_sli_ring *pring; 983 struct lpfc_sli_ring *pring;
865 struct lpfc_sli *psli = &phba->sli; 984 struct lpfc_sli *psli = &phba->sli;
866 985
986 /* If the pci channel is offline, ignore possible errors,
987 * since we cannot communicate with the pci card anyway.
988 */
989 if (pci_channel_offline(phba->pcidev)) {
990 spin_lock_irq(&phba->hbalock);
991 phba->hba_flag &= ~DEFER_ERATT;
992 spin_unlock_irq(&phba->hbalock);
993 return;
994 }
995
867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
868 "0479 Deferred Adapter Hardware Error " 997 "0479 Deferred Adapter Hardware Error "
869 "Data: x%x x%x x%x\n", 998 "Data: x%x x%x x%x\n",
@@ -871,7 +1000,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
871 phba->work_status[0], phba->work_status[1]); 1000 phba->work_status[0], phba->work_status[1]);
872 1001
873 spin_lock_irq(&phba->hbalock); 1002 spin_lock_irq(&phba->hbalock);
874 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1003 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
875 spin_unlock_irq(&phba->hbalock); 1004 spin_unlock_irq(&phba->hbalock);
876 1005
877 1006
@@ -909,13 +1038,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
909 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1038 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
910 phba->work_hs = old_host_status & ~HS_FFER1; 1039 phba->work_hs = old_host_status & ~HS_FFER1;
911 1040
1041 spin_lock_irq(&phba->hbalock);
912 phba->hba_flag &= ~DEFER_ERATT; 1042 phba->hba_flag &= ~DEFER_ERATT;
1043 spin_unlock_irq(&phba->hbalock);
913 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1044 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
914 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1045 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
915} 1046}
916 1047
1048static void
1049lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1050{
1051 struct lpfc_board_event_header board_event;
1052 struct Scsi_Host *shost;
1053
1054 board_event.event_type = FC_REG_BOARD_EVENT;
1055 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1056 shost = lpfc_shost_from_vport(phba->pport);
1057 fc_host_post_vendor_event(shost, fc_get_event_number(),
1058 sizeof(board_event),
1059 (char *) &board_event,
1060 LPFC_NL_VENDOR_ID);
1061}
1062
917/** 1063/**
918 * lpfc_handle_eratt - The HBA hardware error handler 1064 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
919 * @phba: pointer to lpfc hba data structure. 1065 * @phba: pointer to lpfc hba data structure.
920 * 1066 *
921 * This routine is invoked to handle the following HBA hardware error 1067 * This routine is invoked to handle the following HBA hardware error
@@ -924,8 +1070,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
924 * 2 - DMA ring index out of range 1070 * 2 - DMA ring index out of range
925 * 3 - Mailbox command came back as unknown 1071 * 3 - Mailbox command came back as unknown
926 **/ 1072 **/
927void 1073static void
928lpfc_handle_eratt(struct lpfc_hba *phba) 1074lpfc_handle_eratt_s3(struct lpfc_hba *phba)
929{ 1075{
930 struct lpfc_vport *vport = phba->pport; 1076 struct lpfc_vport *vport = phba->pport;
931 struct lpfc_sli *psli = &phba->sli; 1077 struct lpfc_sli *psli = &phba->sli;
@@ -934,24 +1080,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
934 unsigned long temperature; 1080 unsigned long temperature;
935 struct temp_event temp_event_data; 1081 struct temp_event temp_event_data;
936 struct Scsi_Host *shost; 1082 struct Scsi_Host *shost;
937 struct lpfc_board_event_header board_event;
938 1083
939 /* If the pci channel is offline, ignore possible errors, 1084 /* If the pci channel is offline, ignore possible errors,
940 * since we cannot communicate with the pci card anyway. */ 1085 * since we cannot communicate with the pci card anyway.
941 if (pci_channel_offline(phba->pcidev)) 1086 */
1087 if (pci_channel_offline(phba->pcidev)) {
1088 spin_lock_irq(&phba->hbalock);
1089 phba->hba_flag &= ~DEFER_ERATT;
1090 spin_unlock_irq(&phba->hbalock);
942 return; 1091 return;
1092 }
1093
943 /* If resets are disabled then leave the HBA alone and return */ 1094 /* If resets are disabled then leave the HBA alone and return */
944 if (!phba->cfg_enable_hba_reset) 1095 if (!phba->cfg_enable_hba_reset)
945 return; 1096 return;
946 1097
947 /* Send an internal error event to mgmt application */ 1098 /* Send an internal error event to mgmt application */
948 board_event.event_type = FC_REG_BOARD_EVENT; 1099 lpfc_board_errevt_to_mgmt(phba);
949 board_event.subcategory = LPFC_EVENT_PORTINTERR;
950 shost = lpfc_shost_from_vport(phba->pport);
951 fc_host_post_vendor_event(shost, fc_get_event_number(),
952 sizeof(board_event),
953 (char *) &board_event,
954 LPFC_NL_VENDOR_ID);
955 1100
956 if (phba->hba_flag & DEFER_ERATT) 1101 if (phba->hba_flag & DEFER_ERATT)
957 lpfc_handle_deferred_eratt(phba); 1102 lpfc_handle_deferred_eratt(phba);
@@ -965,7 +1110,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
965 phba->work_status[0], phba->work_status[1]); 1110 phba->work_status[0], phba->work_status[1]);
966 1111
967 spin_lock_irq(&phba->hbalock); 1112 spin_lock_irq(&phba->hbalock);
968 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1113 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
969 spin_unlock_irq(&phba->hbalock); 1114 spin_unlock_irq(&phba->hbalock);
970 1115
971 /* 1116 /*
@@ -1037,6 +1182,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
1037} 1182}
1038 1183
1039/** 1184/**
1185 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1186 * @phba: pointer to lpfc hba data structure.
1187 *
1188 * This routine is invoked to handle the SLI4 HBA hardware error attention
1189 * conditions.
1190 **/
1191static void
1192lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1193{
1194 struct lpfc_vport *vport = phba->pport;
1195 uint32_t event_data;
1196 struct Scsi_Host *shost;
1197
1198 /* If the pci channel is offline, ignore possible errors, since
1199 * we cannot communicate with the pci card anyway.
1200 */
1201 if (pci_channel_offline(phba->pcidev))
1202 return;
1203 /* If resets are disabled then leave the HBA alone and return */
1204 if (!phba->cfg_enable_hba_reset)
1205 return;
1206
1207 /* Send an internal error event to mgmt application */
1208 lpfc_board_errevt_to_mgmt(phba);
1209
1210 /* For now, the actual action for SLI4 device handling is not
1211 * specified yet; just treat it as an adapter hardware failure
1212 */
1213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1214 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1215 phba->work_status[0], phba->work_status[1]);
1216
1217 event_data = FC_REG_DUMP_EVENT;
1218 shost = lpfc_shost_from_vport(vport);
1219 fc_host_post_vendor_event(shost, fc_get_event_number(),
1220 sizeof(event_data), (char *) &event_data,
1221 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1222
1223 lpfc_sli4_offline_eratt(phba);
1224}
1225
1226/**
1227 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1228 * @phba: pointer to lpfc HBA data structure.
1229 *
1230 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1231 * routine through the API jump table function pointer in the lpfc_hba struct.
1232 *
1233 * Return codes
1234 * 0 - success.
1235 * Any other value - error.
1236 **/
1237void
1238lpfc_handle_eratt(struct lpfc_hba *phba)
1239{
1240 (*phba->lpfc_handle_eratt)(phba);
1241}
1242
1243/**
1040 * lpfc_handle_latt - The HBA link event handler 1244 * lpfc_handle_latt - The HBA link event handler
1041 * @phba: pointer to lpfc hba data structure. 1245 * @phba: pointer to lpfc hba data structure.
1042 * 1246 *
@@ -1137,7 +1341,7 @@ lpfc_handle_latt_err_exit:
1137 * 0 - pointer to the VPD passed in is NULL 1341 * 0 - pointer to the VPD passed in is NULL
1138 * 1 - success 1342 * 1 - success
1139 **/ 1343 **/
1140static int 1344int
1141lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1345lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1142{ 1346{
1143 uint8_t lenlo, lenhi; 1347 uint8_t lenlo, lenhi;
@@ -1292,6 +1496,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1292 uint16_t dev_id = phba->pcidev->device; 1496 uint16_t dev_id = phba->pcidev->device;
1293 int max_speed; 1497 int max_speed;
1294 int GE = 0; 1498 int GE = 0;
1499 int oneConnect = 0; /* default is not a oneConnect */
1295 struct { 1500 struct {
1296 char * name; 1501 char * name;
1297 int max_speed; 1502 int max_speed;
@@ -1437,6 +1642,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1437 case PCI_DEVICE_ID_PROTEUS_S: 1642 case PCI_DEVICE_ID_PROTEUS_S:
1438 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1643 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1439 break; 1644 break;
1645 case PCI_DEVICE_ID_TIGERSHARK:
1646 oneConnect = 1;
1647 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1648 break;
1649 case PCI_DEVICE_ID_TIGERSHARK_S:
1650 oneConnect = 1;
1651 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
1652 break;
1440 default: 1653 default:
1441 m = (typeof(m)){ NULL }; 1654 m = (typeof(m)){ NULL };
1442 break; 1655 break;
@@ -1444,13 +1657,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1444 1657
1445 if (mdp && mdp[0] == '\0') 1658 if (mdp && mdp[0] == '\0')
1446 snprintf(mdp, 79,"%s", m.name); 1659 snprintf(mdp, 79,"%s", m.name);
1447 if (descp && descp[0] == '\0') 1660 /* OneConnect HBAs require special processing; they are all initiators
1448 snprintf(descp, 255, 1661 * and we put the port number on the end
1449 "Emulex %s %d%s %s %s", 1662 */
1450 m.name, m.max_speed, 1663 if (descp && descp[0] == '\0') {
1451 (GE) ? "GE" : "Gb", 1664 if (oneConnect)
1452 m.bus, 1665 snprintf(descp, 255,
1453 (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); 1666 "Emulex OneConnect %s, FCoE Initiator, Port %s",
1667 m.name,
1668 phba->Port);
1669 else
1670 snprintf(descp, 255,
1671 "Emulex %s %d%s %s %s",
1672 m.name, m.max_speed,
1673 (GE) ? "GE" : "Gb",
1674 m.bus,
1675 (GE) ? "FCoE Adapter" :
1676 "Fibre Channel Adapter");
1677 }
1454} 1678}
1455 1679
1456/** 1680/**
@@ -1533,7 +1757,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1533 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1757 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1534 icmd->ulpLe = 1; 1758 icmd->ulpLe = 1;
1535 1759
1536 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1760 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1761 IOCB_ERROR) {
1537 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1762 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1538 kfree(mp1); 1763 kfree(mp1);
1539 cnt++; 1764 cnt++;
@@ -1761,7 +1986,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1761 * Lets wait for this to happen, if needed. 1986 * Lets wait for this to happen, if needed.
1762 */ 1987 */
1763 while (!list_empty(&vport->fc_nodes)) { 1988 while (!list_empty(&vport->fc_nodes)) {
1764
1765 if (i++ > 3000) { 1989 if (i++ > 3000) {
1766 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1990 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1767 "0233 Nodelist not empty\n"); 1991 "0233 Nodelist not empty\n");
@@ -1782,7 +2006,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1782 /* Wait for any activity on ndlps to settle */ 2006 /* Wait for any activity on ndlps to settle */
1783 msleep(10); 2007 msleep(10);
1784 } 2008 }
1785 return;
1786} 2009}
1787 2010
1788/** 2011/**
@@ -1803,22 +2026,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
1803} 2026}
1804 2027
1805/** 2028/**
1806 * lpfc_stop_phba_timers - Stop all the timers associated with an HBA 2029 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
1807 * @phba: pointer to lpfc hba data structure. 2030 * @phba: pointer to lpfc hba data structure.
1808 * 2031 *
1809 * This routine stops all the timers associated with a HBA. This function is 2032 * This routine stops all the timers associated with a HBA. This function is
1810 * invoked before either putting a HBA offline or unloading the driver. 2033 * invoked before either putting a HBA offline or unloading the driver.
1811 **/ 2034 **/
1812static void 2035void
1813lpfc_stop_phba_timers(struct lpfc_hba *phba) 2036lpfc_stop_hba_timers(struct lpfc_hba *phba)
1814{ 2037{
1815 del_timer_sync(&phba->fcp_poll_timer);
1816 lpfc_stop_vport_timers(phba->pport); 2038 lpfc_stop_vport_timers(phba->pport);
1817 del_timer_sync(&phba->sli.mbox_tmo); 2039 del_timer_sync(&phba->sli.mbox_tmo);
1818 del_timer_sync(&phba->fabric_block_timer); 2040 del_timer_sync(&phba->fabric_block_timer);
1819 phba->hb_outstanding = 0;
1820 del_timer_sync(&phba->hb_tmofunc);
1821 del_timer_sync(&phba->eratt_poll); 2041 del_timer_sync(&phba->eratt_poll);
2042 del_timer_sync(&phba->hb_tmofunc);
2043 phba->hb_outstanding = 0;
2044
2045 switch (phba->pci_dev_grp) {
2046 case LPFC_PCI_DEV_LP:
2047 /* Stop any LightPulse device specific driver timers */
2048 del_timer_sync(&phba->fcp_poll_timer);
2049 break;
2050 case LPFC_PCI_DEV_OC:
2051 /* Stop any OneConnect device specific driver timers */
2052 break;
2053 default:
2054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2055 "0297 Invalid device group (x%x)\n",
2056 phba->pci_dev_grp);
2057 break;
2058 }
1822 return; 2059 return;
1823} 2060}
1824 2061
@@ -1878,14 +2115,21 @@ lpfc_online(struct lpfc_hba *phba)
1878 return 1; 2115 return 1;
1879 } 2116 }
1880 2117
1881 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 2118 if (phba->sli_rev == LPFC_SLI_REV4) {
1882 lpfc_unblock_mgmt_io(phba); 2119 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
1883 return 1; 2120 lpfc_unblock_mgmt_io(phba);
2121 return 1;
2122 }
2123 } else {
2124 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2125 lpfc_unblock_mgmt_io(phba);
2126 return 1;
2127 }
1884 } 2128 }
1885 2129
1886 vports = lpfc_create_vport_work_array(phba); 2130 vports = lpfc_create_vport_work_array(phba);
1887 if (vports != NULL) 2131 if (vports != NULL)
1888 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2132 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1889 struct Scsi_Host *shost; 2133 struct Scsi_Host *shost;
1890 shost = lpfc_shost_from_vport(vports[i]); 2134 shost = lpfc_shost_from_vport(vports[i]);
1891 spin_lock_irq(shost->host_lock); 2135 spin_lock_irq(shost->host_lock);
@@ -1947,11 +2191,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1947 /* Issue an unreg_login to all nodes on all vports */ 2191 /* Issue an unreg_login to all nodes on all vports */
1948 vports = lpfc_create_vport_work_array(phba); 2192 vports = lpfc_create_vport_work_array(phba);
1949 if (vports != NULL) { 2193 if (vports != NULL) {
1950 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2194 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1951 struct Scsi_Host *shost; 2195 struct Scsi_Host *shost;
1952 2196
1953 if (vports[i]->load_flag & FC_UNLOADING) 2197 if (vports[i]->load_flag & FC_UNLOADING)
1954 continue; 2198 continue;
2199 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
1955 shost = lpfc_shost_from_vport(vports[i]); 2200 shost = lpfc_shost_from_vport(vports[i]);
1956 list_for_each_entry_safe(ndlp, next_ndlp, 2201 list_for_each_entry_safe(ndlp, next_ndlp,
1957 &vports[i]->fc_nodes, 2202 &vports[i]->fc_nodes,
@@ -1975,7 +2220,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1975 } 2220 }
1976 lpfc_destroy_vport_work_array(phba, vports); 2221 lpfc_destroy_vport_work_array(phba, vports);
1977 2222
1978 lpfc_sli_flush_mbox_queue(phba); 2223 lpfc_sli_mbox_sys_shutdown(phba);
1979} 2224}
1980 2225
1981/** 2226/**
@@ -1996,11 +2241,11 @@ lpfc_offline(struct lpfc_hba *phba)
1996 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2241 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1997 return; 2242 return;
1998 2243
1999 /* stop all timers associated with this hba */ 2244 /* stop port and all timers associated with this hba */
2000 lpfc_stop_phba_timers(phba); 2245 lpfc_stop_port(phba);
2001 vports = lpfc_create_vport_work_array(phba); 2246 vports = lpfc_create_vport_work_array(phba);
2002 if (vports != NULL) 2247 if (vports != NULL)
2003 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 2248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2004 lpfc_stop_vport_timers(vports[i]); 2249 lpfc_stop_vport_timers(vports[i]);
2005 lpfc_destroy_vport_work_array(phba, vports); 2250 lpfc_destroy_vport_work_array(phba, vports);
2006 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2251 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2013,7 +2258,7 @@ lpfc_offline(struct lpfc_hba *phba)
2013 spin_unlock_irq(&phba->hbalock); 2258 spin_unlock_irq(&phba->hbalock);
2014 vports = lpfc_create_vport_work_array(phba); 2259 vports = lpfc_create_vport_work_array(phba);
2015 if (vports != NULL) 2260 if (vports != NULL)
2016 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2261 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2017 shost = lpfc_shost_from_vport(vports[i]); 2262 shost = lpfc_shost_from_vport(vports[i]);
2018 spin_lock_irq(shost->host_lock); 2263 spin_lock_irq(shost->host_lock);
2019 vports[i]->work_port_events = 0; 2264 vports[i]->work_port_events = 0;
@@ -2106,6 +2351,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2106 shost->max_lun = vport->cfg_max_luns; 2351 shost->max_lun = vport->cfg_max_luns;
2107 shost->this_id = -1; 2352 shost->this_id = -1;
2108 shost->max_cmd_len = 16; 2353 shost->max_cmd_len = 16;
2354 if (phba->sli_rev == LPFC_SLI_REV4) {
2355 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2356 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2357 }
2109 2358
2110 /* 2359 /*
2111 * Set initial can_queue value since 0 is no longer supported and 2360 * Set initial can_queue value since 0 is no longer supported and
@@ -2123,6 +2372,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2123 2372
2124 /* Initialize all internally managed lists. */ 2373 /* Initialize all internally managed lists. */
2125 INIT_LIST_HEAD(&vport->fc_nodes); 2374 INIT_LIST_HEAD(&vport->fc_nodes);
2375 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2126 spin_lock_init(&vport->work_port_lock); 2376 spin_lock_init(&vport->work_port_lock);
2127 2377
2128 init_timer(&vport->fc_disctmo); 2378 init_timer(&vport->fc_disctmo);
@@ -2314,15 +2564,3461 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2314} 2564}
2315 2565
2316/** 2566/**
2317 * lpfc_enable_msix - Enable MSI-X interrupt mode 2567 * lpfc_stop_port_s3 - Stop SLI3 device port
2568 * @phba: pointer to lpfc hba data structure.
2569 *
2570 * This routine is invoked to stop an SLI3 device port; it stops the device
2571 * from generating interrupts and stops the device driver's timers for the
2572 * device.
2573 **/
2574static void
2575lpfc_stop_port_s3(struct lpfc_hba *phba)
2576{
2577 /* Clear all interrupt enable conditions */
2578 writel(0, phba->HCregaddr);
2579 readl(phba->HCregaddr); /* flush */
2580 /* Clear all pending interrupts */
2581 writel(0xffffffff, phba->HAregaddr);
2582 readl(phba->HAregaddr); /* flush */
2583
2584 /* Reset some HBA SLI setup states */
2585 lpfc_stop_hba_timers(phba);
2586 phba->pport->work_port_events = 0;
2587}
2588
2589/**
2590 * lpfc_stop_port_s4 - Stop SLI4 device port
2591 * @phba: pointer to lpfc hba data structure.
2592 *
2593 * This routine is invoked to stop an SLI4 device port; it stops the device
2594 * from generating interrupts and stops the device driver's timers for the
2595 * device.
2596 **/
2597static void
2598lpfc_stop_port_s4(struct lpfc_hba *phba)
2599{
2600 /* Reset some HBA SLI4 setup states */
2601 lpfc_stop_hba_timers(phba);
2602 phba->pport->work_port_events = 0;
2603 phba->sli4_hba.intr_enable = 0;
2604 /* Hard clear it for now, shall have more graceful way to wait later */
2605 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2606}
2607
2608/**
2609 * lpfc_stop_port - Wrapper function for stopping hba port
2610 * @phba: Pointer to HBA context object.
2611 *
2612 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
2613 * the API jump table function pointer in the lpfc_hba struct.
2614 **/
2615void
2616lpfc_stop_port(struct lpfc_hba *phba)
2617{
2618 phba->lpfc_stop_port(phba);
2619}
2620
2621/**
2622 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2623 * @phba: pointer to lpfc hba data structure.
2624 *
2625 * This routine is invoked to remove the driver default fcf record from
2626 * the port. This routine currently acts on FCF Index 0.
2627 *
2628 **/
2629void
2630lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2631{
2632 int rc = 0;
2633 LPFC_MBOXQ_t *mboxq;
2634 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2635 uint32_t mbox_tmo, req_len;
2636 uint32_t shdr_status, shdr_add_status;
2637
2638 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2639 if (!mboxq) {
2640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2641 "2020 Failed to allocate mbox for ADD_FCF cmd\n");
2642 return;
2643 }
2644
2645 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2646 sizeof(struct lpfc_sli4_cfg_mhdr);
2647 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2648 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2649 req_len, LPFC_SLI4_MBX_EMBED);
2650 /*
2651 * In phase 1, there is a single FCF index, 0. In phase 2, the driver
2652 * supports multiple FCF indices.
2653 */
2654 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2655 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2656 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2657 phba->fcf.fcf_indx);
2658
2659 if (!phba->sli4_hba.intr_enable)
2660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2661 else {
2662 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2663 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2664 }
2665 /* The IOCTL status is embedded in the mailbox subheader. */
2666 shdr_status = bf_get(lpfc_mbox_hdr_status,
2667 &del_fcf_record->header.cfg_shdr.response);
2668 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2669 &del_fcf_record->header.cfg_shdr.response);
2670 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2671 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2672 "2516 DEL FCF of default FCF Index failed "
2673 "mbx status x%x, status x%x add_status x%x\n",
2674 rc, shdr_status, shdr_add_status);
2675 }
2676 if (rc != MBX_TIMEOUT)
2677 mempool_free(mboxq, phba->mbox_mem_pool);
2678}
2679
2680/**
2681 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2682 * @phba: pointer to lpfc hba data structure.
2683 * @acqe_link: pointer to the async link completion queue entry.
2684 *
2685 * This routine is to parse the SLI4 link-attention link fault code and
2686 * translate it into the base driver's read link attention mailbox command
2687 * status.
2688 *
2689 * Return: Link-attention status in terms of base driver's coding.
2690 **/
2691static uint16_t
2692lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2693 struct lpfc_acqe_link *acqe_link)
2694{
2695 uint16_t latt_fault;
2696
2697 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2698 case LPFC_ASYNC_LINK_FAULT_NONE:
2699 case LPFC_ASYNC_LINK_FAULT_LOCAL:
2700 case LPFC_ASYNC_LINK_FAULT_REMOTE:
2701 latt_fault = 0;
2702 break;
2703 default:
2704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2705 "0398 Invalid link fault code: x%x\n",
2706 bf_get(lpfc_acqe_link_fault, acqe_link));
2707 latt_fault = MBXERR_ERROR;
2708 break;
2709 }
2710 return latt_fault;
2711}
2712
2713/**
2714 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2715 * @phba: pointer to lpfc hba data structure.
2716 * @acqe_link: pointer to the async link completion queue entry.
2717 *
2718 * This routine is to parse the SLI4 link attention type and translate it
2719 * into the base driver's link attention type coding.
2720 *
2721 * Return: Link attention type in terms of base driver's coding.
2722 **/
2723static uint8_t
2724lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2725 struct lpfc_acqe_link *acqe_link)
2726{
2727 uint8_t att_type;
2728
2729 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2730 case LPFC_ASYNC_LINK_STATUS_DOWN:
2731 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2732 att_type = AT_LINK_DOWN;
2733 break;
2734 case LPFC_ASYNC_LINK_STATUS_UP:
2735 /* Ignore physical link up events - wait for logical link up */
2736 att_type = AT_RESERVED;
2737 break;
2738 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2739 att_type = AT_LINK_UP;
2740 break;
2741 default:
2742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2743 "0399 Invalid link attention type: x%x\n",
2744 bf_get(lpfc_acqe_link_status, acqe_link));
2745 att_type = AT_RESERVED;
2746 break;
2747 }
2748 return att_type;
2749}
2750
2751/**
2752 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2753 * @phba: pointer to lpfc hba data structure.
2754 * @acqe_link: pointer to the async link completion queue entry.
2755 *
2756 * This routine is to parse the SLI4 link-attention link speed and translate
2757 * it into the base driver's link-attention link speed coding.
2758 *
2759 * Return: Link-attention link speed in terms of base driver's coding.
2760 **/
2761static uint8_t
2762lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2763 struct lpfc_acqe_link *acqe_link)
2764{
2765 uint8_t link_speed;
2766
2767 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2768 case LPFC_ASYNC_LINK_SPEED_ZERO:
2769 link_speed = LA_UNKNW_LINK;
2770 break;
2771 case LPFC_ASYNC_LINK_SPEED_10MBPS:
2772 link_speed = LA_UNKNW_LINK;
2773 break;
2774 case LPFC_ASYNC_LINK_SPEED_100MBPS:
2775 link_speed = LA_UNKNW_LINK;
2776 break;
2777 case LPFC_ASYNC_LINK_SPEED_1GBPS:
2778 link_speed = LA_1GHZ_LINK;
2779 break;
2780 case LPFC_ASYNC_LINK_SPEED_10GBPS:
2781 link_speed = LA_10GHZ_LINK;
2782 break;
2783 default:
2784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2785 "0483 Invalid link-attention link speed: x%x\n",
2786 bf_get(lpfc_acqe_link_speed, acqe_link));
2787 link_speed = LA_UNKNW_LINK;
2788 break;
2789 }
2790 return link_speed;
2791}
2792
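The three parse routines above extract ACQE fields with bf_get(). In this driver's convention those accessors are plain shift-and-mask macros driven by per-field _SHIFT/_MASK/_WORD definitions; the sketch below shows that convention with a hypothetical field (the macro shape follows lpfc_hw4.h, while the field and struct names are made up for illustration).

/* Hypothetical field occupying bits 15:8 of word0 */
#define example_speed_SHIFT	8
#define example_speed_MASK	0x000000ff
#define example_speed_WORD	word0

#define bf_get(name, ptr) \
	((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

struct example_acqe {
	uint32_t word0;
};

/* bf_get(example_speed, &acqe) expands to (((&acqe)->word0 >> 8) & 0xff) */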
2793/**
2794 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2795 * @phba: pointer to lpfc hba data structure.
2796 * @acqe_link: pointer to the async link completion queue entry.
2797 *
2798 * This routine is to handle the SLI4 asynchronous link event.
2799 **/
2800static void
2801lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2802 struct lpfc_acqe_link *acqe_link)
2803{
2804 struct lpfc_dmabuf *mp;
2805 LPFC_MBOXQ_t *pmb;
2806 MAILBOX_t *mb;
2807 READ_LA_VAR *la;
2808 uint8_t att_type;
2809
2810 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2811 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2812 return;
2813 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2814 if (!pmb) {
2815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2816 "0395 The mboxq allocation failed\n");
2817 return;
2818 }
2819 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2820 if (!mp) {
2821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2822 "0396 The lpfc_dmabuf allocation failed\n");
2823 goto out_free_pmb;
2824 }
2825 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2826 if (!mp->virt) {
2827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2828 "0397 The mbuf allocation failed\n");
2829 goto out_free_dmabuf;
2830 }
2831
2832 /* Cleanup any outstanding ELS commands */
2833 lpfc_els_flush_all_cmd(phba);
2834
2835 /* Block ELS IOCBs until we are done processing the link event */
2836 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2837
2838 /* Update link event statistics */
2839 phba->sli.slistat.link_event++;
2840
2841 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2842 lpfc_read_la(phba, pmb, mp);
2843 pmb->vport = phba->pport;
2844
2845 /* Parse and translate status field */
2846 mb = &pmb->u.mb;
2847 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2848
2849 /* Parse and translate link attention fields */
2850 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2851 la->eventTag = acqe_link->event_tag;
2852 la->attType = att_type;
2853 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2854
2855 /* Fake the following irrelevant fields */
2856 la->topology = TOPOLOGY_PT_PT;
2857 la->granted_AL_PA = 0;
2858 la->il = 0;
2859 la->pb = 0;
2860 la->fa = 0;
2861 la->mm = 0;
2862
2863 /* Keep the link status for extra SLI4 state machine reference */
2864 phba->sli4_hba.link_state.speed =
2865 bf_get(lpfc_acqe_link_speed, acqe_link);
2866 phba->sli4_hba.link_state.duplex =
2867 bf_get(lpfc_acqe_link_duplex, acqe_link);
2868 phba->sli4_hba.link_state.status =
2869 bf_get(lpfc_acqe_link_status, acqe_link);
2870 phba->sli4_hba.link_state.physical =
2871 bf_get(lpfc_acqe_link_physical, acqe_link);
2872 phba->sli4_hba.link_state.fault =
2873 bf_get(lpfc_acqe_link_fault, acqe_link);
2874
2875 /* Invoke the lpfc_handle_latt mailbox command callback function */
2876 lpfc_mbx_cmpl_read_la(phba, pmb);
2877
2878 return;
2879
2880out_free_dmabuf:
2881 kfree(mp);
2882out_free_pmb:
2883 mempool_free(pmb, phba->mbox_mem_pool);
2884}
2885
2886/**
2887 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2888 * @phba: pointer to lpfc hba data structure.
2889 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2890 *
2891 * This routine is to handle the SLI4 asynchronous fcoe event.
2892 **/
2893static void
2894lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2895 struct lpfc_acqe_fcoe *acqe_fcoe)
2896{
2897 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2898 int rc;
2899
2900 switch (event_type) {
2901 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2902 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2903 "2546 New FCF found index 0x%x tag 0x%x \n",
2904 acqe_fcoe->fcf_index,
2905 acqe_fcoe->event_tag);
2906 /*
2907 * If the current FCF is in discovered state,
2908 * do nothing.
2909 */
2910 spin_lock_irq(&phba->hbalock);
2911 if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2912 spin_unlock_irq(&phba->hbalock);
2913 break;
2914 }
2915 spin_unlock_irq(&phba->hbalock);
2916
2917 /* Read the FCF table and re-discover SAN. */
2918 rc = lpfc_sli4_read_fcf_record(phba,
2919 LPFC_FCOE_FCF_GET_FIRST);
2920 if (rc)
2921 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2922 "2547 Read FCF record failed 0x%x\n",
2923 rc);
2924 break;
2925
2926 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2928 "2548 FCF Table full count 0x%x tag 0x%x \n",
2929 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2930 acqe_fcoe->event_tag);
2931 break;
2932
2933 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2934 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2935 "2549 FCF disconnected fron network index 0x%x"
2936 " tag 0x%x \n", acqe_fcoe->fcf_index,
2937 acqe_fcoe->event_tag);
2938 /* If the event is not for currently used fcf do nothing */
2939 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2940 break;
2941 /*
2942 * Currently, the driver supports only one FCF - so treat this as
2943 * a link down.
2944 */
2945 lpfc_linkdown(phba);
2946 /* Unregister FCF if no devices connected to it */
2947 lpfc_unregister_unused_fcf(phba);
2948 break;
2949
2950 default:
2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2952 "0288 Unknown FCoE event type 0x%x event tag "
2953 "0x%x\n", event_type, acqe_fcoe->event_tag);
2954 break;
2955 }
2956}
2957
2958/**
2959 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2960 * @phba: pointer to lpfc hba data structure.
2961 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
2962 *
2963 * This routine is to handle the SLI4 asynchronous dcbx event.
2964 **/
2965static void
2966lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2967 struct lpfc_acqe_dcbx *acqe_dcbx)
2968{
2969 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2970 "0290 The SLI4 DCBX asynchronous event is not "
2971 "handled yet\n");
2972}
2973
2974/**
2975 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2976 * @phba: pointer to lpfc hba data structure.
2977 *
2978 * This routine is invoked by the worker thread to process all the pending
2979 * SLI4 asynchronous events.
2980 **/
2981void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2982{
2983 struct lpfc_cq_event *cq_event;
2984
2985 /* First, declare the async event has been handled */
2986 spin_lock_irq(&phba->hbalock);
2987 phba->hba_flag &= ~ASYNC_EVENT;
2988 spin_unlock_irq(&phba->hbalock);
2989 /* Now, handle all the async events */
2990 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2991 /* Get the first event from the head of the event queue */
2992 spin_lock_irq(&phba->hbalock);
2993 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2994 cq_event, struct lpfc_cq_event, list);
2995 spin_unlock_irq(&phba->hbalock);
2996 /* Process the asynchronous event */
2997 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2998 case LPFC_TRAILER_CODE_LINK:
2999 lpfc_sli4_async_link_evt(phba,
3000 &cq_event->cqe.acqe_link);
3001 break;
3002 case LPFC_TRAILER_CODE_FCOE:
3003 lpfc_sli4_async_fcoe_evt(phba,
3004 &cq_event->cqe.acqe_fcoe);
3005 break;
3006 case LPFC_TRAILER_CODE_DCBX:
3007 lpfc_sli4_async_dcbx_evt(phba,
3008 &cq_event->cqe.acqe_dcbx);
3009 break;
3010 default:
3011 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3012 "1804 Invalid asynchrous event code: "
3013 "x%x\n", bf_get(lpfc_trailer_code,
3014 &cq_event->cqe.mcqe_cmpl));
3015 break;
3016 }
3017 /* Free the completion event processed to the free pool */
3018 lpfc_sli4_cq_event_release(phba, cq_event);
3019 }
3020}
3021
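Note the locking shape of the drain loop above: hbalock is held only long enough to detach one event, and is dropped before the handler runs, since handlers such as lpfc_sli4_async_link_evt() can sleep (GFP_KERNEL allocations) and may take hbalock themselves. A generic sketch of the same pattern, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {
	struct list_head list;
};

/* Detach one entry at a time under the lock; process it unlocked. */
static void drain_work_queue(spinlock_t *lock, struct list_head *queue,
			     void (*process)(struct work_item *))
{
	struct work_item *entry;

	spin_lock_irq(lock);
	while (!list_empty(queue)) {
		entry = list_first_entry(queue, struct work_item, list);
		list_del(&entry->list);
		spin_unlock_irq(lock);
		process(entry);		/* may sleep; lock not held */
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
}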
3022/**
3023 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3024 * @phba: pointer to lpfc hba data structure.
3025 * @dev_grp: The HBA PCI-Device group number.
3026 *
3027 * This routine is invoked to set up the per HBA PCI-Device group function
3028 * API jump table entries.
3029 *
3030 * Return: 0 if success, otherwise -ENODEV
3031 **/
3032int
3033lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3034{
3035 int rc;
3036
3037 /* Set up lpfc PCI-device group */
3038 phba->pci_dev_grp = dev_grp;
3039
3040 /* The LPFC_PCI_DEV_OC uses SLI4 */
3041 if (dev_grp == LPFC_PCI_DEV_OC)
3042 phba->sli_rev = LPFC_SLI_REV4;
3043
3044 /* Set up device INIT API function jump table */
3045 rc = lpfc_init_api_table_setup(phba, dev_grp);
3046 if (rc)
3047 return -ENODEV;
3048 /* Set up SCSI API function jump table */
3049 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3050 if (rc)
3051 return -ENODEV;
3052 /* Set up SLI API function jump table */
3053 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3054 if (rc)
3055 return -ENODEV;
3056 /* Set up MBOX API function jump table */
3057 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3058 if (rc)
3059 return -ENODEV;
3060
3061 return 0;
3062}
3063
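lpfc_api_table_setup() above and the lpfc_stop_port() wrapper earlier are two halves of one pattern: probe-time code resolves the SLI-revision-specific routine into a per-HBA function pointer once, and common code then dispatches through the pointer without ever testing the revision again. A reduced sketch of the pattern (struct and helper names are hypothetical; the device-group constants are the driver's own):

#include <linux/errno.h>
#include <linux/types.h>

struct example_hba {
	uint8_t pci_dev_grp;
	void (*example_stop_port)(struct example_hba *);
};

static void example_stop_port_s3(struct example_hba *hba) { /* SLI-3 path */ }
static void example_stop_port_s4(struct example_hba *hba) { /* SLI-4 path */ }

static int example_api_setup(struct example_hba *hba, uint8_t dev_grp)
{
	hba->pci_dev_grp = dev_grp;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:		/* LightPulse HBA: SLI-2/SLI-3 */
		hba->example_stop_port = example_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:		/* OneConnect HBA: SLI-4 */
		hba->example_stop_port = example_stop_port_s4;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

/* Callers dispatch indirectly, exactly as lpfc_stop_port() does: */
static void example_stop(struct example_hba *hba)
{
	hba->example_stop_port(hba);
}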
3064/**
3065 * lpfc_log_intr_mode - Log the active interrupt mode
3066 * @phba: pointer to lpfc hba data structure.
3067 * @intr_mode: active interrupt mode adopted.
3068 *
3069 * This routine is invoked to log the currently used active interrupt mode
3070 * to the device.
3071 **/
3072static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3073{
3074 switch (intr_mode) {
3075 case 0:
3076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3077 "0470 Enable INTx interrupt mode.\n");
3078 break;
3079 case 1:
3080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3081 "0481 Enabled MSI interrupt mode.\n");
3082 break;
3083 case 2:
3084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3085 "0480 Enabled MSI-X interrupt mode.\n");
3086 break;
3087 default:
3088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3089 "0482 Illegal interrupt mode.\n");
3090 break;
3091 }
3092 return;
3093}
3094
3095/**
3096 * lpfc_enable_pci_dev - Enable a generic PCI device.
3097 * @phba: pointer to lpfc hba data structure.
3098 *
3099 * This routine is invoked to enable the PCI device that is common to all
3100 * PCI devices.
3101 *
3102 * Return codes
3103 * 0 - successful
3104 * other values - error
3105 **/
3106static int
3107lpfc_enable_pci_dev(struct lpfc_hba *phba)
3108{
3109 struct pci_dev *pdev;
3110 int bars;
3111
3112 /* Obtain PCI device reference */
3113 if (!phba->pcidev)
3114 goto out_error;
3115 else
3116 pdev = phba->pcidev;
3117 /* Select PCI BARs */
3118 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3119 /* Enable PCI device */
3120 if (pci_enable_device_mem(pdev))
3121 goto out_error;
3122 /* Request PCI resource for the device */
3123 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3124 goto out_disable_device;
3125 /* Set up device as PCI master and save state for EEH */
3126 pci_set_master(pdev);
3127 pci_try_set_mwi(pdev);
3128 pci_save_state(pdev);
3129
3130 return 0;
3131
3132out_disable_device:
3133 pci_disable_device(pdev);
3134out_error:
3135 return -ENODEV;
3136}
3137
3138/**
3139 * lpfc_disable_pci_dev - Disable a generic PCI device.
3140 * @phba: pointer to lpfc hba data structure.
3141 *
3142 * This routine is invoked to disable the PCI device that is common to all
3143 * PCI devices.
3144 **/
3145static void
3146lpfc_disable_pci_dev(struct lpfc_hba *phba)
3147{
3148 struct pci_dev *pdev;
3149 int bars;
3150
3151 /* Obtain PCI device reference */
3152 if (!phba->pcidev)
3153 return;
3154 else
3155 pdev = phba->pcidev;
3156 /* Select PCI BARs */
3157 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3158 /* Release PCI resource and disable PCI device */
3159 pci_release_selected_regions(pdev, bars);
3160 pci_disable_device(pdev);
3161 /* Null out PCI private reference to driver */
3162 pci_set_drvdata(pdev, NULL);
3163
3164 return;
3165}
3166
3167/**
3168 * lpfc_reset_hba - Reset a hba
3169 * @phba: pointer to lpfc hba data structure.
3170 *
3171 * This routine is invoked to reset a hba device. It brings the HBA
3172 * offline, performs a board restart, and then brings the board back
3173 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3174 * outstanding mailbox commands.
3175 **/
3176void
3177lpfc_reset_hba(struct lpfc_hba *phba)
3178{
3179 /* If resets are disabled then set error state and return. */
3180 if (!phba->cfg_enable_hba_reset) {
3181 phba->link_state = LPFC_HBA_ERROR;
3182 return;
3183 }
3184 lpfc_offline_prep(phba);
3185 lpfc_offline(phba);
3186 lpfc_sli_brdrestart(phba);
3187 lpfc_online(phba);
3188 lpfc_unblock_mgmt_io(phba);
3189}
3190
3191/**
3192 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3193 * @phba: pointer to lpfc hba data structure.
3194 *
3195 * This routine is invoked to set up the driver internal resources specific to
3196 * support the SLI-3 HBA device it is attached to.
3197 *
3198 * Return codes
3199 * 0 - successful
3200 * other values - error
3201 **/
3202static int
3203lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3204{
3205 struct lpfc_sli *psli;
3206
3207 /*
3208 * Initialize timers used by driver
3209 */
3210
3211 /* Heartbeat timer */
3212 init_timer(&phba->hb_tmofunc);
3213 phba->hb_tmofunc.function = lpfc_hb_timeout;
3214 phba->hb_tmofunc.data = (unsigned long)phba;
3215
3216 psli = &phba->sli;
3217 /* MBOX heartbeat timer */
3218 init_timer(&psli->mbox_tmo);
3219 psli->mbox_tmo.function = lpfc_mbox_timeout;
3220 psli->mbox_tmo.data = (unsigned long) phba;
3221 /* FCP polling mode timer */
3222 init_timer(&phba->fcp_poll_timer);
3223 phba->fcp_poll_timer.function = lpfc_poll_timeout;
3224 phba->fcp_poll_timer.data = (unsigned long) phba;
3225 /* Fabric block timer */
3226 init_timer(&phba->fabric_block_timer);
3227 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3228 phba->fabric_block_timer.data = (unsigned long) phba;
3229 /* EA polling mode timer */
3230 init_timer(&phba->eratt_poll);
3231 phba->eratt_poll.function = lpfc_poll_eratt;
3232 phba->eratt_poll.data = (unsigned long) phba;
3233
3234 /* Host attention work mask setup */
3235 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3236 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3237
3238 /* Get all the module params for configuring this host */
3239 lpfc_get_cfgparam(phba);
3240 /*
3241 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3242 * used to create the sg_dma_buf_pool must be dynamically calculated.
3243 * 2 segments are added since the IOCB needs a command and response bde.
3244 */
3245 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3246 sizeof(struct fcp_rsp) +
3247 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3248
3249 if (phba->cfg_enable_bg) {
3250 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3251 phba->cfg_sg_dma_buf_size +=
3252 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3253 }
3254
3255 /* Also reinitialize the host templates with new values. */
3256 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3257 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3258
3259 phba->max_vpi = LPFC_MAX_VPI;
3260 /* This will be set to correct value after config_port mbox */
3261 phba->max_vports = 0;
3262
3263 /*
3264 * Initialize the SLI Layer to run with lpfc HBAs.
3265 */
3266 lpfc_sli_setup(phba);
3267 lpfc_sli_queue_setup(phba);
3268
3269 /* Allocate device driver memory */
3270 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3271 return -ENOMEM;
3272
3273 return 0;
3274}
3275
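To make the sg_dma_buf_size arithmetic above concrete, assume the command and response sizes quoted in the SLI-4 sizing table later in this file (cmd 32 bytes, rsp 160 bytes), sizeof(struct ulp_bde64) == 12, and a hypothetical module setting of lpfc_sg_seg_cnt = 64:

/* Worked example under the assumptions stated above:
 *
 *	sg_dma_buf_size = 32			fcp_cmnd
 *			+ 160			fcp_rsp
 *			+ (64 + 2) * 12		one BDE per data segment plus
 *						the command and response BDEs
 *			= 984 bytes per buffer from sg_dma_buf_pool
 *
 * With BlockGuard enabled, cfg_prot_sg_seg_cnt additional BDEs are added
 * on top of this.
 */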
3276/**
3277 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3278 * @phba: pointer to lpfc hba data structure.
3279 *
3280 * This routine is invoked to unset the driver internal resources set up
3281 * specific for supporting the SLI-3 HBA device it is attached to.
3282 **/
3283static void
3284lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3285{
3286 /* Free device driver memory allocated */
3287 lpfc_mem_free_all(phba);
3288
3289 return;
3290}
3291
3292/**
3293 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3294 * @phba: pointer to lpfc hba data structure.
3295 *
3296 * This routine is invoked to set up the driver internal resources specific to
3297 * support the SLI-4 HBA device it is attached to.
3298 *
3299 * Return codes
3300 * 0 - successful
3301 * other values - error
3302 **/
3303static int
3304lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3305{
3306 struct lpfc_sli *psli;
3307 int rc;
3308 int i, hbq_count;
3309
3310 /* Before proceeding, wait for POST done and device ready */
3311 rc = lpfc_sli4_post_status_check(phba);
3312 if (rc)
3313 return -ENODEV;
3314
3315 /*
3316 * Initialize timers used by driver
3317 */
3318
3319 /* Heartbeat timer */
3320 init_timer(&phba->hb_tmofunc);
3321 phba->hb_tmofunc.function = lpfc_hb_timeout;
3322 phba->hb_tmofunc.data = (unsigned long)phba;
3323
3324 psli = &phba->sli;
3325 /* MBOX heartbeat timer */
3326 init_timer(&psli->mbox_tmo);
3327 psli->mbox_tmo.function = lpfc_mbox_timeout;
3328 psli->mbox_tmo.data = (unsigned long) phba;
3329 /* Fabric block timer */
3330 init_timer(&phba->fabric_block_timer);
3331 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3332 phba->fabric_block_timer.data = (unsigned long) phba;
3333 /* EA polling mode timer */
3334 init_timer(&phba->eratt_poll);
3335 phba->eratt_poll.function = lpfc_poll_eratt;
3336 phba->eratt_poll.data = (unsigned long) phba;
3337 /*
3338 * We need to do a READ_CONFIG mailbox command here before
3339 * calling lpfc_get_cfgparam. For VFs this will report the
3340 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3341 * All of the resources allocated
3342 * for this Port are tied to these values.
3343 */
3344 /* Get all the module params for configuring this host */
3345 lpfc_get_cfgparam(phba);
3346 phba->max_vpi = LPFC_MAX_VPI;
3347 /* This will be set to correct value after the read_config mbox */
3348 phba->max_vports = 0;
3349
3350 /* Program the default value of vlan_id and fc_map */
3351 phba->valid_vlan = 0;
3352 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3353 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3354 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3355
3356 /*
3357 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3358 * used to create the sg_dma_buf_pool must be dynamically calculated.
3359 * 2 segments are added since the IOCB needs a command and response bde.
3360 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3361 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3362 * Table of sgl sizes and seg_cnt:
3363 * sgl size, sg_seg_cnt total seg
3364 * 1k 50 52
3365 * 2k 114 116
3366 * 4k 242 244
3367 * 8k 498 500
3368 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3369 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3370 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3371 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3372 */
3373 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3374 phba->cfg_sg_seg_cnt = 50;
3375 else if (phba->cfg_sg_seg_cnt <= 114)
3376 phba->cfg_sg_seg_cnt = 114;
3377 else if (phba->cfg_sg_seg_cnt <= 242)
3378 phba->cfg_sg_seg_cnt = 242;
3379 else
3380 phba->cfg_sg_seg_cnt = 498;
3381
3382 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3383 + sizeof(struct fcp_rsp);
3384 phba->cfg_sg_dma_buf_size +=
3385 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3386
3387 /* Initialize buffer queue management fields */
3388 hbq_count = lpfc_sli_hbq_count();
3389 for (i = 0; i < hbq_count; ++i)
3390 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3391 INIT_LIST_HEAD(&phba->rb_pend_list);
3392 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3393 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3394
3395 /*
3396 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3397 */
3398 /* Initialize the Abort scsi buffer list used by driver */
3399 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3400 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3401 /* This abort list used by worker thread */
3402 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3403
3404 /*
3405 * Initialize driver internal slow-path work queues
3406 */
3407
3408 /* Driver internal slow-path CQ Event pool */
3409 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3410 /* Response IOCB work queue list */
3411 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3412 /* Asynchronous event CQ Event work queue list */
3413 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3414 /* Fast-path XRI aborted CQ Event work queue list */
3415 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3416 /* Slow-path XRI aborted CQ Event work queue list */
3417 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3418 /* Receive queue CQ Event work queue list */
3419 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3420
3421 /* Initialize the driver internal SLI layer lists. */
3422 lpfc_sli_setup(phba);
3423 lpfc_sli_queue_setup(phba);
3424
3425 /* Allocate device driver memory */
3426 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3427 if (rc)
3428 return -ENOMEM;
3429
3430 /* Create the bootstrap mailbox command */
3431 rc = lpfc_create_bootstrap_mbox(phba);
3432 if (unlikely(rc))
3433 goto out_free_mem;
3434
3435 /* Set up the host's endian order with the device. */
3436 rc = lpfc_setup_endian_order(phba);
3437 if (unlikely(rc))
3438 goto out_free_bsmbx;
3439
3440 /* Set up the hba's configuration parameters. */
3441 rc = lpfc_sli4_read_config(phba);
3442 if (unlikely(rc))
3443 goto out_free_bsmbx;
3444
3445 /* Perform a function reset */
3446 rc = lpfc_pci_function_reset(phba);
3447 if (unlikely(rc))
3448 goto out_free_bsmbx;
3449
3450 /* Create all the SLI4 queues */
3451 rc = lpfc_sli4_queue_create(phba);
3452 if (rc)
3453 goto out_free_bsmbx;
3454
3455 /* Create driver internal CQE event pool */
3456 rc = lpfc_sli4_cq_event_pool_create(phba);
3457 if (rc)
3458 goto out_destroy_queue;
3459
3460 /* Initialize and populate the iocb list per host */
3461 rc = lpfc_init_sgl_list(phba);
3462 if (rc) {
3463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3464 "1400 Failed to initialize sgl list.\n");
3465 goto out_destroy_cq_event_pool;
3466 }
3467 rc = lpfc_init_active_sgl_array(phba);
3468 if (rc) {
3469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3470 "1430 Failed to initialize sgl list.\n");
3471 goto out_free_sgl_list;
3472 }
3473
3474 rc = lpfc_sli4_init_rpi_hdrs(phba);
3475 if (rc) {
3476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3477 "1432 Failed to initialize rpi headers.\n");
3478 goto out_free_active_sgl;
3479 }
3480
3481 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3482 phba->cfg_fcp_eq_count), GFP_KERNEL);
3483 if (!phba->sli4_hba.fcp_eq_hdl) {
3484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3485 "2572 Failed allocate memory for fast-path "
3486 "per-EQ handle array\n");
3487 goto out_remove_rpi_hdrs;
3488 }
3489
3490 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3491 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3492 if (!phba->sli4_hba.msix_entries) {
3493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3494 "2573 Failed allocate memory for msi-x "
3495 "interrupt vector entries\n");
3496 goto out_free_fcp_eq_hdl;
3497 }
3498
3499 return rc;
3500
3501out_free_fcp_eq_hdl:
3502 kfree(phba->sli4_hba.fcp_eq_hdl);
3503out_remove_rpi_hdrs:
3504 lpfc_sli4_remove_rpi_hdrs(phba);
3505out_free_active_sgl:
3506 lpfc_free_active_sgl(phba);
3507out_free_sgl_list:
3508 lpfc_free_sgl_list(phba);
3509out_destroy_cq_event_pool:
3510 lpfc_sli4_cq_event_pool_destroy(phba);
3511out_destroy_queue:
3512 lpfc_sli4_queue_destroy(phba);
3513out_free_bsmbx:
3514 lpfc_destroy_bootstrap_mbox(phba);
3515out_free_mem:
3516 lpfc_mem_free(phba);
3517 return rc;
3518}
3519
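The sgl sizing table above can be checked row by row once sizeof(struct sli4_sge) == 16 is plugged in; for example the 4k row:

/* 4k row of the sizing table:
 *
 *	cmd(32) + rsp(160) + (242 + 2) * sizeof(struct sli4_sge)
 *	  = 32 + 160 + 244 * 16
 *	  = 192 + 3904
 *	  = 4096
 *
 * i.e. exactly one 4k page. This is why cfg_sg_seg_cnt is snapped to
 * 50/114/242/498 above: the resulting sgl always ends on a 4k boundary
 * and never straddles one.
 */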
3520/**
3521 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3522 * @phba: pointer to lpfc hba data structure.
3523 *
3524 * This routine is invoked to unset the driver internal resources set up
3525 * specific for supporting the SLI-4 HBA device it is attached to.
3526 **/
3527static void
3528lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3529{
3530 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3531
3532 /* unregister default FCFI from the HBA */
3533 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3534
3535 /* Free the default FCR table */
3536 lpfc_sli_remove_dflt_fcf(phba);
3537
3538 /* Free memory allocated for msi-x interrupt vector entries */
3539 kfree(phba->sli4_hba.msix_entries);
3540
3541 /* Free memory allocated for fast-path work queue handles */
3542 kfree(phba->sli4_hba.fcp_eq_hdl);
3543
3544 /* Free the allocated rpi headers. */
3545 lpfc_sli4_remove_rpi_hdrs(phba);
3546
3547 /* Free the ELS sgl list */
3548 lpfc_free_active_sgl(phba);
3549 lpfc_free_sgl_list(phba);
3550
3551 /* Free the SCSI sgl management array */
3552 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3553
3554 /* Free the SLI4 queues */
3555 lpfc_sli4_queue_destroy(phba);
3556
3557 /* Free the completion queue EQ event pool */
3558 lpfc_sli4_cq_event_release_all(phba);
3559 lpfc_sli4_cq_event_pool_destroy(phba);
3560
3561 /* Reset SLI4 HBA FCoE function */
3562 lpfc_pci_function_reset(phba);
3563
3564 /* Free the bsmbx region. */
3565 lpfc_destroy_bootstrap_mbox(phba);
3566
3567 /* Free the SLI Layer memory with SLI4 HBAs */
3568 lpfc_mem_free_all(phba);
3569
3570 /* Free the current connect table */
3571 list_for_each_entry_safe(conn_entry, next_conn_entry,
3572 &phba->fcf_conn_rec_list, list)
3573 kfree(conn_entry);
3574
3575 return;
3576}
3577
3578/**
3579 * lpfc_init_api_table_setup - Set up init api function jump table
3580 * @phba: The hba struct for which this call is being executed.
3581 * @dev_grp: The HBA PCI-Device group number.
3582 *
3583 * This routine sets up the device INIT interface API function jump table
3584 * in @phba struct.
3585 *
3586 * Returns: 0 - success, -ENODEV - failure.
3587 **/
3588int
3589lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3590{
3591 switch (dev_grp) {
3592 case LPFC_PCI_DEV_LP:
3593 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3594 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3595 phba->lpfc_stop_port = lpfc_stop_port_s3;
3596 break;
3597 case LPFC_PCI_DEV_OC:
3598 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3599 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3600 phba->lpfc_stop_port = lpfc_stop_port_s4;
3601 break;
3602 default:
3603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3604 "1431 Invalid HBA PCI-device group: 0x%x\n",
3605 dev_grp);
3606 return -ENODEV;
3607 break;
3608 }
3609 return 0;
3610}
3611
3612/**
3613 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3614 * @phba: pointer to lpfc hba data structure.
3615 *
3616 * This routine is invoked to set up the driver internal resources before the
3617 * device specific resource setup to support the HBA device it is attached to.
3618 *
3619 * Return codes
3620 * 0 - successful
3621 * other values - error
3622 **/
3623static int
3624lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3625{
3626 /*
3627 * Driver resources common to all SLI revisions
3628 */
3629 atomic_set(&phba->fast_event_count, 0);
3630 spin_lock_init(&phba->hbalock);
3631
3632 /* Initialize ndlp management spinlock */
3633 spin_lock_init(&phba->ndlp_lock);
3634
3635 INIT_LIST_HEAD(&phba->port_list);
3636 INIT_LIST_HEAD(&phba->work_list);
3637 init_waitqueue_head(&phba->wait_4_mlo_m_q);
3638
3639 /* Initialize the wait queue head for the kernel thread */
3640 init_waitqueue_head(&phba->work_waitq);
3641
3642 /* Initialize the scsi buffer list used by driver for scsi IO */
3643 spin_lock_init(&phba->scsi_buf_list_lock);
3644 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3645
3646 /* Initialize the fabric iocb list */
3647 INIT_LIST_HEAD(&phba->fabric_iocb_list);
3648
3649 /* Initialize list to save ELS buffers */
3650 INIT_LIST_HEAD(&phba->elsbuf);
3651
3652 /* Initialize FCF connection rec list */
3653 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3654
3655 return 0;
3656}
3657
3658/**
3659 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3660 * @phba: pointer to lpfc hba data structure.
3661 *
3662 * This routine is invoked to set up the driver internal resources after the
3663 * device specific resource setup to support the HBA device it is attached to.
3664 *
3665 * Return codes
3666 * 0 - successful
3667 * other values - error
3668 **/
3669static int
3670lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3671{
3672 int error;
3673
3674 /* Startup the kernel thread for this host adapter. */
3675 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3676 "lpfc_worker_%d", phba->brd_no);
3677 if (IS_ERR(phba->worker_thread)) {
3678 error = PTR_ERR(phba->worker_thread);
3679 return error;
3680 }
3681
3682 return 0;
3683}
3684
3685/**
3686 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3687 * @phba: pointer to lpfc hba data structure.
3688 *
3689 * This routine is invoked to unset the driver internal resources set up after
3690 * the device specific resource setup for supporting the HBA device it
3691 * is attached to.
3692 **/
3693static void
3694lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3695{
3696 /* Stop kernel worker thread */
3697 kthread_stop(phba->worker_thread);
3698}
3699
3700/**
3701 * lpfc_free_iocb_list - Free iocb list.
3702 * @phba: pointer to lpfc hba data structure.
3703 *
3704 * This routine is invoked to free the driver's IOCB list and memory.
3705 **/
3706static void
3707lpfc_free_iocb_list(struct lpfc_hba *phba)
3708{
3709 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3710
3711 spin_lock_irq(&phba->hbalock);
3712 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3713 &phba->lpfc_iocb_list, list) {
3714 list_del(&iocbq_entry->list);
3715 kfree(iocbq_entry);
3716 phba->total_iocbq_bufs--;
3717 }
3718 spin_unlock_irq(&phba->hbalock);
3719
3720 return;
3721}
3722
3723/**
3724 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3725 * @phba: pointer to lpfc hba data structure.
3726 *
3727 * This routine is invoked to allocate and initialize the driver's IOCB
3728 * list and set up the IOCB tag array accordingly.
3729 *
3730 * Return codes
3731 * 0 - successful
3732 * other values - error
3733 **/
3734static int
3735lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3736{
3737 struct lpfc_iocbq *iocbq_entry = NULL;
3738 uint16_t iotag;
3739 int i;
3740
3741 /* Initialize and populate the iocb list per host. */
3742 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3743 for (i = 0; i < iocb_count; i++) {
3744 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3745 if (iocbq_entry == NULL) {
3746 printk(KERN_ERR "%s: only allocated %d iocbs of "
3747 "expected %d count. Unloading driver.\n",
3748 __func__, i, LPFC_IOCB_LIST_CNT);
3749 goto out_free_iocbq;
3750 }
3751
3752 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3753 if (iotag == 0) {
3754 kfree(iocbq_entry);
3755 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3756 "Unloading driver.\n", __func__);
3757 goto out_free_iocbq;
3758 }
3759 iocbq_entry->sli4_xritag = NO_XRI;
3760
3761 spin_lock_irq(&phba->hbalock);
3762 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3763 phba->total_iocbq_bufs++;
3764 spin_unlock_irq(&phba->hbalock);
3765 }
3766
3767 return 0;
3768
3769out_free_iocbq:
3770 lpfc_free_iocb_list(phba);
3771
3772 return -ENOMEM;
3773}
3774
3775/**
3776 * lpfc_free_sgl_list - Free sgl list.
3777 * @phba: pointer to lpfc hba data structure.
3778 *
3779 * This routine is invoked to free the driver's sgl list and memory.
3780 **/
3781static void
3782lpfc_free_sgl_list(struct lpfc_hba *phba)
3783{
3784 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3785 LIST_HEAD(sglq_list);
3786 int rc = 0;
3787
3788 spin_lock_irq(&phba->hbalock);
3789 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3790 spin_unlock_irq(&phba->hbalock);
3791
3792 list_for_each_entry_safe(sglq_entry, sglq_next,
3793 &sglq_list, list) {
3794 list_del(&sglq_entry->list);
3795 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3796 kfree(sglq_entry);
3797 phba->sli4_hba.total_sglq_bufs--;
3798 }
3799 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3800 if (rc) {
3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3802 "2005 Unable to deregister pages from HBA: %x", rc);
3803 }
3804 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3805}
3806
3807/**
3808 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3809 * @phba: pointer to lpfc hba data structure.
3810 *
3811 * This routine is invoked to allocate the driver's active sgl memory.
3812 * This array will hold the sglq_entry's for active IOs.
3813 **/
3814static int
3815lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3816{
3817 int size;
3818 size = sizeof(struct lpfc_sglq *);
3819 size *= phba->sli4_hba.max_cfg_param.max_xri;
3820
3821 phba->sli4_hba.lpfc_sglq_active_list =
3822 kzalloc(size, GFP_KERNEL);
3823 if (!phba->sli4_hba.lpfc_sglq_active_list)
3824 return -ENOMEM;
3825 return 0;
3826}
3827
3828/**
3829 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3830 * @phba: pointer to lpfc hba data structure.
3831 *
3832 * This routine is invoked to walk through the array of active sglq entries
3833 * and free all of the resources.
3834 * This is just a placeholder for now.
3835 **/
3836static void
3837lpfc_free_active_sgl(struct lpfc_hba *phba)
3838{
3839 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3840}
3841
3842/**
3843 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3844 * @phba: pointer to lpfc hba data structure.
3845 *
3846 * This routine is invoked to allocate and initialize the driver's sgl
3847 * list and set up the sgl xritag tag array accordingly.
3848 *
3849 * Return codes
3850 * 0 - successful
3851 * other values - error
3852 **/
3853static int
3854lpfc_init_sgl_list(struct lpfc_hba *phba)
3855{
3856 struct lpfc_sglq *sglq_entry = NULL;
3857 int i;
3858 int els_xri_cnt;
3859
3860 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3861 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3862 "2400 lpfc_init_sgl_list els %d.\n",
3863 els_xri_cnt);
3864 /* Initialize and populate the sglq list per host/VF. */
3865 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3866 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3867
3868 /* Sanity check on XRI management */
3869 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3871 "2562 No room left for SCSI XRI allocation: "
3872 "max_xri=%d, els_xri=%d\n",
3873 phba->sli4_hba.max_cfg_param.max_xri,
3874 els_xri_cnt);
3875 return -ENOMEM;
3876 }
3877
3878 /* Allocate memory for the ELS XRI management array */
3879 phba->sli4_hba.lpfc_els_sgl_array =
3880 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3881 GFP_KERNEL);
3882
3883 if (!phba->sli4_hba.lpfc_els_sgl_array) {
3884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3885 "2401 Failed to allocate memory for ELS "
3886 "XRI management array of size %d.\n",
3887 els_xri_cnt);
3888 return -ENOMEM;
3889 }
3890
3891 /* Keep the SCSI XRI into the XRI management array */
3892 phba->sli4_hba.scsi_xri_max =
3893 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3894 phba->sli4_hba.scsi_xri_cnt = 0;
3895
3896 phba->sli4_hba.lpfc_scsi_psb_array =
3897 kzalloc((sizeof(struct lpfc_scsi_buf *) *
3898 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3899
3900 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3901 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3902 "2563 Failed to allocate memory for SCSI "
3903 "XRI management array of size %d.\n",
3904 phba->sli4_hba.scsi_xri_max);
3905 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3906 return -ENOMEM;
3907 }
3908
3909 for (i = 0; i < els_xri_cnt; i++) {
3910 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3911 if (sglq_entry == NULL) {
3912 printk(KERN_ERR "%s: only allocated %d sgls of "
3913 "expected %d count. Unloading driver.\n",
3914 __func__, i, els_xri_cnt);
3915 goto out_free_mem;
3916 }
3917
3918 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3919 if (sglq_entry->sli4_xritag == NO_XRI) {
3920 kfree(sglq_entry);
3921 printk(KERN_ERR "%s: failed to allocate XRI.\n"
3922 "Unloading driver.\n", __func__);
3923 goto out_free_mem;
3924 }
3925 sglq_entry->buff_type = GEN_BUFF_TYPE;
3926 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3927 if (sglq_entry->virt == NULL) {
3928 kfree(sglq_entry);
3929 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3930 "Unloading driver.\n", __func__);
3931 goto out_free_mem;
3932 }
3933 sglq_entry->sgl = sglq_entry->virt;
3934 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3935
3936 /* The list order is used by later block SGL registration */
3937 spin_lock_irq(&phba->hbalock);
3938 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3939 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3940 phba->sli4_hba.total_sglq_bufs++;
3941 spin_unlock_irq(&phba->hbalock);
3942 }
3943 return 0;
3944
3945out_free_mem:
3946 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3947 lpfc_free_sgl_list(phba);
3948 return -ENOMEM;
3949}
3950
3951/**
3952 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3953 * @phba: pointer to lpfc hba data structure.
3954 *
3955 * This routine is invoked to post rpi header templates to the
3956 * HBA consistent with the SLI-4 interface spec. This routine
3957 * posts a PAGE_SIZE memory region to the port to hold up to
3958 * PAGE_SIZE / 64 rpi context headers.
3959 * No locks are held here because this is an initialization routine
3960 * called only from probe or lpfc_online when interrupts are not
3961 * enabled and the driver is reinitializing the device.
3962 *
3963 * Return codes
3964 * 0 - successful
3965 * ENOMEM - No available memory
3966 * EIO - The mailbox failed to complete successfully.
3967 **/
3968int
3969lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3970{
3971 int rc = 0;
3972 int longs;
3973 uint16_t rpi_count;
3974 struct lpfc_rpi_hdr *rpi_hdr;
3975
3976 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3977
3978 /*
3979 * Provision an rpi bitmask range for discovery. The total count
3980 * is the difference between max and base + 1.
3981 */
3982 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3983 phba->sli4_hba.max_cfg_param.max_rpi - 1;
3984
3985 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3986 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3987 GFP_KERNEL);
3988 if (!phba->sli4_hba.rpi_bmask)
3989 return -ENOMEM;
3990
3991 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3992 if (!rpi_hdr) {
3993 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3994 "0391 Error during rpi post operation\n");
3995 lpfc_sli4_remove_rpis(phba);
3996 rc = -ENODEV;
3997 }
3998
3999 return rc;
4000}
4001
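Assuming LPFC_HDR_TEMPLATE_SIZE == 4096 and LPFC_RPI_HDR_COUNT == 64, the values implied by the kernel-doc above (a 4KB region holding PAGE_SIZE / 64 headers), the bookkeeping works out as follows:

/* One posted region: 4096 bytes / 64 bytes per rpi context header
 *		      = 64 rpi headers per 4KB page.
 *
 * The bitmask sizing is the usual round-up to whole longs; e.g. for a
 * port reporting rpi_count = 4096 with BITS_PER_LONG = 64:
 *
 *	longs = (4096 + 64 - 1) / 64 = 64 longs = 512 bytes of bitmask
 */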
4002/**
4003 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4004 * @phba: pointer to lpfc hba data structure.
4005 *
4006 * This routine is invoked to allocate a single 4KB memory region to
4007 * support rpis and stores them in the phba. This single region
4008 * provides support for up to 64 rpis. The region is used globally
4009 * by the device.
4010 *
4011 * Returns:
4012 * A valid rpi hdr on success.
4013 * A NULL pointer on any failure.
4014 **/
4015struct lpfc_rpi_hdr *
4016lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4017{
4018 uint16_t rpi_limit, curr_rpi_range;
4019 struct lpfc_dmabuf *dmabuf;
4020 struct lpfc_rpi_hdr *rpi_hdr;
4021
4022 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4023 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4024
4025 spin_lock_irq(&phba->hbalock);
4026 curr_rpi_range = phba->sli4_hba.next_rpi;
4027 spin_unlock_irq(&phba->hbalock);
4028
4029 /*
4030 * The port has a limited number of rpis. The increment here
4031 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4032 * and to allow the full max_rpi range per port.
4033 */
4034 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4035 return NULL;
4036
4037 /*
4038 * First allocate the protocol header region for the port. The
4039 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4040 */
4041 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4042 if (!dmabuf)
4043 return NULL;
4044
4045 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4046 LPFC_HDR_TEMPLATE_SIZE,
4047 &dmabuf->phys,
4048 GFP_KERNEL);
4049 if (!dmabuf->virt) {
4050 rpi_hdr = NULL;
4051 goto err_free_dmabuf;
4052 }
4053
4054 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4055 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4056 rpi_hdr = NULL;
4057 goto err_free_coherent;
4058 }
4059
4060 /* Save the rpi header data for cleanup later. */
4061 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4062 if (!rpi_hdr)
4063 goto err_free_coherent;
4064
4065 rpi_hdr->dmabuf = dmabuf;
4066 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4067 rpi_hdr->page_count = 1;
4068 spin_lock_irq(&phba->hbalock);
4069 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4070 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4071
4072 /*
4073 * The next_rpi stores the next modulo-64 rpi value to post
4074 * in any subsequent rpi memory region postings.
4075 */
4076 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4077 spin_unlock_irq(&phba->hbalock);
4078 return rpi_hdr;
4079
4080 err_free_coherent:
4081 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4082 dmabuf->virt, dmabuf->phys);
4083 err_free_dmabuf:
4084 kfree(dmabuf);
4085 return NULL;
4086}
4087
4088/**
4089 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
2318 * @phba: pointer to lpfc hba data structure. 4090 * @phba: pointer to lpfc hba data structure.
2319 * 4091 *
2320 * This routine is invoked to enable the MSI-X interrupt vectors. The kernel 4092 * This routine is invoked to remove all memory resources allocated
2321 * function pci_enable_msix() is called to enable the MSI-X vectors. Note that 4093 * to support rpis. This routine presumes the caller has released all
2322 * pci_enable_msix(), once invoked, enables either all or nothing, depending 4094 * rpis consumed by fabric or port logins and is prepared to have
2323 * on the current availability of PCI vector resources. The device driver is 4095 * the header pages removed.
2324 * responsible for calling the individual request_irq() to register each MSI-X 4096 **/
2325 * vector with a interrupt handler, which is done in this function. Note that 4097void
4098lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4099{
4100 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4101
4102 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4103 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4104 list_del(&rpi_hdr->list);
4105 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4106 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4107 kfree(rpi_hdr->dmabuf);
4108 kfree(rpi_hdr);
4109 }
4110
4111 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4112 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4113}
4114
4115/**
4116 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4117 * @pdev: pointer to pci device data structure.
4118 *
4119 * This routine is invoked to allocate the driver hba data structure for an
4120 * HBA device. If the allocation is successful, the phba reference to the
4121 * PCI device data structure is set.
4122 *
4123 * Return codes
4124 * pointer to @phba - successful
4125 * NULL - error
4126 **/
4127static struct lpfc_hba *
4128lpfc_hba_alloc(struct pci_dev *pdev)
4129{
4130 struct lpfc_hba *phba;
4131
4132 /* Allocate memory for HBA structure */
4133 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4134 if (!phba) {
4135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4136 "1417 Failed to allocate hba struct.\n");
4137 return NULL;
4138 }
4139
4140 /* Set reference to PCI device in HBA structure */
4141 phba->pcidev = pdev;
4142
4143 /* Assign an unused board number */
4144 phba->brd_no = lpfc_get_instance();
4145 if (phba->brd_no < 0) {
4146 kfree(phba);
4147 return NULL;
4148 }
4149
4150 return phba;
4151}
4152
4153/**
4154 * lpfc_hba_free - Free driver hba data structure with a device.
4155 * @phba: pointer to lpfc hba data structure.
4156 *
4157 * This routine is invoked to free the driver hba data structure with an
4158 * HBA device.
4159 **/
4160static void
4161lpfc_hba_free(struct lpfc_hba *phba)
4162{
4163 /* Release the driver assigned board number */
4164 idr_remove(&lpfc_hba_index, phba->brd_no);
4165
4166 kfree(phba);
4167 return;
4168}
4169
4170/**
4171 * lpfc_create_shost - Create hba physical port with associated scsi host.
4172 * @phba: pointer to lpfc hba data structure.
4173 *
4174 * This routine is invoked to create HBA physical port and associate a SCSI
4175 * host with it.
4176 *
4177 * Return codes
4178 * 0 - successful
4179 * other values - error
4180 **/
4181static int
4182lpfc_create_shost(struct lpfc_hba *phba)
4183{
4184 struct lpfc_vport *vport;
4185 struct Scsi_Host *shost;
4186
4187 /* Initialize HBA FC structure */
4188 phba->fc_edtov = FF_DEF_EDTOV;
4189 phba->fc_ratov = FF_DEF_RATOV;
4190 phba->fc_altov = FF_DEF_ALTOV;
4191 phba->fc_arbtov = FF_DEF_ARBTOV;
4192
4193 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4194 if (!vport)
4195 return -ENODEV;
4196
4197 shost = lpfc_shost_from_vport(vport);
4198 phba->pport = vport;
4199 lpfc_debugfs_initialize(vport);
4200 /* Put reference to SCSI host to driver's device private data */
4201 pci_set_drvdata(phba->pcidev, shost);
4202
4203 return 0;
4204}
4205
4206/**
4207 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4208 * @phba: pointer to lpfc hba data structure.
4209 *
4210 * This routine is invoked to destroy HBA physical port and the associated
4211 * SCSI host.
4212 **/
4213static void
4214lpfc_destroy_shost(struct lpfc_hba *phba)
4215{
4216 struct lpfc_vport *vport = phba->pport;
4217
4218 /* Destroy physical port that associated with the SCSI host */
4219 destroy_port(vport);
4220
4221 return;
4222}
4223
4224/**
4225 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4226 * @phba: pointer to lpfc hba data structure.
4227 * @shost: the shost to be used to detect Block guard settings.
4228 *
4229 * This routine sets up the local Block guard protocol settings for @shost.
4230 * This routine also allocates memory for debugging bg buffers.
4231 **/
4232static void
4233lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4234{
4235 int pagecnt = 10;
4236 if (lpfc_prot_mask && lpfc_prot_guard) {
4237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4238 "1478 Registering BlockGuard with the "
4239 "SCSI layer\n");
4240 scsi_host_set_prot(shost, lpfc_prot_mask);
4241 scsi_host_set_guard(shost, lpfc_prot_guard);
4242 }
4243 if (!_dump_buf_data) {
4244  spin_lock_init(&_dump_buf_lock);
4245  while (pagecnt) {
4246 _dump_buf_data =
4247 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4248 if (_dump_buf_data) {
4249 printk(KERN_ERR "BLKGRD allocated %d pages for "
4250 "_dump_buf_data at 0x%p\n",
4251 (1 << pagecnt), _dump_buf_data);
4252 _dump_buf_data_order = pagecnt;
4253 memset(_dump_buf_data, 0,
4254 ((1 << PAGE_SHIFT) << pagecnt));
4255 break;
4256 } else
4257 --pagecnt;
4258 }
4259 if (!_dump_buf_data_order)
4260 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4261 "memory for hexdump\n");
4262 } else
4263 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4264 "\n", _dump_buf_data);
4265 if (!_dump_buf_dif) {
4266 while (pagecnt) {
4267 _dump_buf_dif =
4268 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4269 if (_dump_buf_dif) {
4270 printk(KERN_ERR "BLKGRD allocated %d pages for "
4271 "_dump_buf_dif at 0x%p\n",
4272 (1 << pagecnt), _dump_buf_dif);
4273 _dump_buf_dif_order = pagecnt;
4274 memset(_dump_buf_dif, 0,
4275 ((1 << PAGE_SHIFT) << pagecnt));
4276 break;
4277 } else
4278 --pagecnt;
4279 }
4280 if (!_dump_buf_dif_order)
4281 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4282 "memory for hexdump\n");
4283 } else
4284 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4285 _dump_buf_dif);
4286}
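/*
 * Editor's note (a sketch of the allocation math above, not driver
 * code): __get_free_pages() takes an allocation *order*, so the loops
 * request 2^pagecnt contiguous pages and back off one order at a time
 * on failure, e.g. order 10 -> 1024 pages (4 MB with 4 KB pages),
 * order 9 -> 512 pages, and so on down to order 1.  That is also why
 * the buffers are cleared over (1 << PAGE_SHIFT) << pagecnt bytes,
 * i.e. PAGE_SIZE * 2^pagecnt.
 */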
4287
4288/**
4289 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4290 * @phba: pointer to lpfc hba data structure.
4291 *
4292 * This routine is invoked to perform all the necessary post initialization
4293 * setup for the device.
4294 **/
4295static void
4296lpfc_post_init_setup(struct lpfc_hba *phba)
4297{
4298 struct Scsi_Host *shost;
4299 struct lpfc_adapter_event_header adapter_event;
4300
4301 /* Get the default values for Model Name and Description */
4302 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4303
4304 /*
4305 * hba setup may have changed the hba_queue_depth so we need to
4306 * adjust the value of can_queue.
4307 */
4308 shost = pci_get_drvdata(phba->pcidev);
4309 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4310 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4311 lpfc_setup_bg(phba, shost);
4312
4313 lpfc_host_attrib_init(shost);
4314
4315 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4316 spin_lock_irq(shost->host_lock);
4317 lpfc_poll_start_timer(phba);
4318 spin_unlock_irq(shost->host_lock);
4319 }
4320
4321 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4322 "0428 Perform SCSI scan\n");
4323 /* Send board arrival event to upper layer */
4324 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4325 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4326 fc_host_post_vendor_event(shost, fc_get_event_number(),
4327 sizeof(adapter_event),
4328 (char *) &adapter_event,
4329 LPFC_NL_VENDOR_ID);
4330 return;
4331}
4332
4333/**
4334 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4335 * @phba: pointer to lpfc hba data structure.
4336 *
4337 * This routine is invoked to set up the PCI device memory space for device
4338 * with SLI-3 interface spec.
4339 *
4340 * Return codes
4341 * 0 - successful
4342 * other values - error
4343 **/
4344static int
4345lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4346{
4347 struct pci_dev *pdev;
4348 unsigned long bar0map_len, bar2map_len;
4349 int i, hbq_count;
4350 void *ptr;
4351 int error = -ENODEV;
4352
4353 /* Obtain PCI device reference */
4354 if (!phba->pcidev)
4355 return error;
4356 else
4357 pdev = phba->pcidev;
4358
4359 /* Set the device DMA mask size */
4360 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4361 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4362 return error;
4363
4364 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4365 * required by each mapping.
4366 */
4367 phba->pci_bar0_map = pci_resource_start(pdev, 0);
4368 bar0map_len = pci_resource_len(pdev, 0);
4369
4370 phba->pci_bar2_map = pci_resource_start(pdev, 2);
4371 bar2map_len = pci_resource_len(pdev, 2);
4372
4373 /* Map HBA SLIM to a kernel virtual address. */
4374 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4375 if (!phba->slim_memmap_p) {
4376 dev_printk(KERN_ERR, &pdev->dev,
4377 "ioremap failed for SLIM memory.\n");
4378 goto out;
4379 }
4380
4381 /* Map HBA Control Registers to a kernel virtual address. */
4382 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4383 if (!phba->ctrl_regs_memmap_p) {
4384 dev_printk(KERN_ERR, &pdev->dev,
4385 "ioremap failed for HBA control registers.\n");
4386 goto out_iounmap_slim;
4387 }
4388
4389 /* Allocate memory for SLI-2 structures */
4390 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4391 SLI2_SLIM_SIZE,
4392 &phba->slim2p.phys,
4393 GFP_KERNEL);
4394 if (!phba->slim2p.virt)
4395 goto out_iounmap;
4396
4397 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4398 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4399 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4400 phba->IOCBs = (phba->slim2p.virt +
4401 offsetof(struct lpfc_sli2_slim, IOCBs));
4402
4403 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4404 lpfc_sli_hbq_size(),
4405 &phba->hbqslimp.phys,
4406 GFP_KERNEL);
4407 if (!phba->hbqslimp.virt)
4408 goto out_free_slim;
4409
4410 hbq_count = lpfc_sli_hbq_count();
4411 ptr = phba->hbqslimp.virt;
4412 for (i = 0; i < hbq_count; ++i) {
4413 phba->hbqs[i].hbq_virt = ptr;
4414 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4415 ptr += (lpfc_hbq_defs[i]->entry_count *
4416 sizeof(struct lpfc_hbq_entry));
4417 }
4418 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4419 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4420
4421 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4422
4423 INIT_LIST_HEAD(&phba->rb_pend_list);
4424
4425 phba->MBslimaddr = phba->slim_memmap_p;
4426 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4427 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4428 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4429 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4430
4431 return 0;
4432
4433out_free_slim:
4434 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4435 phba->slim2p.virt, phba->slim2p.phys);
4436out_iounmap:
4437 iounmap(phba->ctrl_regs_memmap_p);
4438out_iounmap_slim:
4439 iounmap(phba->slim_memmap_p);
4440out:
4441 return error;
4442}
4443
4444/**
4445 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4446 * @phba: pointer to lpfc hba data structure.
4447 *
4448 * This routine is invoked to unset the PCI device memory space for device
4449 * with SLI-3 interface spec.
4450 **/
4451static void
4452lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4453{
4454 struct pci_dev *pdev;
4455
4456 /* Obtain PCI device reference */
4457 if (!phba->pcidev)
4458 return;
4459 else
4460 pdev = phba->pcidev;
4461
4462 /* Free coherent DMA memory allocated */
4463 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4464 phba->hbqslimp.virt, phba->hbqslimp.phys);
4465 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4466 phba->slim2p.virt, phba->slim2p.phys);
4467
4468 /* I/O memory unmap */
4469 iounmap(phba->ctrl_regs_memmap_p);
4470 iounmap(phba->slim_memmap_p);
4471
4472 return;
4473}
4474
4475/**
4476 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4477 * @phba: pointer to lpfc hba data structure.
4478 *
4479 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4480 * done and check status.
4481 *
4482 * Return 0 if successful, otherwise -ENODEV.
4483 **/
4484int
4485lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4486{
4487 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4488 uint32_t onlnreg0, onlnreg1;
4489 int i, port_error = -ENODEV;
4490
4491 if (!phba->sli4_hba.STAregaddr)
4492 return -ENODEV;
4493
4494 /* With an unrecoverable error, log the error message and return error */
4495 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4496 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4497 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4498 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4499 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4500 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4502 "1422 HBA Unrecoverable error: "
4503 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4504 "online0_reg=0x%x, online1_reg=0x%x\n",
4505 uerrlo_reg.word0, uerrhi_reg.word0,
4506 onlnreg0, onlnreg1);
4507 }
4508 return -ENODEV;
4509 }
4510
4511 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4512 for (i = 0; i < 3000; i++) {
4513 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4514 /* Encounter fatal POST error, break out */
4515 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4516 port_error = -ENODEV;
4517 break;
4518 }
4519 if (LPFC_POST_STAGE_ARMFW_READY ==
4520 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4521 port_error = 0;
4522 break;
4523 }
4524 msleep(10);
4525 }
4526
4527 if (port_error)
4528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4529 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4530 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4531 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4532 bf_get(lpfc_hst_state_perr, &sta_reg),
4533 bf_get(lpfc_hst_state_sfi, &sta_reg),
4534 bf_get(lpfc_hst_state_nip, &sta_reg),
4535 bf_get(lpfc_hst_state_ipc, &sta_reg),
4536 bf_get(lpfc_hst_state_xrom, &sta_reg),
4537 bf_get(lpfc_hst_state_dl, &sta_reg),
4538 bf_get(lpfc_hst_state_port_status, &sta_reg));
4539
4540 /* Log device information */
4541 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
4542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4543 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4544 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4545 bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4546 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4547 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4548 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4549
4550 return port_error;
4551}
4552
4553/**
4554 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4555 * @phba: pointer to lpfc hba data structure.
4556 *
4557 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4558 * memory map.
4559 **/
4560static void
4561lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4562{
4563 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4564 LPFC_UERR_STATUS_LO;
4565 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4566 LPFC_UERR_STATUS_HI;
4567 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4568 LPFC_ONLINE0;
4569 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4570 LPFC_ONLINE1;
4571 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4572 LPFC_SCRATCHPAD;
4573}
4574
4575/**
4576 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4577 * @phba: pointer to lpfc hba data structure.
4578 *
4579 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4580 * memory map.
4581 **/
4582static void
4583lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4584{
4585
4586 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4587 LPFC_HST_STATE;
4588 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4589 LPFC_HST_ISR0;
4590 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4591 LPFC_HST_IMR0;
4592 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4593 LPFC_HST_ISCR0;
4594 return;
4595}
4596
4597/**
4598 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4599 * @phba: pointer to lpfc hba data structure.
4600 * @vf: virtual function number
4601 *
4602 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4603 * based on the given virtual function number, @vf.
4604 *
4605 * Return 0 if successful, otherwise -ENODEV.
4606 **/
4607static int
4608lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4609{
4610 if (vf > LPFC_VIR_FUNC_MAX)
4611 return -ENODEV;
4612
4613 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4614 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4615 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4616 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4617 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4618 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4619 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4620 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4621 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4622 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4623 return 0;
4624}
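/*
 * Editor's sketch (illustrative only): each virtual function owns one
 * doorbell page, so for vf == 1 the receive-queue doorbell resolves to
 *
 *   RQDBregaddr = drbl_regs_memmap_p + 1 * LPFC_VFR_PAGE_SIZE
 *                 + LPFC_RQ_DOORBELL;
 *
 * with the WQ, EQ/CQ, MQ, and bootstrap-mailbox doorbells at their own
 * fixed offsets inside that same per-VF page.
 */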
4625
4626/**
4627 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4628 * @phba: pointer to lpfc hba data structure.
4629 *
4630 * This routine is invoked to create the bootstrap mailbox
4631 * region consistent with the SLI-4 interface spec. This
4632 * routine allocates all memory necessary to communicate
4633 * mailbox commands to the port and sets up all alignment
4634 * needs. No locks are expected to be held when calling
4635 * this routine.
4636 *
4637 * Return codes
4638 * 0 - successful
4639 * ENOMEM - could not allocate memory.
4640 **/
4641static int
4642lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4643{
4644 uint32_t bmbx_size;
4645 struct lpfc_dmabuf *dmabuf;
4646 struct dma_address *dma_address;
4647 uint32_t pa_addr;
4648 uint64_t phys_addr;
4649
4650 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4651 if (!dmabuf)
4652 return -ENOMEM;
4653
4654 /*
4655 * The bootstrap mailbox region is comprised of 2 parts
4656 * plus an alignment restriction of 16 bytes.
4657 */
4658 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4659 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4660 bmbx_size,
4661 &dmabuf->phys,
4662 GFP_KERNEL);
4663 if (!dmabuf->virt) {
4664 kfree(dmabuf);
4665 return -ENOMEM;
4666 }
4667 memset(dmabuf->virt, 0, bmbx_size);
4668
4669 /*
4670 * Initialize the bootstrap mailbox pointers now so that the register
4671 * operations are simple later. The mailbox dma address is required
4672 * to be 16-byte aligned. Also align the virtual memory as each
4673 * mailbox is copied into the bmbx mailbox region before issuing the
4674 * command to the port.
4675 */
4676 phba->sli4_hba.bmbx.dmabuf = dmabuf;
4677 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4678
4679 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4680 LPFC_ALIGN_16_BYTE);
4681 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4682 LPFC_ALIGN_16_BYTE);
4683
4684 /*
4685 * Set the high and low physical addresses now. The SLI4 alignment
4686 * requirement is 16 bytes and the mailbox is posted to the port
4687 * as two 30-bit addresses. The other data is a bit marking whether
4688 * the 30-bit address is the high or low address.
4689 * Upcast bmbx.aphys to 64 bits so the shift instruction compiles
4690 * cleanly on 32-bit machines.
4691 */
4692 dma_address = &phba->sli4_hba.bmbx.dma_address;
4693 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4694 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4695 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4696 LPFC_BMBX_BIT1_ADDR_HI);
4697
4698 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4699 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4700 LPFC_BMBX_BIT1_ADDR_LO);
4701 return 0;
4702}
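/*
 * Editor's note: a worked example of the address split above.  The
 * value of aphys is made up for illustration; the bit layout follows
 * the code, not a citation of the SLI-4 spec.
 */
#if 0	/* illustrative sketch only, never compiled */
	uint64_t aphys = 0x123456780ULL;	/* 16-byte aligned bus address */
	uint32_t lo = (uint32_t)((aphys >> 4) & 0x3fffffff);	/* bits 33:4 */
	uint32_t hi = (uint32_t)((aphys >> 34) & 0x3fffffff);	/* bits 63:34 */
	/*
	 * Each 30-bit half is shifted up by 2 and tagged in its low bits so
	 * the port can tell which word carries which half:
	 *   addr_lo = (lo << 2) | LPFC_BMBX_BIT1_ADDR_LO;
	 *   addr_hi = (hi << 2) | LPFC_BMBX_BIT1_ADDR_HI;
	 */
#endif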
4703
4704/**
4705 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4706 * @phba: pointer to lpfc hba data structure.
4707 *
4708 * This routine is invoked to teardown the bootstrap mailbox
4709 * region and release all host resources. This routine requires
4710 * the caller to ensure all mailbox commands have been recovered, no
4711 * additional mailbox commands are sent, and interrupts are disabled
4712 * before calling this routine.
4713 *
4714 **/
4715static void
4716lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4717{
4718 dma_free_coherent(&phba->pcidev->dev,
4719 phba->sli4_hba.bmbx.bmbx_size,
4720 phba->sli4_hba.bmbx.dmabuf->virt,
4721 phba->sli4_hba.bmbx.dmabuf->phys);
4722
4723 kfree(phba->sli4_hba.bmbx.dmabuf);
4724 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4725}
4726
4727/**
4728 * lpfc_sli4_read_config - Get the config parameters.
4729 * @phba: pointer to lpfc hba data structure.
4730 *
4731 * This routine is invoked to read the configuration parameters from the HBA.
4732 * The configuration parameters are used to set the base and maximum values
4733 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
4734 * allocation for the port.
4735 *
4736 * Return codes
4737 * 0 - successful
4738 * ENOMEM - No available memory
4739 * EIO - The mailbox failed to complete successfully.
4740 **/
4741static int
4742lpfc_sli4_read_config(struct lpfc_hba *phba)
4743{
4744 LPFC_MBOXQ_t *pmb;
4745 struct lpfc_mbx_read_config *rd_config;
4746 uint32_t rc = 0;
4747
4748 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4749 if (!pmb) {
4750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4751 "2011 Unable to allocate memory for issuing "
4752 "SLI_CONFIG_SPECIAL mailbox command\n");
4753 return -ENOMEM;
4754 }
4755
4756 lpfc_read_config(phba, pmb);
4757
4758 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4759 if (rc != MBX_SUCCESS) {
4760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4761 "2012 Mailbox failed , mbxCmd x%x "
4762 "READ_CONFIG, mbxStatus x%x\n",
4763 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4764 bf_get(lpfc_mqe_status, &pmb->u.mqe));
4765 rc = -EIO;
4766 } else {
4767 rd_config = &pmb->u.mqe.un.rd_config;
4768 phba->sli4_hba.max_cfg_param.max_xri =
4769 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4770 phba->sli4_hba.max_cfg_param.xri_base =
4771 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4772 phba->sli4_hba.max_cfg_param.max_vpi =
4773 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4774 phba->sli4_hba.max_cfg_param.vpi_base =
4775 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4776 phba->sli4_hba.max_cfg_param.max_rpi =
4777 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4778 phba->sli4_hba.max_cfg_param.rpi_base =
4779 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4780 phba->sli4_hba.max_cfg_param.max_vfi =
4781 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4782 phba->sli4_hba.max_cfg_param.vfi_base =
4783 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4784 phba->sli4_hba.max_cfg_param.max_fcfi =
4785 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4786 phba->sli4_hba.max_cfg_param.fcfi_base =
4787 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4788 phba->sli4_hba.max_cfg_param.max_eq =
4789 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4790 phba->sli4_hba.max_cfg_param.max_rq =
4791 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4792 phba->sli4_hba.max_cfg_param.max_wq =
4793 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4794 phba->sli4_hba.max_cfg_param.max_cq =
4795 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4796 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4797 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4798 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4799 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4800 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4801 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4802 phba->max_vports = phba->max_vpi;
4803 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4804 "2003 cfg params XRI(B:%d M:%d), "
4805 "VPI(B:%d M:%d) "
4806 "VFI(B:%d M:%d) "
4807 "RPI(B:%d M:%d) "
4808 "FCFI(B:%d M:%d)\n",
4809 phba->sli4_hba.max_cfg_param.xri_base,
4810 phba->sli4_hba.max_cfg_param.max_xri,
4811 phba->sli4_hba.max_cfg_param.vpi_base,
4812 phba->sli4_hba.max_cfg_param.max_vpi,
4813 phba->sli4_hba.max_cfg_param.vfi_base,
4814 phba->sli4_hba.max_cfg_param.max_vfi,
4815 phba->sli4_hba.max_cfg_param.rpi_base,
4816 phba->sli4_hba.max_cfg_param.max_rpi,
4817 phba->sli4_hba.max_cfg_param.fcfi_base,
4818 phba->sli4_hba.max_cfg_param.max_fcfi);
4819 }
4820 mempool_free(pmb, phba->mbox_mem_pool);
4821
4822 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
4823 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4824 phba->cfg_hba_queue_depth =
4825 phba->sli4_hba.max_cfg_param.max_xri;
4826 return rc;
4827}
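/*
 * Editor's sketch: READ_CONFIG reports each resource as a (base, count)
 * pair.  If, say, rpi_base == 64 and rpi_count == 128, then RPIs
 * 64..191 belong to this PCI function; next_rpi and next_xri are simply
 * seeded with the base and advance as resources are posted.  (The
 * numbers here are illustrative, not taken from any spec.)
 */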
4828
4829/**
4830 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
4831 * @phba: pointer to lpfc hba data structure.
4832 *
4833 * This routine is invoked to setup the host-side endian order to the
4834 * HBA consistent with the SLI-4 interface spec.
4835 *
4836 * Return codes
4837 * 0 - successful
4838 * ENOMEM - No available memory
4839 * EIO - The mailbox failed to complete successfully.
4840 **/
4841static int
4842lpfc_setup_endian_order(struct lpfc_hba *phba)
4843{
4844 LPFC_MBOXQ_t *mboxq;
4845 uint32_t rc = 0;
4846 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4847 HOST_ENDIAN_HIGH_WORD1};
4848
4849 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4850 if (!mboxq) {
4851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4852 "0492 Unable to allocate memory for issuing "
4853 "SLI_CONFIG_SPECIAL mailbox command\n");
4854 return -ENOMEM;
4855 }
4856
4857 /*
4858 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4859 * words to contain special data values and no other data.
4860 */
4861 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4862 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4863 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4864 if (rc != MBX_SUCCESS) {
4865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4866 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
4867 "status x%x\n",
4868 rc);
4869 rc = -EIO;
4870 }
4871
4872 mempool_free(mboxq, phba->mbox_mem_pool);
4873 return rc;
4874}
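/*
 * Editor's note: HOST_ENDIAN_LOW_WORD0/HOST_ENDIAN_HIGH_WORD1 form a
 * fixed, byte-order-sensitive pattern; the port presumably compares
 * the bytes it receives against the expected pattern to deduce the
 * host's endianness and swap subsequent mailbox traffic to match.
 */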
4875
4876/**
4877 * lpfc_sli4_queue_create - Create all the SLI4 queues
4878 * @phba: pointer to lpfc hba data structure.
4879 *
4880 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4881 * operation. For each SLI4 queue type, the parameters such as queue entry
4882 * count (queue depth) shall be taken from the module parameter. For now,
4883 * we just use some constant number as place holder.
4884 *
4885 * Return codes
4886 * 0 - successful
4887 * ENOMEM - No available memory
4888 * EIO - The mailbox failed to complete successfully.
4889 **/
4890static int
4891lpfc_sli4_queue_create(struct lpfc_hba *phba)
4892{
4893 struct lpfc_queue *qdesc;
4894 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4895 int cfg_fcp_wq_count;
4896 int cfg_fcp_eq_count;
4897
4898 /*
4899 * Sanity check for configured queue parameters against the run-time
4900 * device parameters
4901 */
4902
4903 /* Sanity check on FCP fast-path WQ parameters */
4904 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4905 if (cfg_fcp_wq_count >
4906 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4907 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4908 LPFC_SP_WQN_DEF;
4909 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4911 "2581 Not enough WQs (%d) from "
4912 "the pci function for supporting "
4913 "FCP WQs (%d)\n",
4914 phba->sli4_hba.max_cfg_param.max_wq,
4915 phba->cfg_fcp_wq_count);
4916 goto out_error;
4917 }
4918 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4919 "2582 Not enough WQs (%d) from the pci "
4920 "function for supporting the requested "
4921 "FCP WQs (%d), the actual FCP WQs can "
4922 "be supported: %d\n",
4923 phba->sli4_hba.max_cfg_param.max_wq,
4924 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4925 }
4926 /* The actual number of FCP work queues adopted */
4927 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4928
4929 /* Sanity check on FCP fast-path EQ parameters */
4930 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4931 if (cfg_fcp_eq_count >
4932 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4933 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4934 LPFC_SP_EQN_DEF;
4935 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4937 "2574 Not enough EQs (%d) from the "
4938 "pci function for supporting FCP "
4939 "EQs (%d)\n",
4940 phba->sli4_hba.max_cfg_param.max_eq,
4941 phba->cfg_fcp_eq_count);
4942 goto out_error;
4943 }
4944 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4945 "2575 Not enough EQs (%d) from the pci "
4946 "function for supporting the requested "
4947 "FCP EQs (%d), the actual FCP EQs can "
4948 "be supported: %d\n",
4949 phba->sli4_hba.max_cfg_param.max_eq,
4950 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4951 }
4952 /* It does not make sense to have more EQs than WQs */
4953 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4954 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4955 "2593 The number of FCP EQs (%d) is more "
4956 "than the number of FCP WQs (%d), take "
4957 "the number of FCP EQs same as than of "
4958 "WQs (%d)\n", cfg_fcp_eq_count,
4959 phba->cfg_fcp_wq_count,
4960 phba->cfg_fcp_wq_count);
4961 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4962 }
4963 /* The actual number of FCP event queues adopted */
4964 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4965 /* The overall number of event queues used */
4966 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4967
4968 /*
4969 * Create Event Queues (EQs)
4970 */
4971
4972 /* Get EQ depth from module parameter, fake the default for now */
4973 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4974 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4975
4976 /* Create slow path event queue */
4977 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4978 phba->sli4_hba.eq_ecount);
4979 if (!qdesc) {
4980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4981 "0496 Failed allocate slow-path EQ\n");
4982 goto out_error;
4983 }
4984 phba->sli4_hba.sp_eq = qdesc;
4985
4986 /* Create fast-path FCP Event Queue(s) */
4987 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4988 phba->cfg_fcp_eq_count), GFP_KERNEL);
4989 if (!phba->sli4_hba.fp_eq) {
4990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4991 "2576 Failed allocate memory for fast-path "
4992 "EQ record array\n");
4993 goto out_free_sp_eq;
4994 }
4995 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4996 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4997 phba->sli4_hba.eq_ecount);
4998 if (!qdesc) {
4999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5000 "0497 Failed allocate fast-path EQ\n");
5001 goto out_free_fp_eq;
5002 }
5003 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5004 }
5005
5006 /*
5007 * Create Complete Queues (CQs)
5008 */
5009
5010 /* Get CQ depth from module parameter, fake the default for now */
5011 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5012 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5013
5014 /* Create slow-path Mailbox Command Complete Queue */
5015 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5016 phba->sli4_hba.cq_ecount);
5017 if (!qdesc) {
5018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5019 "0500 Failed allocate slow-path mailbox CQ\n");
5020 goto out_free_fp_eq;
5021 }
5022 phba->sli4_hba.mbx_cq = qdesc;
5023
5024 /* Create slow-path ELS Complete Queue */
5025 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5026 phba->sli4_hba.cq_ecount);
5027 if (!qdesc) {
5028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5029 "0501 Failed allocate slow-path ELS CQ\n");
5030 goto out_free_mbx_cq;
5031 }
5032 phba->sli4_hba.els_cq = qdesc;
5033
5034 /* Create slow-path Unsolicited Receive Complete Queue */
5035 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5036 phba->sli4_hba.cq_ecount);
5037 if (!qdesc) {
5038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5039 "0502 Failed allocate slow-path USOL RX CQ\n");
5040 goto out_free_els_cq;
5041 }
5042 phba->sli4_hba.rxq_cq = qdesc;
5043
5044 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5045 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5046 phba->cfg_fcp_eq_count), GFP_KERNEL);
5047 if (!phba->sli4_hba.fcp_cq) {
5048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5049 "2577 Failed allocate memory for fast-path "
5050 "CQ record array\n");
5051 goto out_free_rxq_cq;
5052 }
5053 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5054 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5055 phba->sli4_hba.cq_ecount);
5056 if (!qdesc) {
5057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5058 "0499 Failed allocate fast-path FCP "
5059 "CQ (%d)\n", fcp_cqidx);
5060 goto out_free_fcp_cq;
5061 }
5062 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5063 }
5064
5065 /* Create Mailbox Command Queue */
5066 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5067 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5068
5069 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5070 phba->sli4_hba.mq_ecount);
5071 if (!qdesc) {
5072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5073 "0505 Failed allocate slow-path MQ\n");
5074 goto out_free_fcp_cq;
5075 }
5076 phba->sli4_hba.mbx_wq = qdesc;
5077
5078 /*
5079 * Create all the Work Queues (WQs)
5080 */
5081 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5082 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5083
5084 /* Create slow-path ELS Work Queue */
5085 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5086 phba->sli4_hba.wq_ecount);
5087 if (!qdesc) {
5088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5089 "0504 Failed allocate slow-path ELS WQ\n");
5090 goto out_free_mbx_wq;
5091 }
5092 phba->sli4_hba.els_wq = qdesc;
5093
5094 /* Create fast-path FCP Work Queue(s) */
5095 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5096 phba->cfg_fcp_wq_count), GFP_KERNEL);
5097 if (!phba->sli4_hba.fcp_wq) {
5098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5099 "2578 Failed allocate memory for fast-path "
5100 "WQ record array\n");
5101 goto out_free_els_wq;
5102 }
5103 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5104 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5105 phba->sli4_hba.wq_ecount);
5106 if (!qdesc) {
5107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5108 "0503 Failed allocate fast-path FCP "
5109 "WQ (%d)\n", fcp_wqidx);
5110 goto out_free_fcp_wq;
5111 }
5112 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5113 }
5114
5115 /*
5116 * Create Receive Queue (RQ)
5117 */
5118 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5119 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5120
5121 /* Create Receive Queue for header */
5122 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5123 phba->sli4_hba.rq_ecount);
5124 if (!qdesc) {
5125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5126 "0506 Failed allocate receive HRQ\n");
5127 goto out_free_fcp_wq;
5128 }
5129 phba->sli4_hba.hdr_rq = qdesc;
5130
5131 /* Create Receive Queue for data */
5132 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5133 phba->sli4_hba.rq_ecount);
5134 if (!qdesc) {
5135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5136 "0507 Failed allocate receive DRQ\n");
5137 goto out_free_hdr_rq;
5138 }
5139 phba->sli4_hba.dat_rq = qdesc;
5140
5141 return 0;
5142
5143out_free_hdr_rq:
5144 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5145 phba->sli4_hba.hdr_rq = NULL;
5146out_free_fcp_wq:
5147 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5148 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5149 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5150 }
5151 kfree(phba->sli4_hba.fcp_wq);
5152out_free_els_wq:
5153 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5154 phba->sli4_hba.els_wq = NULL;
5155out_free_mbx_wq:
5156 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5157 phba->sli4_hba.mbx_wq = NULL;
5158out_free_fcp_cq:
5159 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5160 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5161 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5162 }
5163 kfree(phba->sli4_hba.fcp_cq);
5164out_free_rxq_cq:
5165 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5166 phba->sli4_hba.rxq_cq = NULL;
5167out_free_els_cq:
5168 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5169 phba->sli4_hba.els_cq = NULL;
5170out_free_mbx_cq:
5171 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5172 phba->sli4_hba.mbx_cq = NULL;
5173out_free_fp_eq:
5174 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5175 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5176 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5177 }
5178 kfree(phba->sli4_hba.fp_eq);
5179out_free_sp_eq:
5180 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5181 phba->sli4_hba.sp_eq = NULL;
5182out_error:
5183 return -ENOMEM;
5184}
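/*
 * Editor's sketch of the clamping above (illustrative counts): the
 * port's WQ/EQ budget must also cover LPFC_SP_WQN_DEF/LPFC_SP_EQN_DEF
 * slow-path queues, so if the port reported max_wq == 32 and the
 * module asked for 64 FCP WQs, the driver would fall back to
 * 32 - LPFC_SP_WQN_DEF fast-path WQs, and analogously for EQs.
 */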
5185
5186/**
5187 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5188 * @phba: pointer to lpfc hba data structure.
5189 *
5190 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5191 * operation.
5192 *
5193 * Return codes
5194 * 0 - successful
5195 * ENOMEM - No available memory
5196 * EIO - The mailbox failed to complete successfully.
5197 **/
5198static void
5199lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5200{
5201 int fcp_qidx;
5202
5203 /* Release mailbox command work queue */
5204 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5205 phba->sli4_hba.mbx_wq = NULL;
5206
5207 /* Release ELS work queue */
5208 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5209 phba->sli4_hba.els_wq = NULL;
5210
5211 /* Release FCP work queue */
5212 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5213 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5214 kfree(phba->sli4_hba.fcp_wq);
5215 phba->sli4_hba.fcp_wq = NULL;
5216
5217 /* Release unsolicited receive queue */
5218 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5219 phba->sli4_hba.hdr_rq = NULL;
5220 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5221 phba->sli4_hba.dat_rq = NULL;
5222
5223 /* Release unsolicited receive complete queue */
5224 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5225 phba->sli4_hba.rxq_cq = NULL;
5226
5227 /* Release ELS complete queue */
5228 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5229 phba->sli4_hba.els_cq = NULL;
5230
5231 /* Release mailbox command complete queue */
5232 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5233 phba->sli4_hba.mbx_cq = NULL;
5234
5235 /* Release FCP response complete queue */
5236 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5237 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5238 kfree(phba->sli4_hba.fcp_cq);
5239 phba->sli4_hba.fcp_cq = NULL;
5240
5241 /* Release fast-path event queue */
5242 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5243 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5244 kfree(phba->sli4_hba.fp_eq);
5245 phba->sli4_hba.fp_eq = NULL;
5246
5247 /* Release slow-path event queue */
5248 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5249 phba->sli4_hba.sp_eq = NULL;
5250
5251 return;
5252}
5253
5254/**
5255 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5256 * @phba: pointer to lpfc hba data structure.
5257 *
5258 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5259 * operation.
5260 *
5261 * Return codes
5262 * 0 - successful
5263 * ENOMEM - No available memory
5264 * EIO - The mailbox failed to complete successfully.
5265 **/
5266int
5267lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5268{
5269 int rc = -ENOMEM;
5270 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5271 int fcp_cq_index = 0;
5272
5273 /*
5274 * Set up Event Queues (EQs)
5275 */
5276
5277 /* Set up slow-path event queue */
5278 if (!phba->sli4_hba.sp_eq) {
5279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5280 "0520 Slow-path EQ not allocated\n");
5281 goto out_error;
5282 }
5283 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5284 LPFC_SP_DEF_IMAX);
5285 if (rc) {
5286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5287 "0521 Failed setup of slow-path EQ: "
5288 "rc = 0x%x\n", rc);
5289 goto out_error;
5290 }
5291 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5292 "2583 Slow-path EQ setup: queue-id=%d\n",
5293 phba->sli4_hba.sp_eq->queue_id);
5294
5295 /* Set up fast-path event queue */
5296 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5297 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5299 "0522 Fast-path EQ (%d) not "
5300 "allocated\n", fcp_eqidx);
5301 goto out_destroy_fp_eq;
5302 }
5303 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5304 phba->cfg_fcp_imax);
5305 if (rc) {
5306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5307 "0523 Failed setup of fast-path EQ "
5308 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
5309 goto out_destroy_fp_eq;
5310 }
5311 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5312 "2584 Fast-path EQ setup: "
5313 "queue[%d]-id=%d\n", fcp_eqidx,
5314 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5315 }
5316
5317 /*
5318 * Set up Complete Queues (CQs)
5319 */
5320
5321 /* Set up slow-path MBOX Complete Queue as the first CQ */
5322 if (!phba->sli4_hba.mbx_cq) {
5323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5324 "0528 Mailbox CQ not allocated\n");
5325 goto out_destroy_fp_eq;
5326 }
5327 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5328 LPFC_MCQ, LPFC_MBOX);
5329 if (rc) {
5330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5331 "0529 Failed setup of slow-path mailbox CQ: "
5332 "rc = 0x%x\n", rc);
5333 goto out_destroy_fp_eq;
5334 }
5335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5336 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5337 phba->sli4_hba.mbx_cq->queue_id,
5338 phba->sli4_hba.sp_eq->queue_id);
5339
5340 /* Set up slow-path ELS Complete Queue */
5341 if (!phba->sli4_hba.els_cq) {
5342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5343 "0530 ELS CQ not allocated\n");
5344 goto out_destroy_mbx_cq;
5345 }
5346 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5347 LPFC_WCQ, LPFC_ELS);
5348 if (rc) {
5349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5350 "0531 Failed setup of slow-path ELS CQ: "
5351 "rc = 0x%x\n", rc);
5352 goto out_destroy_mbx_cq;
5353 }
5354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5355 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5356 phba->sli4_hba.els_cq->queue_id,
5357 phba->sli4_hba.sp_eq->queue_id);
5358
5359 /* Set up slow-path Unsolicited Receive Complete Queue */
5360 if (!phba->sli4_hba.rxq_cq) {
5361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5362 "0532 USOL RX CQ not allocated\n");
5363 goto out_destroy_els_cq;
5364 }
5365 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5366 LPFC_RCQ, LPFC_USOL);
5367 if (rc) {
5368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5369 "0533 Failed setup of slow-path USOL RX CQ: "
5370 "rc = 0x%x\n", rc);
5371 goto out_destroy_els_cq;
5372 }
5373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5374 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5375 phba->sli4_hba.rxq_cq->queue_id,
5376 phba->sli4_hba.sp_eq->queue_id);
5377
5378 /* Set up fast-path FCP Response Complete Queue */
5379 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5380 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5382 "0526 Fast-path FCP CQ (%d) not "
5383 "allocated\n", fcp_cqidx);
5384 goto out_destroy_fcp_cq;
5385 }
5386 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5387 phba->sli4_hba.fp_eq[fcp_cqidx],
5388 LPFC_WCQ, LPFC_FCP);
5389 if (rc) {
5390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5391 "0527 Failed setup of fast-path FCP "
5392 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5393 goto out_destroy_fcp_cq;
5394 }
5395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5396 "2588 FCP CQ setup: cq[%d]-id=%d, "
5397 "parent eq[%d]-id=%d\n",
5398 fcp_cqidx,
5399 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5400 fcp_cqidx,
5401 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5402 }
5403
5404 /*
5405 * Set up all the Work Queues (WQs)
5406 */
5407
5408 /* Set up Mailbox Command Queue */
5409 if (!phba->sli4_hba.mbx_wq) {
5410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5411 "0538 Slow-path MQ not allocated\n");
5412 goto out_destroy_fcp_cq;
5413 }
5414 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5415 phba->sli4_hba.mbx_cq, LPFC_MBOX);
5416 if (rc) {
5417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5418 "0539 Failed setup of slow-path MQ: "
5419 "rc = 0x%x\n", rc);
5420 goto out_destroy_fcp_cq;
5421 }
5422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5423 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5424 phba->sli4_hba.mbx_wq->queue_id,
5425 phba->sli4_hba.mbx_cq->queue_id);
5426
5427 /* Set up slow-path ELS Work Queue */
5428 if (!phba->sli4_hba.els_wq) {
5429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5430 "0536 Slow-path ELS WQ not allocated\n");
5431 goto out_destroy_mbx_wq;
5432 }
5433 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5434 phba->sli4_hba.els_cq, LPFC_ELS);
5435 if (rc) {
5436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5437 "0537 Failed setup of slow-path ELS WQ: "
5438 "rc = 0x%x\n", rc);
5439 goto out_destroy_mbx_wq;
5440 }
5441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5442 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5443 phba->sli4_hba.els_wq->queue_id,
5444 phba->sli4_hba.els_cq->queue_id);
5445
5446 /* Set up fast-path FCP Work Queue */
5447 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5448 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5450 "0534 Fast-path FCP WQ (%d) not "
5451 "allocated\n", fcp_wqidx);
5452 goto out_destroy_fcp_wq;
5453 }
5454 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5455 phba->sli4_hba.fcp_cq[fcp_cq_index],
5456 LPFC_FCP);
5457 if (rc) {
5458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5459 "0535 Failed setup of fast-path FCP "
5460 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5461 goto out_destroy_fcp_wq;
5462 }
5463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5464 "2591 FCP WQ setup: wq[%d]-id=%d, "
5465 "parent cq[%d]-id=%d\n",
5466 fcp_wqidx,
5467 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5468 fcp_cq_index,
5469 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5470 /* Round robin FCP Work Queue's Completion Queue assignment */
5471 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5472 }
5473
5474 /*
5475 * Create Receive Queue (RQ)
5476 */
5477 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5479 "0540 Receive Queue not allocated\n");
5480 goto out_destroy_fcp_wq;
5481 }
5482 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5483 phba->sli4_hba.rxq_cq, LPFC_USOL);
5484 if (rc) {
5485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5486 "0541 Failed setup of Receive Queue: "
5487 "rc = 0x%x\n", rc);
5488 goto out_destroy_fcp_wq;
5489 }
5490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5491 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5492 "parent cq-id=%d\n",
5493 phba->sli4_hba.hdr_rq->queue_id,
5494 phba->sli4_hba.dat_rq->queue_id,
5495 phba->sli4_hba.rxq_cq->queue_id);
5496 return 0;
5497
5498out_destroy_fcp_wq:
5499 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5500 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5501 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5502out_destroy_mbx_wq:
5503 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5504out_destroy_fcp_cq:
5505 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5506 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5507 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5508out_destroy_els_cq:
5509 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5510out_destroy_mbx_cq:
5511 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5512out_destroy_fp_eq:
5513 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5514 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5515 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5516out_error:
5517 return rc;
5518}
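/*
 * Editor's sketch of the resulting fast-path topology, assuming the
 * illustrative values cfg_fcp_wq_count == 4 and cfg_fcp_eq_count == 2;
 * the round-robin assignment above binds
 *
 *   fcp_wq[0] -> fcp_cq[0] -> fp_eq[0]
 *   fcp_wq[1] -> fcp_cq[1] -> fp_eq[1]
 *   fcp_wq[2] -> fcp_cq[0] -> fp_eq[0]
 *   fcp_wq[3] -> fcp_cq[1] -> fp_eq[1]
 *
 * so work queues share completion queues evenly whenever there are
 * more WQs than EQs.
 */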
5519
5520/**
5521 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5522 * @phba: pointer to lpfc hba data structure.
5523 *
5524 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
5525 * operation.
5526 *
5527 * Return codes
5528 * 0 - successful
5529 * ENOMEM - No available memory
5530 * EIO - The mailbox failed to complete successfully.
5531 **/
5532void
5533lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5534{
5535 int fcp_qidx;
5536
5537 /* Unset mailbox command work queue */
5538 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5539 /* Unset ELS work queue */
5540 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5541 /* Unset unsolicited receive queue */
5542 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5543 /* Unset FCP work queue */
5544 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5545 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5546 /* Unset mailbox command complete queue */
5547 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5548 /* Unset ELS complete queue */
5549 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5550 /* Unset unsolicited receive complete queue */
5551 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5552 /* Unset FCP response complete queue */
5553 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5554 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5555 /* Unset fast-path event queue */
5556 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5557 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5558 /* Unset slow-path event queue */
5559 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5560}
5561
5562/**
5563 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5564 * @phba: pointer to lpfc hba data structure.
5565 *
5566 * This routine is invoked to allocate and set up a pool of completion queue
5567 * events. The body of the completion queue event is a completion queue entry
5568 * (CQE). For now, this pool is used for the interrupt service routine to queue
5569 * the following HBA completion queue events for the worker thread to process:
5570 * - Mailbox asynchronous events
5571 * - Receive queue completion unsolicited events
5572 * Later, this can be used for all the slow-path events.
5573 *
5574 * Return codes
5575 * 0 - successful
5576 * -ENOMEM - No available memory
5577 **/
5578static int
5579lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5580{
5581 struct lpfc_cq_event *cq_event;
5582 int i;
5583
5584 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5585 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5586 if (!cq_event)
5587 goto out_pool_create_fail;
5588 list_add_tail(&cq_event->list,
5589 &phba->sli4_hba.sp_cqe_event_pool);
5590 }
5591 return 0;
5592
5593out_pool_create_fail:
5594 lpfc_sli4_cq_event_pool_destroy(phba);
5595 return -ENOMEM;
5596}
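/*
 * Editor's note: the pool is sized at four times the CQ depth so the
 * interrupt service routine can hand slow-path events to the worker
 * thread without allocating memory in interrupt context; all
 * allocation happens here, once, at setup time.
 */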
5597
5598/**
5599 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5600 * @phba: pointer to lpfc hba data structure.
5601 *
5602 * This routine is invoked to free the pool of completion queue events at
5603 * driver unload time. Note that it is the responsibility of the driver
5604 * cleanup routine to free all the outstanding completion-queue events
5605 * allocated from this pool back into the pool before invoking this routine
5606 * to destroy the pool.
5607 **/
5608static void
5609lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5610{
5611 struct lpfc_cq_event *cq_event, *next_cq_event;
5612
5613 list_for_each_entry_safe(cq_event, next_cq_event,
5614 &phba->sli4_hba.sp_cqe_event_pool, list) {
5615 list_del(&cq_event->list);
5616 kfree(cq_event);
5617 }
5618}
5619
5620/**
5621 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5622 * @phba: pointer to lpfc hba data structure.
5623 *
5624 * This routine is the lock-free version of the API invoked to allocate a
5625 * completion-queue event from the free pool.
5626 *
5627 * Return: Pointer to the newly allocated completion-queue event if successful
5628 * NULL otherwise.
5629 **/
5630struct lpfc_cq_event *
5631__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5632{
5633 struct lpfc_cq_event *cq_event = NULL;
5634
5635 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5636 struct lpfc_cq_event, list);
5637 return cq_event;
5638}
5639
5640/**
5641 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5642 * @phba: pointer to lpfc hba data structure.
5643 *
5644 * This routine is the locking version of the API invoked to allocate a
5645 * completion-queue event from the free pool.
5646 *
5647 * Return: Pointer to the newly allocated completion-queue event if successful
5648 * NULL otherwise.
5649 **/
5650struct lpfc_cq_event *
5651lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5652{
5653 struct lpfc_cq_event *cq_event;
5654 unsigned long iflags;
5655
5656 spin_lock_irqsave(&phba->hbalock, iflags);
5657 cq_event = __lpfc_sli4_cq_event_alloc(phba);
5658 spin_unlock_irqrestore(&phba->hbalock, iflags);
5659 return cq_event;
5660}
5661
5662/**
5663 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5664 * @phba: pointer to lpfc hba data structure.
5665 * @cq_event: pointer to the completion queue event to be freed.
5666 *
5667 * This routine is the lock-free version of the API invoked to release a
5668 * completion-queue event back into the free pool.
5669 **/
5670void
5671__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5672 struct lpfc_cq_event *cq_event)
5673{
5674 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5675}
5676
5677/**
5678 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5679 * @phba: pointer to lpfc hba data structure.
5680 * @cq_event: pointer to the completion queue event to be freed.
5681 *
5682 * This routine is the locking version of the API invoked to release a
5683 * completion-queue event back into the free pool.
5684 **/
5685void
5686lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5687 struct lpfc_cq_event *cq_event)
5688{
5689 unsigned long iflags;
5690 spin_lock_irqsave(&phba->hbalock, iflags);
5691 __lpfc_sli4_cq_event_release(phba, cq_event);
5692 spin_unlock_irqrestore(&phba->hbalock, iflags);
5693}
5694
5695/**
5696 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5697 * @phba: pointer to lpfc hba data structure.
5698 *
5699 * This routine frees all the pending completion-queue events
5700 * back into the free pool for device reset.
5701 **/
5702static void
5703lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5704{
5705 LIST_HEAD(cqelist);
5706 struct lpfc_cq_event *cqe;
5707 unsigned long iflags;
5708
5709 /* Retrieve all the pending WCQEs from pending WCQE lists */
5710 spin_lock_irqsave(&phba->hbalock, iflags);
5711 /* Pending FCP XRI abort events */
5712 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5713 &cqelist);
5714 /* Pending ELS XRI abort events */
5715 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5716 &cqelist);
5717 /* Pending async events */
5718 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5719 &cqelist);
5720 spin_unlock_irqrestore(&phba->hbalock, iflags);
5721
5722 while (!list_empty(&cqelist)) {
5723 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5724 lpfc_sli4_cq_event_release(phba, cqe);
5725 }
5726}
5727
5728/**
5729 * lpfc_pci_function_reset - Reset pci function.
5730 * @phba: pointer to lpfc hba data structure.
5731 *
5732 * This routine is invoked to request a PCI function reset. It destroys
5733 * all resources assigned to the PCI function that originates this request.
5734 *
5735 * Return codes
5736 * 0 - successful
5737 * ENOMEM - No available memory
5738 * EIO - The mailbox failed to complete successfully.
5739 **/
5740int
5741lpfc_pci_function_reset(struct lpfc_hba *phba)
5742{
5743 LPFC_MBOXQ_t *mboxq;
5744 uint32_t rc = 0;
5745 uint32_t shdr_status, shdr_add_status;
5746 union lpfc_sli4_cfg_shdr *shdr;
5747
5748 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5749 if (!mboxq) {
5750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5751 "0494 Unable to allocate memory for issuing "
5752 "SLI_FUNCTION_RESET mailbox command\n");
5753 return -ENOMEM;
5754 }
5755
5756 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5757 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5758 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5759 LPFC_SLI4_MBX_EMBED);
5760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5761 shdr = (union lpfc_sli4_cfg_shdr *)
5762 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5765 if (rc != MBX_TIMEOUT)
5766 mempool_free(mboxq, phba->mbox_mem_pool);
5767 if (shdr_status || shdr_add_status || rc) {
5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5769 "0495 SLI_FUNCTION_RESET mailbox failed with "
5770 "status x%x add_status x%x, mbx status x%x\n",
5771 shdr_status, shdr_add_status, rc);
5772 rc = -ENXIO;
5773 }
5774 return rc;
5775}
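/*
 * Editor's note on the MBX_TIMEOUT check above: a timed-out mailbox
 * command may still complete later, and the completion path still owns
 * the buffer in that case, so the mailbox is deliberately not freed
 * here on timeout; freeing it would risk a use-after-free.
 */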
5776
5777/**
5778 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5779 * @phba: pointer to lpfc hba data structure.
5780 * @cnt: number of nop mailbox commands to send.
5781 *
5782 * This routine is invoked to send @cnt NOP mailbox commands and
5783 * wait for each command to complete.
5784 *
5785 * Return: the number of NOP mailbox command completed.
5786 **/
5787static int
5788lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5789{
5790 LPFC_MBOXQ_t *mboxq;
5791 int length, cmdsent;
5792 uint32_t mbox_tmo;
5793 uint32_t rc = 0;
5794 uint32_t shdr_status, shdr_add_status;
5795 union lpfc_sli4_cfg_shdr *shdr;
5796
5797 if (cnt == 0) {
5798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5799 "2518 Requested to send 0 NOP mailbox cmd\n");
5800 return cnt;
5801 }
5802
5803 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5804 if (!mboxq) {
5805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5806 "2519 Unable to allocate memory for issuing "
5807 "NOP mailbox command\n");
5808 return 0;
5809 }
5810
5811 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5812 length = (sizeof(struct lpfc_mbx_nop) -
5813 sizeof(struct lpfc_sli4_cfg_mhdr));
5814 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5815 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5816
5817 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5818 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5819 if (!phba->sli4_hba.intr_enable)
5820 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5821 else
5822 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5823 if (rc == MBX_TIMEOUT)
5824 break;
5825 /* Check return status */
5826 shdr = (union lpfc_sli4_cfg_shdr *)
5827 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5828 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5829 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5830 &shdr->response);
5831 if (shdr_status || shdr_add_status || rc) {
5832 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5833 "2520 NOP mailbox command failed "
5834 "status x%x add_status x%x mbx "
5835 "status x%x\n", shdr_status,
5836 shdr_add_status, rc);
5837 break;
5838 }
5839 }
5840
5841 if (rc != MBX_TIMEOUT)
5842 mempool_free(mboxq, phba->mbox_mem_pool);
5843
5844 return cmdsent;
5845}
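Since the routine reports how many NOPs actually completed, a caller in this file can compare the return value against the requested count to verify the mailbox path end to end. A minimal usage sketch; the helper name and the count of 4 are assumptions for illustration:

/* Hypothetical sanity check: every requested NOP must complete */
static int
lpfc_verify_mbox_path(struct lpfc_hba *phba)
{
	const uint32_t cnt = 4;	/* assumed check depth */

	if (lpfc_sli4_send_nop_mbox_cmds(phba, cnt) != cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"NOP mailbox path check failed\n");
		return -EIO;
	}
	return 0;
}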
5846
5847/**
5848 * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
5849 * @phba: pointer to lpfc hba data structure.
5850 * @fcfi: fcf index.
5851 *
5852 * This routine is invoked to unregister an FCFI from the device.
5853 **/
5854void
5855lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5856{
5857 LPFC_MBOXQ_t *mbox;
5858 uint32_t mbox_tmo;
5859 int rc;
5860 unsigned long flags;
5861
5862 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5863
5864 if (!mbox)
5865 return;
5866
5867 lpfc_unreg_fcfi(mbox, fcfi);
5868
5869 if (!phba->sli4_hba.intr_enable)
5870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5871 else {
5872 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5874 }
5875 if (rc != MBX_TIMEOUT)
5876 mempool_free(mbox, phba->mbox_mem_pool);
5877 if (rc != MBX_SUCCESS)
5878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5879 "2517 Unregister FCFI command failed "
5880 "status %d, mbxStatus x%x\n", rc,
5881 bf_get(lpfc_mqe_status, &mbox->u.mqe));
5882 else {
5883 spin_lock_irqsave(&phba->hbalock, flags);
5884 /* Mark the FCFI as no longer registered */
5885 phba->fcf.fcf_flag &=
5886 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5887 spin_unlock_irqrestore(&phba->hbalock, flags);
5888 }
5889}
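The issue path in lpfc_sli4_fcfi_unreg() is the recurring synchronous-mailbox idiom in this file: poll while sli4_hba.intr_enable is still clear (early init or teardown), otherwise sleep-wait with the command timeout, and skip the mempool_free() on MBX_TIMEOUT because a timed-out mailbox is completed asynchronously. Isolated as a sketch, assuming a fully prepared mailbox; the helper name is hypothetical:

/* Sketch of the poll-vs-wait mailbox idiom used throughout this file */
static int
lpfc_issue_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t mbox_tmo;
	int rc;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* A timed-out mailbox is completed (and freed) asynchronously */
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}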
5890
5891/**
5892 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5893 * @phba: pointer to lpfc hba data structure.
5894 *
5895 * This routine is invoked to set up the PCI device memory space for device
5896 * with SLI-4 interface spec.
5897 *
5898 * Return codes
5899 * 0 - successful
5900 * other values - error
5901 **/
5902static int
5903lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5904{
5905 struct pci_dev *pdev;
5906 unsigned long bar0map_len, bar1map_len, bar2map_len;
5907 int error = -ENODEV;
5908
5909 /* Obtain PCI device reference */
5910 if (!phba->pcidev)
5911 return error;
5912 else
5913 pdev = phba->pcidev;
5914
5915 /* Set the device DMA mask size */
5916 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5917 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5918 return error;
5919
5920 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5921 * number of bytes required by each mapping. They actually map to
5922 * PCI BAR regions 1, 2, and 4 on the SLI4 device.
5923 */
5924 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5925 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5926
5927 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5928 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5929
5930 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5931 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5932
5933 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5934 phba->sli4_hba.conf_regs_memmap_p =
5935 ioremap(phba->pci_bar0_map, bar0map_len);
5936 if (!phba->sli4_hba.conf_regs_memmap_p) {
5937 dev_printk(KERN_ERR, &pdev->dev,
5938 "ioremap failed for SLI4 PCI config registers.\n");
5939 goto out;
5940 }
5941
5942 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
5943 phba->sli4_hba.ctrl_regs_memmap_p =
5944 ioremap(phba->pci_bar1_map, bar1map_len);
5945 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5946 dev_printk(KERN_ERR, &pdev->dev,
5947 "ioremap failed for SLI4 HBA control registers.\n");
5948 goto out_iounmap_conf;
5949 }
5950
5951 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5952 phba->sli4_hba.drbl_regs_memmap_p =
5953 ioremap(phba->pci_bar2_map, bar2map_len);
5954 if (!phba->sli4_hba.drbl_regs_memmap_p) {
5955 dev_printk(KERN_ERR, &pdev->dev,
5956 "ioremap failed for SLI4 HBA doorbell registers.\n");
5957 goto out_iounmap_ctrl;
5958 }
5959
5960 /* Set up BAR0 PCI config space register memory map */
5961 lpfc_sli4_bar0_register_memmap(phba);
5962
5963 /* Set up BAR1 register memory map */
5964 lpfc_sli4_bar1_register_memmap(phba);
5965
5966 /* Set up BAR2 register memory map */
5967 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5968 if (error)
5969 goto out_iounmap_all;
5970
5971 return 0;
5972
5973out_iounmap_all:
5974 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5975out_iounmap_ctrl:
5976 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5977out_iounmap_conf:
5978 iounmap(phba->sli4_hba.conf_regs_memmap_p);
5979out:
5980 return error;
5981}
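Two idioms in the setup above are worth isolating: the 64-bit-then-32-bit DMA mask fallback, and the goto ladder that unmaps the BARs in reverse order of mapping. The mask fallback as a standalone sketch against the 2009-era PCI API; the helper name is assumed:

/* Try 64-bit DMA addressing first, then fall back to 32-bit */
static int
lpfc_set_dma_masks(struct pci_dev *pdev)
{
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
		return 0;
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
		return 0;
	return -ENODEV;	/* device cannot address system memory */
}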
5982
5983/**
5984 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5985 * @phba: pointer to lpfc hba data structure.
5986 *
5987 * This routine is invoked to unset the PCI device memory space for device
5988 * with SLI-4 interface spec.
5989 **/
5990static void
5991lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5992{
5993 struct pci_dev *pdev;
5994
5995 /* Obtain PCI device reference */
5996 if (!phba->pcidev)
5997 return;
5998 else
5999 pdev = phba->pcidev;
6000
6001 /* Free coherent DMA memory allocated */
6002
6003 /* Unmap I/O memory space */
6004 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6005 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6006 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6007
6008 return;
6009}
6010
6011/**
6012 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6013 * @phba: pointer to lpfc hba data structure.
6014 *
6015 * This routine is invoked to enable the MSI-X interrupt vectors to device
6016 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6017 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6018 * invoked, enables either all or nothing, depending on the current
6019 * availability of PCI vector resources. The device driver is responsible
6020 * for calling the individual request_irq() to register each MSI-X vector
6021 * with an interrupt handler, which is done in this function. Note that
2326 * later when the device is unloading, the driver should always call free_irq() 6022 * later when the device is unloading, the driver should always call free_irq()
2327 * on all MSI-X vectors it has done request_irq() on before calling 6023 * on all MSI-X vectors it has done request_irq() on before calling
2328 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6024 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
@@ -2333,7 +6029,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2333 * other values - error 6029 * other values - error
2334 **/ 6030 **/
2335static int 6031static int
2336lpfc_enable_msix(struct lpfc_hba *phba) 6032lpfc_sli_enable_msix(struct lpfc_hba *phba)
2337{ 6033{
2338 int rc, i; 6034 int rc, i;
2339 LPFC_MBOXQ_t *pmb; 6035 LPFC_MBOXQ_t *pmb;
@@ -2349,20 +6045,21 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2349 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2350 "0420 PCI enable MSI-X failed (%d)\n", rc); 6046 "0420 PCI enable MSI-X failed (%d)\n", rc);
2351 goto msi_fail_out; 6047 goto msi_fail_out;
2352 } else 6048 }
2353 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6049 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6050 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2355 "0477 MSI-X entry[%d]: vector=x%x " 6051 "0477 MSI-X entry[%d]: vector=x%x "
2356 "message=%d\n", i, 6052 "message=%d\n", i,
2357 phba->msix_entries[i].vector, 6053 phba->msix_entries[i].vector,
2358 phba->msix_entries[i].entry); 6054 phba->msix_entries[i].entry);
2359 /* 6055 /*
2360 * Assign MSI-X vectors to interrupt handlers 6056 * Assign MSI-X vectors to interrupt handlers
2361 */ 6057 */
2362 6058
2363 /* vector-0 is associated to slow-path handler */ 6059 /* vector-0 is associated to slow-path handler */
2364 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, 6060 rc = request_irq(phba->msix_entries[0].vector,
2365 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); 6061 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6062 LPFC_SP_DRIVER_HANDLER_NAME, phba);
2366 if (rc) { 6063 if (rc) {
2367 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2368 "0421 MSI-X slow-path request_irq failed " 6065 "0421 MSI-X slow-path request_irq failed "
@@ -2371,8 +6068,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2371 } 6068 }
2372 6069
2373 /* vector-1 is associated to fast-path handler */ 6070 /* vector-1 is associated to fast-path handler */
2374 rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, 6071 rc = request_irq(phba->msix_entries[1].vector,
2375 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); 6072 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6073 LPFC_FP_DRIVER_HANDLER_NAME, phba);
2376 6074
2377 if (rc) { 6075 if (rc) {
2378 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6076 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2401,7 +6099,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2401 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6099 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2402 "0351 Config MSI mailbox command failed, " 6100 "0351 Config MSI mailbox command failed, "
2403 "mbxCmd x%x, mbxStatus x%x\n", 6101 "mbxCmd x%x, mbxStatus x%x\n",
2404 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 6102 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
2405 goto mbx_fail_out; 6103 goto mbx_fail_out;
2406 } 6104 }
2407 6105
@@ -2428,14 +6126,14 @@ msi_fail_out:
2428} 6126}
2429 6127
2430/** 6128/**
2431 * lpfc_disable_msix - Disable MSI-X interrupt mode 6129 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
2432 * @phba: pointer to lpfc hba data structure. 6130 * @phba: pointer to lpfc hba data structure.
2433 * 6131 *
2434 * This routine is invoked to release the MSI-X vectors and then disable the 6132 * This routine is invoked to release the MSI-X vectors and then disable the
2435 * MSI-X interrupt mode. 6133 * MSI-X interrupt mode to device with SLI-3 interface spec.
2436 **/ 6134 **/
2437static void 6135static void
2438lpfc_disable_msix(struct lpfc_hba *phba) 6136lpfc_sli_disable_msix(struct lpfc_hba *phba)
2439{ 6137{
2440 int i; 6138 int i;
2441 6139
@@ -2444,23 +6142,26 @@ lpfc_disable_msix(struct lpfc_hba *phba)
2444 free_irq(phba->msix_entries[i].vector, phba); 6142 free_irq(phba->msix_entries[i].vector, phba);
2445 /* Disable MSI-X */ 6143 /* Disable MSI-X */
2446 pci_disable_msix(phba->pcidev); 6144 pci_disable_msix(phba->pcidev);
6145
6146 return;
2447} 6147}
2448 6148
2449/** 6149/**
2450 * lpfc_enable_msi - Enable MSI interrupt mode 6150 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
2451 * @phba: pointer to lpfc hba data structure. 6151 * @phba: pointer to lpfc hba data structure.
2452 * 6152 *
2453 * This routine is invoked to enable the MSI interrupt mode. The kernel 6153 * This routine is invoked to enable the MSI interrupt mode to device with
2454 * function pci_enable_msi() is called to enable the MSI vector. The 6154 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
2455 * device driver is responsible for calling the request_irq() to register 6155 * enable the MSI vector. The device driver is responsible for calling the
2456 * MSI vector with an interrupt handler, which is done in this function. 6156 * request_irq() to register the MSI vector with an interrupt handler, which
6157 * is done in this function.
2457 * 6158 *
2458 * Return codes 6159 * Return codes
2459 * 0 - successful 6160 * 0 - successful
2460 * other values - error 6161 * other values - error
2461 */ 6162 */
2462static int 6163static int
2463lpfc_enable_msi(struct lpfc_hba *phba) 6164lpfc_sli_enable_msi(struct lpfc_hba *phba)
2464{ 6165{
2465 int rc; 6166 int rc;
2466 6167
@@ -2474,7 +6175,7 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2474 return rc; 6175 return rc;
2475 } 6176 }
2476 6177
2477 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6178 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
2478 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6179 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2479 if (rc) { 6180 if (rc) {
2480 pci_disable_msi(phba->pcidev); 6181 pci_disable_msi(phba->pcidev);
@@ -2485,17 +6186,17 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2485} 6186}
2486 6187
2487/** 6188/**
2488 * lpfc_disable_msi - Disable MSI interrupt mode 6189 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
2489 * @phba: pointer to lpfc hba data structure. 6190 * @phba: pointer to lpfc hba data structure.
2490 * 6191 *
2491 * This routine is invoked to disable the MSI interrupt mode. The driver 6192 * This routine is invoked to disable the MSI interrupt mode to device with
2492 * calls free_irq() on MSI vector it has done request_irq() on before 6193 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
2493 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and 6194 * done request_irq() on before calling pci_disable_msi(). Failure to do so
2494 * a device will be left with MSI enabled, leaking its vector. 6195 * results in a BUG_ON() and a device will be left with MSI enabled, leaking
6196 * its vector.
2495 */ 6197 */
2496
2497static void 6198static void
2498lpfc_disable_msi(struct lpfc_hba *phba) 6199lpfc_sli_disable_msi(struct lpfc_hba *phba)
2499{ 6200{
2500 free_irq(phba->pcidev->irq, phba); 6201 free_irq(phba->pcidev->irq, phba);
2501 pci_disable_msi(phba->pcidev); 6202 pci_disable_msi(phba->pcidev);
@@ -2503,80 +6204,298 @@ lpfc_disable_msi(struct lpfc_hba *phba)
2503} 6204}
2504 6205
2505/** 6206/**
2506 * lpfc_log_intr_mode - Log the active interrupt mode 6207 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
2507 * @phba: pointer to lpfc hba data structure. 6208 * @phba: pointer to lpfc hba data structure.
2508 * @intr_mode: active interrupt mode adopted.
2509 * 6209 *
2510 * This routine is invoked to log the currently used active interrupt mode 6210 * This routine is invoked to enable device interrupt and associate driver's
2511 * to the device. 6211 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
2512 */ 6212 * spec. Depending on the interrupt mode configured for the driver, the driver
6213 * will try to fall back from the configured interrupt mode to an interrupt
6214 * mode which is supported by the platform, kernel, and device in the order
6215 * of:
6216 * MSI-X -> MSI -> IRQ.
6217 *
6218 * Return codes
6219 * 0 - successful
6220 * other values - error
6221 **/
6222static uint32_t
6223lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6224{
6225 uint32_t intr_mode = LPFC_INTR_ERROR;
6226 int retval;
6227
6228 if (cfg_mode == 2) {
6229 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6230 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6231 if (!retval) {
6232 /* Now, try to enable MSI-X interrupt mode */
6233 retval = lpfc_sli_enable_msix(phba);
6234 if (!retval) {
6235 /* Indicate initialization to MSI-X mode */
6236 phba->intr_type = MSIX;
6237 intr_mode = 2;
6238 }
6239 }
6240 }
6241
6242 /* Fallback to MSI if MSI-X initialization failed */
6243 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6244 retval = lpfc_sli_enable_msi(phba);
6245 if (!retval) {
6246 /* Indicate initialization to MSI mode */
6247 phba->intr_type = MSI;
6248 intr_mode = 1;
6249 }
6250 }
6251
6252 /* Fallback to INTx if both MSI-X/MSI initialization failed */
6253 if (phba->intr_type == NONE) {
6254 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6255 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6256 if (!retval) {
6257 /* Indicate initialization to INTx mode */
6258 phba->intr_type = INTx;
6259 intr_mode = 0;
6260 }
6261 }
6262 return intr_mode;
6263}
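The returned intr_mode doubles as the retry key for the probe path: when the active-interrupt test fails, the caller disables the current mode and re-enters one level lower, exactly as lpfc_pci_probe_one_s3() does later in this patch. A condensed sketch of that caller loop; lpfc_bring_up_intr() and lpfc_intr_test_ok() are hypothetical stand-ins (the latter for the 50ms sleep-and-count check):

/* Sketch of the probe-side fallback loop around lpfc_sli_enable_intr() */
static int
lpfc_bring_up_intr(struct lpfc_hba *phba)
{
	uint32_t cfg_mode = phba->cfg_use_msi;
	uint32_t intr_mode;

	while (true) {
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR)
			return -ENODEV;
		/* INTx (mode 0) is accepted without the interrupt test */
		if (intr_mode == 0 || lpfc_intr_test_ok(phba)) {
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			return 0;
		}
		lpfc_sli_disable_intr(phba);
		cfg_mode = --intr_mode;	/* try the next lower mode */
	}
}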
6264
6265/**
6266 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6267 * @phba: pointer to lpfc hba data structure.
6268 *
6269 * This routine is invoked to disable device interrupt and disassociate the
6270 * driver's interrupt handler(s) from interrupt vector(s) to device with
6271 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6272 * release the interrupt vector(s) for the message signaled interrupt.
6273 **/
2513static void 6274static void
2514lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 6275lpfc_sli_disable_intr(struct lpfc_hba *phba)
2515{ 6276{
2516 switch (intr_mode) { 6277 /* Disable the currently initialized interrupt mode */
2517 case 0: 6278 if (phba->intr_type == MSIX)
2518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6279 lpfc_sli_disable_msix(phba);
2519 "0470 Enable INTx interrupt mode.\n"); 6280 else if (phba->intr_type == MSI)
2520 break; 6281 lpfc_sli_disable_msi(phba);
2521 case 1: 6282 else if (phba->intr_type == INTx)
6283 free_irq(phba->pcidev->irq, phba);
6284
6285 /* Reset interrupt management states */
6286 phba->intr_type = NONE;
6287 phba->sli.slistat.sli_intr = 0;
6288
6289 return;
6290}
6291
6292/**
6293 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6294 * @phba: pointer to lpfc hba data structure.
6295 *
6296 * This routine is invoked to enable the MSI-X interrupt vectors to device
6297 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6298 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6299 * enables either all or nothing, depending on the current availability of
6300 * PCI vector resources. The device driver is responsible for calling the
6301 * individual request_irq() to register each MSI-X vector with an interrupt
6302 * handler, which is done in this function. Note that later when the device
6303 * is unloading, the driver should always call free_irq() on all MSI-X
6304 * vectors it has done request_irq() on before calling pci_disable_msix().
6305 * Failure to do so results in a BUG_ON() and a device will be left with
6306 * MSI-X enabled, leaking its vectors.
6307 *
6308 * Return codes
6309 * 0 - successful
6310 * other values - error
6311 **/
6312static int
6313lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6314{
6315 int rc, index;
6316
6317 /* Set up MSI-X multi-message vectors */
6318 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6319 phba->sli4_hba.msix_entries[index].entry = index;
6320
6321 /* Configure MSI-X capability structure */
6322 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6323 phba->sli4_hba.cfg_eqn);
6324 if (rc) {
2522 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2523 "0481 Enabled MSI interrupt mode.\n"); 6326 "0484 PCI enable MSI-X failed (%d)\n", rc);
2524 break; 6327 goto msi_fail_out;
2525 case 2: 6328 }
6329 /* Log MSI-X vector assignment */
6330 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
2526 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2527 "0480 Enabled MSI-X interrupt mode.\n"); 6332 "0489 MSI-X entry[%d]: vector=x%x "
2528 break; 6333 "message=%d\n", index,
2529 default: 6334 phba->sli4_hba.msix_entries[index].vector,
2530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6335 phba->sli4_hba.msix_entries[index].entry);
2531 "0482 Illegal interrupt mode.\n"); 6336 /*
2532 break; 6337 * Assign MSI-X vectors to interrupt handlers
6338 */
6339
6340 /* The first vector must be associated with the slow-path handler for MQ */
6341 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6342 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6343 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6344 if (rc) {
6345 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6346 "0485 MSI-X slow-path request_irq failed "
6347 "(%d)\n", rc);
6348 goto msi_fail_out;
2533 } 6349 }
2534 return; 6350
6351 /* The rest of the vector(s) are associated to fast-path handler(s) */
6352 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6353 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6354 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6355 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6356 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6357 LPFC_FP_DRIVER_HANDLER_NAME,
6358 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6359 if (rc) {
6360 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6361 "0486 MSI-X fast-path (%d) "
6362 "request_irq failed (%d)\n", index, rc);
6363 goto cfg_fail_out;
6364 }
6365 }
6366
6367 return rc;
6368
6369cfg_fail_out:
6370 /* free the irq already requested */
6371 for (--index; index >= 1; index--)
6372 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6373 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6374
6375 /* free the irq already requested */
6376 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6377
6378msi_fail_out:
6379 /* Unconfigure MSI-X capability structure */
6380 pci_disable_msix(phba->pcidev);
6381 return rc;
2535} 6382}
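Note the unwind order in cfg_fail_out above: the fast-path vectors are released in reverse before vector 0, and only then is MSI-X disabled; the kernel-doc warns that calling pci_disable_msix() with IRQs still requested ends in a BUG_ON(). The same acquire/unwind shape reduced to a generic sketch; the helper, handler, and name string are illustrative, not from this patch:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: request n MSI-X IRQs, unwinding the already-registered
 * vectors if a later request_irq() fails.
 */
static int
register_msix_vectors(struct msix_entry *entries, int n,
		      irq_handler_t handler, void *dev)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = request_irq(entries[i].vector, handler,
				 IRQF_SHARED, "example", dev);
		if (rc)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* release in reverse order */
		free_irq(entries[i].vector, dev);
	return rc;
}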
2536 6383
6384/**
6385 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6386 * @phba: pointer to lpfc hba data structure.
6387 *
6388 * This routine is invoked to release the MSI-X vectors and then disable the
6389 * MSI-X interrupt mode to device with SLI-4 interface spec.
6390 **/
2537static void 6391static void
2538lpfc_stop_port(struct lpfc_hba *phba) 6392lpfc_sli4_disable_msix(struct lpfc_hba *phba)
2539{ 6393{
2540 /* Clear all interrupt enable conditions */ 6394 int index;
2541 writel(0, phba->HCregaddr);
2542 readl(phba->HCregaddr); /* flush */
2543 /* Clear all pending interrupts */
2544 writel(0xffffffff, phba->HAregaddr);
2545 readl(phba->HAregaddr); /* flush */
2546 6395
2547 /* Reset some HBA SLI setup states */ 6396 /* Free up MSI-X multi-message vectors */
2548 lpfc_stop_phba_timers(phba); 6397 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
2549 phba->pport->work_port_events = 0; 6398
6399 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6400 free_irq(phba->sli4_hba.msix_entries[index].vector,
6401 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6402 /* Disable MSI-X */
6403 pci_disable_msix(phba->pcidev);
6404
6405 return;
6406}
6407
6408/**
6409 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6410 * @phba: pointer to lpfc hba data structure.
6411 *
6412 * This routine is invoked to enable the MSI interrupt mode to device with
6413 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6414 * to enable the MSI vector. The device driver is responsible for calling
6415 * the request_irq() to register the MSI vector with an interrupt handler,
6416 * which is done in this function.
6417 *
6418 * Return codes
6419 * 0 - successful
6420 * other values - error
6421 **/
6422static int
6423lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6424{
6425 int rc, index;
6426
6427 rc = pci_enable_msi(phba->pcidev);
6428 if (!rc)
6429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6430 "0487 PCI enable MSI mode success.\n");
6431 else {
6432 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6433 "0488 PCI enable MSI mode failed (%d)\n", rc);
6434 return rc;
6435 }
6436
6437 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6438 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6439 if (rc) {
6440 pci_disable_msi(phba->pcidev);
6441 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6442 "0490 MSI request_irq failed (%d)\n", rc);
6443 }
6444
6445 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6446 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6447 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6448 }
6449
6450 return rc;
6451}
2550 6452
6453/**
6454 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6455 * @phba: pointer to lpfc hba data structure.
6456 *
6457 * This routine is invoked to disable the MSI interrupt mode to device with
6458 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
6459 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6460 * results in a BUG_ON() and a device will be left with MSI enabled, leaking
6461 * its vector.
6462 **/
6463static void
6464lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6465{
6466 free_irq(phba->pcidev->irq, phba);
6467 pci_disable_msi(phba->pcidev);
2551 return; 6468 return;
2552} 6469}
2553 6470
2554/** 6471/**
2555 * lpfc_enable_intr - Enable device interrupt 6472 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
2556 * @phba: pointer to lpfc hba data structure. 6473 * @phba: pointer to lpfc hba data structure.
2557 * 6474 *
2558 * This routine is invoked to enable device interrupt and associate driver's 6475 * This routine is invoked to enable device interrupt and associate driver's
2559 * interrupt handler(s) to interrupt vector(s). Depends on the interrupt 6476 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
2560 * mode configured to the driver, the driver will try to fall back from the 6477 * interface spec. Depending on the interrupt mode configured for the driver,
2561 * configured interrupt mode to an interrupt mode which is supported by the 6478 * the driver will try to fall back from the configured interrupt mode to an
2562 * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. 6479 * interrupt mode which is supported by the platform, kernel, and device in
6480 * the order of:
6481 * MSI-X -> MSI -> IRQ.
2563 * 6482 *
2564 * Return codes 6483 * Return codes
2565 * 0 - successful 6484 * 0 - successful
2566 * other values - error 6485 * other values - error
2567 **/ 6486 **/
2568static uint32_t 6487static uint32_t
2569lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6488lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2570{ 6489{
2571 uint32_t intr_mode = LPFC_INTR_ERROR; 6490 uint32_t intr_mode = LPFC_INTR_ERROR;
2572 int retval; 6491 int retval, index;
2573 6492
2574 if (cfg_mode == 2) { 6493 if (cfg_mode == 2) {
2575 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6494 /* Preparation before conf_msi mbox cmd */
2576 retval = lpfc_sli_config_port(phba, 3); 6495 retval = 0;
2577 if (!retval) { 6496 if (!retval) {
2578 /* Now, try to enable MSI-X interrupt mode */ 6497 /* Now, try to enable MSI-X interrupt mode */
2579 retval = lpfc_enable_msix(phba); 6498 retval = lpfc_sli4_enable_msix(phba);
2580 if (!retval) { 6499 if (!retval) {
2581 /* Indicate initialization to MSI-X mode */ 6500 /* Indicate initialization to MSI-X mode */
2582 phba->intr_type = MSIX; 6501 phba->intr_type = MSIX;
@@ -2587,7 +6506,7 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2587 6506
2588 /* Fallback to MSI if MSI-X initialization failed */ 6507 /* Fallback to MSI if MSI-X initialization failed */
2589 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6508 if (cfg_mode >= 1 && phba->intr_type == NONE) {
2590 retval = lpfc_enable_msi(phba); 6509 retval = lpfc_sli4_enable_msi(phba);
2591 if (!retval) { 6510 if (!retval) {
2592 /* Indicate initialization to MSI mode */ 6511 /* Indicate initialization to MSI mode */
2593 phba->intr_type = MSI; 6512 phba->intr_type = MSI;
@@ -2597,34 +6516,39 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2597 6516
2598 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 6517 /* Fallback to INTx if both MSI-X/MSI initialization failed */
2599 if (phba->intr_type == NONE) { 6518 if (phba->intr_type == NONE) {
2600 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6519 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
2601 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6520 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2602 if (!retval) { 6521 if (!retval) {
2603 /* Indicate initialization to INTx mode */ 6522 /* Indicate initialization to INTx mode */
2604 phba->intr_type = INTx; 6523 phba->intr_type = INTx;
2605 intr_mode = 0; 6524 intr_mode = 0;
6525 for (index = 0; index < phba->cfg_fcp_eq_count;
6526 index++) {
6527 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6528 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6529 }
2606 } 6530 }
2607 } 6531 }
2608 return intr_mode; 6532 return intr_mode;
2609} 6533}
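Both the SLI-3 and SLI-4 variants report the active mode in the same small integer namespace that cfg_use_msi is configured in, which is what makes the probe-side cfg_mode = --intr_mode step-down work. The mapping assumed throughout this patch, written out as a sketch; these enumerator names are illustrative, the driver itself uses bare 0/1/2:

/* Assumed encoding shared by cfg_use_msi and the returned intr_mode */
enum {
	LPFC_INTR_MODE_INTX = 0,	/* INTx line interrupt */
	LPFC_INTR_MODE_MSI  = 1,	/* single MSI vector   */
	LPFC_INTR_MODE_MSIX = 2,	/* MSI-X multi-vector  */
};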
2610 6534
2611/** 6535/**
2612 * lpfc_disable_intr - Disable device interrupt 6536 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
2613 * @phba: pointer to lpfc hba data structure. 6537 * @phba: pointer to lpfc hba data structure.
2614 * 6538 *
2615 * This routine is invoked to disable device interrupt and disassociate the 6539 * This routine is invoked to disable device interrupt and disassociate
2616 * driver's interrupt handler(s) from interrupt vector(s). Depending on the 6540 * the driver's interrupt handler(s) from interrupt vector(s) to device
2617 * interrupt mode, the driver will release the interrupt vector(s) for the 6541 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
2618 * message signaled interrupt. 6542 * will release the interrupt vector(s) for the message signaled interrupt.
2619 **/ 6543 **/
2620static void 6544static void
2621lpfc_disable_intr(struct lpfc_hba *phba) 6545lpfc_sli4_disable_intr(struct lpfc_hba *phba)
2622{ 6546{
2623 /* Disable the currently initialized interrupt mode */ 6547 /* Disable the currently initialized interrupt mode */
2624 if (phba->intr_type == MSIX) 6548 if (phba->intr_type == MSIX)
2625 lpfc_disable_msix(phba); 6549 lpfc_sli4_disable_msix(phba);
2626 else if (phba->intr_type == MSI) 6550 else if (phba->intr_type == MSI)
2627 lpfc_disable_msi(phba); 6551 lpfc_sli4_disable_msi(phba);
2628 else if (phba->intr_type == INTx) 6552 else if (phba->intr_type == INTx)
2629 free_irq(phba->pcidev->irq, phba); 6553 free_irq(phba->pcidev->irq, phba);
2630 6554
@@ -2636,263 +6560,233 @@ lpfc_disable_intr(struct lpfc_hba *phba)
2636} 6560}
2637 6561
2638/** 6562/**
2639 * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem 6563 * lpfc_unset_hba - Unset SLI3 hba device initialization
2640 * @pdev: pointer to PCI device 6564 * @phba: pointer to lpfc hba data structure.
2641 * @pid: pointer to PCI device identifier
2642 *
2643 * This routine is to be registered to the kernel's PCI subsystem. When an
2644 * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
2645 * PCI device-specific information of the device and driver to see if the
2646 * driver state that it can support this kind of device. If the match is
2647 * successful, the driver core invokes this routine. If this routine
2648 * determines it can claim the HBA, it does all the initialization that it
2649 * needs to do to handle the HBA properly.
2650 * 6565 *
2651 * Return code 6566 * This routine is invoked to unset the HBA device initialization steps done
2652 * 0 - driver can claim the device 6567 * to a device with SLI-3 interface spec.
2653 * negative value - driver can not claim the device
2654 **/ 6568 **/
2655static int __devinit 6569static void
2656lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 6570lpfc_unset_hba(struct lpfc_hba *phba)
2657{ 6571{
2658 struct lpfc_vport *vport = NULL; 6572 struct lpfc_vport *vport = phba->pport;
2659 struct lpfc_hba *phba; 6573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2660 struct lpfc_sli *psli;
2661 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
2662 struct Scsi_Host *shost = NULL;
2663 void *ptr;
2664 unsigned long bar0map_len, bar2map_len;
2665 int error = -ENODEV, retval;
2666 int i, hbq_count;
2667 uint16_t iotag;
2668 uint32_t cfg_mode, intr_mode;
2669 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2670 struct lpfc_adapter_event_header adapter_event;
2671
2672 if (pci_enable_device_mem(pdev))
2673 goto out;
2674 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
2675 goto out_disable_device;
2676 6574
2677 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 6575 spin_lock_irq(shost->host_lock);
2678 if (!phba) 6576 vport->load_flag |= FC_UNLOADING;
2679 goto out_release_regions; 6577 spin_unlock_irq(shost->host_lock);
2680 6578
2681 atomic_set(&phba->fast_event_count, 0); 6579 lpfc_stop_hba_timers(phba);
2682 spin_lock_init(&phba->hbalock);
2683 6580
2684 /* Initialize ndlp management spinlock */ 6581 phba->pport->work_port_events = 0;
2685 spin_lock_init(&phba->ndlp_lock);
2686 6582
2687 phba->pcidev = pdev; 6583 lpfc_sli_hba_down(phba);
2688 6584
2689 /* Assign an unused board number */ 6585 lpfc_sli_brdrestart(phba);
2690 if ((phba->brd_no = lpfc_get_instance()) < 0)
2691 goto out_free_phba;
2692 6586
2693 INIT_LIST_HEAD(&phba->port_list); 6587 lpfc_sli_disable_intr(phba);
2694 init_waitqueue_head(&phba->wait_4_mlo_m_q);
2695 /*
2696 * Get all the module params for configuring this host and then
2697 * establish the host.
2698 */
2699 lpfc_get_cfgparam(phba);
2700 phba->max_vpi = LPFC_MAX_VPI;
2701 6588
2702 /* Initialize timers used by driver */ 6589 return;
2703 init_timer(&phba->hb_tmofunc); 6590}
2704 phba->hb_tmofunc.function = lpfc_hb_timeout;
2705 phba->hb_tmofunc.data = (unsigned long)phba;
2706 6591
2707 psli = &phba->sli; 6592/**
2708 init_timer(&psli->mbox_tmo); 6593 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
2709 psli->mbox_tmo.function = lpfc_mbox_timeout; 6594 * @phba: pointer to lpfc hba data structure.
2710 psli->mbox_tmo.data = (unsigned long) phba; 6595 *
2711 init_timer(&phba->fcp_poll_timer); 6596 * This routine is invoked to unset the HBA device initialization steps to
2712 phba->fcp_poll_timer.function = lpfc_poll_timeout; 6596 * This routine is invoked to unset the HBA device initialization steps done
2713 phba->fcp_poll_timer.data = (unsigned long) phba; 6597 * to a device with SLI-4 interface spec.
2714 init_timer(&phba->fabric_block_timer); 6599static void
2715 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 6600lpfc_sli4_unset_hba(struct lpfc_hba *phba)
2716 phba->fabric_block_timer.data = (unsigned long) phba; 6601{
2717 init_timer(&phba->eratt_poll); 6602 struct lpfc_vport *vport = phba->pport;
2718 phba->eratt_poll.function = lpfc_poll_eratt; 6603 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2719 phba->eratt_poll.data = (unsigned long) phba;
2720 6604
2721 pci_set_master(pdev); 6605 spin_lock_irq(shost->host_lock);
2722 pci_save_state(pdev); 6606 vport->load_flag |= FC_UNLOADING;
2723 pci_try_set_mwi(pdev); 6607 spin_unlock_irq(shost->host_lock);
2724 6608
2725 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) 6609 phba->pport->work_port_events = 0;
2726 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0)
2727 goto out_idr_remove;
2728 6610
2729 /* 6611 lpfc_sli4_hba_down(phba);
2730 * Get the bus address of Bar0 and Bar2 and the number of bytes
2731 * required by each mapping.
2732 */
2733 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
2734 bar0map_len = pci_resource_len(phba->pcidev, 0);
2735 6612
2736 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 6613 lpfc_sli4_disable_intr(phba);
2737 bar2map_len = pci_resource_len(phba->pcidev, 2);
2738 6614
2739 /* Map HBA SLIM to a kernel virtual address. */ 6615 return;
2740 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6616}
2741 if (!phba->slim_memmap_p) {
2742 error = -ENODEV;
2743 dev_printk(KERN_ERR, &pdev->dev,
2744 "ioremap failed for SLIM memory.\n");
2745 goto out_idr_remove;
2746 }
2747
2748 /* Map HBA Control Registers to a kernel virtual address. */
2749 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
2750 if (!phba->ctrl_regs_memmap_p) {
2751 error = -ENODEV;
2752 dev_printk(KERN_ERR, &pdev->dev,
2753 "ioremap failed for HBA control registers.\n");
2754 goto out_iounmap_slim;
2755 }
2756 6617
2757 /* Allocate memory for SLI-2 structures */ 6618/**
2758 phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, 6619 * lpfc_sli4_hba_unset - Unset the fcoe hba
2759 SLI2_SLIM_SIZE, 6620 * @phba: Pointer to HBA context object.
2760 &phba->slim2p.phys, 6621 *
2761 GFP_KERNEL); 6622 * This function is called in the SLI4 code path to reset the HBA's FCoE
2762 if (!phba->slim2p.virt) 6623 * function. The caller is not required to hold any lock. This routine
2763 goto out_iounmap; 6624 * issues a PCI function reset mailbox command to reset the FCoE function.
6625 * At the end of the function, it calls the lpfc_hba_down_post function to
6626 * free any pending commands.
6627 **/
6628static void
6629lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6630{
6631 int wait_cnt = 0;
6632 LPFC_MBOXQ_t *mboxq;
2764 6633
2765 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 6634 lpfc_stop_hba_timers(phba);
2766 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6635 phba->sli4_hba.intr_enable = 0;
2767 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2768 phba->IOCBs = (phba->slim2p.virt +
2769 offsetof(struct lpfc_sli2_slim, IOCBs));
2770 6636
2771 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 6637 /*
2772 lpfc_sli_hbq_size(), 6638 * Gracefully wait out the potential current outstanding asynchronous
2773 &phba->hbqslimp.phys, 6639 * mailbox command.
2774 GFP_KERNEL); 6640 */
2775 if (!phba->hbqslimp.virt)
2776 goto out_free_slim;
2777 6641
2778 hbq_count = lpfc_sli_hbq_count(); 6642 /* First, block any pending async mailbox command from posted */
2779 ptr = phba->hbqslimp.virt; 6643 spin_lock_irq(&phba->hbalock);
2780 for (i = 0; i < hbq_count; ++i) { 6644 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
2781 phba->hbqs[i].hbq_virt = ptr; 6645 spin_unlock_irq(&phba->hbalock);
2782 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6646 /* Now, trying to wait it out if we can */
2783 ptr += (lpfc_hbq_defs[i]->entry_count * 6647 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2784 sizeof(struct lpfc_hbq_entry)); 6648 msleep(10);
6649 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6650 break;
2785 } 6651 }
2786 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6652 /* Forcefully release the outstanding mailbox command if timed out */
2787 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6653 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2788 6654 spin_lock_irq(&phba->hbalock);
2789 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6655 mboxq = phba->sli.mbox_active;
2790 6656 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
2791 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 6657 __lpfc_mbox_cmpl_put(phba, mboxq);
2792 6658 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2793 /* Initialize the SLI Layer to run with lpfc HBAs. */ 6659 phba->sli.mbox_active = NULL;
2794 lpfc_sli_setup(phba); 6660 spin_unlock_irq(&phba->hbalock);
2795 lpfc_sli_queue_setup(phba);
2796
2797 retval = lpfc_mem_alloc(phba);
2798 if (retval) {
2799 error = retval;
2800 goto out_free_hbqslimp;
2801 } 6661 }
2802 6662
2803 /* Initialize and populate the iocb list per host. */ 6663 /* Tear down the queues in the HBA */
2804 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6664 lpfc_sli4_queue_unset(phba);
2805 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
2806 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
2807 if (iocbq_entry == NULL) {
2808 printk(KERN_ERR "%s: only allocated %d iocbs of "
2809 "expected %d count. Unloading driver.\n",
2810 __func__, i, LPFC_IOCB_LIST_CNT);
2811 error = -ENOMEM;
2812 goto out_free_iocbq;
2813 }
2814 6665
2815 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6666 /* Disable PCI subsystem interrupt */
2816 if (iotag == 0) { 6667 lpfc_sli4_disable_intr(phba);
2817 kfree (iocbq_entry);
2818 printk(KERN_ERR "%s: failed to allocate IOTAG. "
2819 "Unloading driver.\n",
2820 __func__);
2821 error = -ENOMEM;
2822 goto out_free_iocbq;
2823 }
2824 6668
2825 spin_lock_irq(&phba->hbalock); 6669 /* Stopping the kthread will trigger work_done one more time */
2826 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6670 kthread_stop(phba->worker_thread);
2827 phba->total_iocbq_bufs++;
2828 spin_unlock_irq(&phba->hbalock);
2829 }
2830 6671
2831 /* Initialize HBA structure */ 6672 /* Stop the SLI4 device port */
2832 phba->fc_edtov = FF_DEF_EDTOV; 6673 phba->pport->work_port_events = 0;
2833 phba->fc_ratov = FF_DEF_RATOV; 6674}
2834 phba->fc_altov = FF_DEF_ALTOV;
2835 phba->fc_arbtov = FF_DEF_ARBTOV;
2836 6675
2837 INIT_LIST_HEAD(&phba->work_list); 6676/**
2838 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6677 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
2839 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6678 * @pdev: pointer to PCI device
6679 * @pid: pointer to PCI device identifier
6680 *
6681 * This routine is to be called to attach a device with SLI-3 interface spec
6682 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6683 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6684 * information of the device and driver to see if the driver state that it can
6685 * support this kind of device. If the match is successful, the driver core
6686 * invokes this routine. If this routine determines it can claim the HBA, it
6687 * does all the initialization that it needs to do to handle the HBA properly.
6688 *
6689 * Return code
6690 * 0 - driver can claim the device
6691 * negative value - driver can not claim the device
6692 **/
6693static int __devinit
6694lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6695{
6696 struct lpfc_hba *phba;
6697 struct lpfc_vport *vport = NULL;
6698 int error;
6699 uint32_t cfg_mode, intr_mode;
2840 6700
2841 /* Initialize the wait queue head for the kernel thread */ 6701 /* Allocate memory for HBA structure */
2842 init_waitqueue_head(&phba->work_waitq); 6702 phba = lpfc_hba_alloc(pdev);
6703 if (!phba)
6704 return -ENOMEM;
2843 6705
2844 /* Startup the kernel thread for this host adapter. */ 6706 /* Perform generic PCI device enabling operation */
2845 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6707 error = lpfc_enable_pci_dev(phba);
2846 "lpfc_worker_%d", phba->brd_no); 6708 if (error) {
2847 if (IS_ERR(phba->worker_thread)) { 6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2848 error = PTR_ERR(phba->worker_thread); 6710 "1401 Failed to enable pci device.\n");
2849 goto out_free_iocbq; 6711 goto out_free_phba;
2850 } 6712 }
2851 6713
2852 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 6714 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
2853 spin_lock_init(&phba->scsi_buf_list_lock); 6715 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
2854 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 6716 if (error)
6717 goto out_disable_pci_dev;
2855 6718
2856 /* Initialize list of fabric iocbs */ 6719 /* Set up SLI-3 specific device PCI memory space */
2857 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6720 error = lpfc_sli_pci_mem_setup(phba);
6721 if (error) {
6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723 "1402 Failed to set up pci memory space.\n");
6724 goto out_disable_pci_dev;
6725 }
2858 6726
2859 /* Initialize list to save ELS buffers */ 6727 /* Set up phase-1 common device driver resources */
2860 INIT_LIST_HEAD(&phba->elsbuf); 6728 error = lpfc_setup_driver_resource_phase1(phba);
6729 if (error) {
6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6731 "1403 Failed to set up driver resource.\n");
6732 goto out_unset_pci_mem_s3;
6733 }
2861 6734
2862 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6735 /* Set up SLI-3 specific device driver resources */
2863 if (!vport) 6736 error = lpfc_sli_driver_resource_setup(phba);
2864 goto out_kthread_stop; 6737 if (error) {
6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739 "1404 Failed to set up driver resource.\n");
6740 goto out_unset_pci_mem_s3;
6741 }
2865 6742
2866 shost = lpfc_shost_from_vport(vport); 6743 /* Initialize and populate the iocb list per host */
2867 phba->pport = vport; 6744 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
2868 lpfc_debugfs_initialize(vport); 6745 if (error) {
6746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6747 "1405 Failed to initialize iocb list.\n");
6748 goto out_unset_driver_resource_s3;
6749 }
2869 6750
2870 pci_set_drvdata(pdev, shost); 6751 /* Set up common device driver resources */
6752 error = lpfc_setup_driver_resource_phase2(phba);
6753 if (error) {
6754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6755 "1406 Failed to set up driver resource.\n");
6756 goto out_free_iocb_list;
6757 }
2871 6758
2872 phba->MBslimaddr = phba->slim_memmap_p; 6759 /* Create SCSI host to the physical port */
2873 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6760 error = lpfc_create_shost(phba);
2874 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6761 if (error) {
2875 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2876 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6763 "1407 Failed to create scsi host.\n");
6764 goto out_unset_driver_resource;
6765 }
2877 6766
2878 /* Configure sysfs attributes */ 6767 /* Configure sysfs attributes */
2879 if (lpfc_alloc_sysfs_attr(vport)) { 6768 vport = phba->pport;
6769 error = lpfc_alloc_sysfs_attr(vport);
6770 if (error) {
2880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2881 "1476 Failed to allocate sysfs attr\n"); 6772 "1476 Failed to allocate sysfs attr\n");
2882 error = -ENOMEM; 6773 goto out_destroy_shost;
2883 goto out_destroy_port;
2884 } 6774 }
2885 6775
6776 /* Now, trying to enable interrupt and bring up the device */
2886 cfg_mode = phba->cfg_use_msi; 6777 cfg_mode = phba->cfg_use_msi;
2887 while (true) { 6778 while (true) {
6779 /* Put device to a known state before enabling interrupt */
6780 lpfc_stop_port(phba);
2888 /* Configure and enable interrupt */ 6781 /* Configure and enable interrupt */
2889 intr_mode = lpfc_enable_intr(phba, cfg_mode); 6782 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
2890 if (intr_mode == LPFC_INTR_ERROR) { 6783 if (intr_mode == LPFC_INTR_ERROR) {
2891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2892 "0426 Failed to enable interrupt.\n"); 6785 "0431 Failed to enable interrupt.\n");
6786 error = -ENODEV;
2893 goto out_free_sysfs_attr; 6787 goto out_free_sysfs_attr;
2894 } 6788 }
2895 /* HBA SLI setup */ 6789 /* SLI-3 HBA setup */
2896 if (lpfc_sli_hba_setup(phba)) { 6790 if (lpfc_sli_hba_setup(phba)) {
2897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2898 "1477 Failed to set up hba\n"); 6792 "1477 Failed to set up hba\n");
@@ -2902,185 +6796,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2902 6796
2903 /* Wait 50ms for the interrupts of previous mailbox commands */ 6797 /* Wait 50ms for the interrupts of previous mailbox commands */
2904 msleep(50); 6798 msleep(50);
2905 /* Check active interrupts received */ 6799 /* Check active interrupts on message signaled interrupts */
2906 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 6800 if (intr_mode == 0 ||
6801 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2907 /* Log the current active interrupt mode */ 6802 /* Log the current active interrupt mode */
2908 phba->intr_mode = intr_mode; 6803 phba->intr_mode = intr_mode;
2909 lpfc_log_intr_mode(phba, intr_mode); 6804 lpfc_log_intr_mode(phba, intr_mode);
2910 break; 6805 break;
2911 } else { 6806 } else {
2912 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2913 "0451 Configure interrupt mode (%d) " 6808 "0447 Configure interrupt mode (%d) "
2914 "failed active interrupt test.\n", 6809 "failed active interrupt test.\n",
2915 intr_mode); 6810 intr_mode);
2916 if (intr_mode == 0) {
2917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2918 "0479 Failed to enable "
2919 "interrupt.\n");
2920 error = -ENODEV;
2921 goto out_remove_device;
2922 }
2923 /* Stop HBA SLI setups */
2924 lpfc_stop_port(phba);
2925 /* Disable the current interrupt mode */ 6811 /* Disable the current interrupt mode */
2926 lpfc_disable_intr(phba); 6812 lpfc_sli_disable_intr(phba);
2927 /* Try next level of interrupt mode */ 6813 /* Try next level of interrupt mode */
2928 cfg_mode = --intr_mode; 6814 cfg_mode = --intr_mode;
2929 } 6815 }
2930 } 6816 }
2931 6817
2932 /* 6818 /* Perform post initialization setup */
2933 * hba setup may have changed the hba_queue_depth so we need to adjust 6819 lpfc_post_init_setup(phba);
2934 * the value of can_queue.
2935 */
2936 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2937 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2938
2939 if (lpfc_prot_mask && lpfc_prot_guard) {
2940 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2941 "1478 Registering BlockGuard with the "
2942 "SCSI layer\n");
2943 6820
2944 scsi_host_set_prot(shost, lpfc_prot_mask); 6821 /* Check if there are static vports to be created. */
2945 scsi_host_set_guard(shost, lpfc_prot_guard); 6822 lpfc_create_static_vport(phba);
2946 }
2947 }
2948
2949 if (!_dump_buf_data) {
2950 int pagecnt = 10;
2951 while (pagecnt) {
2952 spin_lock_init(&_dump_buf_lock);
2953 _dump_buf_data =
2954 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2955 if (_dump_buf_data) {
2956 printk(KERN_ERR "BLKGRD allocated %d pages for "
2957 "_dump_buf_data at 0x%p\n",
2958 (1 << pagecnt), _dump_buf_data);
2959 _dump_buf_data_order = pagecnt;
2960 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2961 << pagecnt));
2962 break;
2963 } else {
2964 --pagecnt;
2965 }
2966
2967 }
2968
2969 if (!_dump_buf_data_order)
2970 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2971 "memory for hexdump\n");
2972
2973 } else {
2974 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2975 "\n", _dump_buf_data);
2976 }
2977
2978
2979 if (!_dump_buf_dif) {
2980 int pagecnt = 10;
2981 while (pagecnt) {
2982 _dump_buf_dif =
2983 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2984 if (_dump_buf_dif) {
2985 printk(KERN_ERR "BLKGRD allocated %d pages for "
2986 "_dump_buf_dif at 0x%p\n",
2987 (1 << pagecnt), _dump_buf_dif);
2988 _dump_buf_dif_order = pagecnt;
2989 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2990 << pagecnt));
2991 break;
2992 } else {
2993 --pagecnt;
2994 }
2995
2996 }
2997
2998 if (!_dump_buf_dif_order)
2999 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
3000 "memory for hexdump\n");
3001
3002 } else {
3003 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
3004 _dump_buf_dif);
3005 }
3006
3007 lpfc_host_attrib_init(shost);
3008
3009 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3010 spin_lock_irq(shost->host_lock);
3011 lpfc_poll_start_timer(phba);
3012 spin_unlock_irq(shost->host_lock);
3013 }
3014
3015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3016 "0428 Perform SCSI scan\n");
3017 /* Send board arrival event to upper layer */
3018 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
3019 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
3020 fc_host_post_vendor_event(shost, fc_get_event_number(),
3021 sizeof(adapter_event),
3022 (char *) &adapter_event,
3023 LPFC_NL_VENDOR_ID);
3024 6823
3025 return 0; 6824 return 0;
3026 6825
3027out_remove_device: 6826out_remove_device:
3028 spin_lock_irq(shost->host_lock); 6827 lpfc_unset_hba(phba);
3029 vport->load_flag |= FC_UNLOADING;
3030 spin_unlock_irq(shost->host_lock);
3031 lpfc_stop_phba_timers(phba);
3032 phba->pport->work_port_events = 0;
3033 lpfc_disable_intr(phba);
3034 lpfc_sli_hba_down(phba);
3035 lpfc_sli_brdrestart(phba);
3036out_free_sysfs_attr: 6828out_free_sysfs_attr:
3037 lpfc_free_sysfs_attr(vport); 6829 lpfc_free_sysfs_attr(vport);
3038out_destroy_port: 6830out_destroy_shost:
3039 destroy_port(vport); 6831 lpfc_destroy_shost(phba);
3040out_kthread_stop: 6832out_unset_driver_resource:
3041 kthread_stop(phba->worker_thread); 6833 lpfc_unset_driver_resource_phase2(phba);
3042out_free_iocbq: 6834out_free_iocb_list:
3043 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6835 lpfc_free_iocb_list(phba);
3044 &phba->lpfc_iocb_list, list) { 6836out_unset_driver_resource_s3:
3045 kfree(iocbq_entry); 6837 lpfc_sli_driver_resource_unset(phba);
3046 phba->total_iocbq_bufs--; 6838out_unset_pci_mem_s3:
3047 } 6839 lpfc_sli_pci_mem_unset(phba);
3048 lpfc_mem_free(phba); 6840out_disable_pci_dev:
3049out_free_hbqslimp: 6841 lpfc_disable_pci_dev(phba);
3050 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3051 phba->hbqslimp.virt, phba->hbqslimp.phys);
3052out_free_slim:
3053 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3054 phba->slim2p.virt, phba->slim2p.phys);
3055out_iounmap:
3056 iounmap(phba->ctrl_regs_memmap_p);
3057out_iounmap_slim:
3058 iounmap(phba->slim_memmap_p);
3059out_idr_remove:
3060 idr_remove(&lpfc_hba_index, phba->brd_no);
3061out_free_phba: 6842out_free_phba:
3062 kfree(phba); 6843 lpfc_hba_free(phba);
3063out_release_regions:
3064 pci_release_selected_regions(pdev, bars);
3065out_disable_device:
3066 pci_disable_device(pdev);
3067out:
3068 pci_set_drvdata(pdev, NULL);
3069 if (shost)
3070 scsi_host_put(shost);
3071 return error; 6844 return error;
3072} 6845}
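The error labels at the end of lpfc_pci_probe_one_s3() mirror the setup sequence exactly: each successful step adds one label above the previous one, so a failure at step N unwinds steps N-1 down to 1 and nothing else. The shape reduced to a skeleton; the step and undo functions are hypothetical stand-ins:

/* Skeleton of the probe error ladder: undo in reverse setup order */
static int
probe_skeleton(void)
{
	int error;

	error = setup_a();	/* e.g. enable PCI device */
	if (error)
		goto out;
	error = setup_b();	/* e.g. map PCI memory */
	if (error)
		goto out_undo_a;
	error = setup_c();	/* e.g. create SCSI host */
	if (error)
		goto out_undo_b;
	return 0;

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
out:
	return error;
}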
3073 6846
3074/** 6847/**
3075 * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem 6848 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
3076 * @pdev: pointer to PCI device 6849 * @pdev: pointer to PCI device
3077 * 6850 *
3078 * This routine is to be registered to the kernel's PCI subsystem. When an 6851 * This routine is to be called to detach a device with SLI-3 interface
3079 * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup 6852 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
3080 * for the HBA device to be removed from the PCI subsystem properly. 6853 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6854 * device to be removed from the PCI subsystem properly.
3081 **/ 6855 **/
3082static void __devexit 6856static void __devexit
3083lpfc_pci_remove_one(struct pci_dev *pdev) 6857lpfc_pci_remove_one_s3(struct pci_dev *pdev)
3084{ 6858{
3085 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6859 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6860 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3098,7 +6872,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3098 /* Release all the vports against this physical port */ 6872 /* Release all the vports against this physical port */
3099 vports = lpfc_create_vport_work_array(phba); 6873 vports = lpfc_create_vport_work_array(phba);
3100 if (vports != NULL) 6874 if (vports != NULL)
3101 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) 6875 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
3102 fc_vport_terminate(vports[i]->fc_vport); 6876 fc_vport_terminate(vports[i]->fc_vport);
3103 lpfc_destroy_vport_work_array(phba, vports); 6877 lpfc_destroy_vport_work_array(phba, vports);
3104 6878
@@ -3120,7 +6894,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3120 /* Final cleanup of txcmplq and reset the HBA */ 6894 /* Final cleanup of txcmplq and reset the HBA */
3121 lpfc_sli_brdrestart(phba); 6895 lpfc_sli_brdrestart(phba);
3122 6896
3123 lpfc_stop_phba_timers(phba); 6897 lpfc_stop_hba_timers(phba);
3124 spin_lock_irq(&phba->hbalock); 6898 spin_lock_irq(&phba->hbalock);
3125 list_del_init(&vport->listentry); 6899 list_del_init(&vport->listentry);
3126 spin_unlock_irq(&phba->hbalock); 6900 spin_unlock_irq(&phba->hbalock);
@@ -3128,7 +6902,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3128 lpfc_debugfs_terminate(vport); 6902 lpfc_debugfs_terminate(vport);
3129 6903
3130 /* Disable interrupt */ 6904 /* Disable interrupt */
3131 lpfc_disable_intr(phba); 6905 lpfc_sli_disable_intr(phba);
3132 6906
3133 pci_set_drvdata(pdev, NULL); 6907 pci_set_drvdata(pdev, NULL);
3134 scsi_host_put(shost); 6908 scsi_host_put(shost);
@@ -3138,7 +6912,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3138 * corresponding pools here. 6912 * corresponding pools here.
3139 */ 6913 */
3140 lpfc_scsi_free(phba); 6914 lpfc_scsi_free(phba);
3141 lpfc_mem_free(phba); 6915 lpfc_mem_free_all(phba);
3142 6916
3143 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6917 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3144 phba->hbqslimp.virt, phba->hbqslimp.phys); 6918 phba->hbqslimp.virt, phba->hbqslimp.phys);
@@ -3151,36 +6925,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3151 iounmap(phba->ctrl_regs_memmap_p); 6925 iounmap(phba->ctrl_regs_memmap_p);
3152 iounmap(phba->slim_memmap_p); 6926 iounmap(phba->slim_memmap_p);
3153 6927
3154 idr_remove(&lpfc_hba_index, phba->brd_no); 6928 lpfc_hba_free(phba);
3155
3156 kfree(phba);
3157 6929
3158 pci_release_selected_regions(pdev, bars); 6930 pci_release_selected_regions(pdev, bars);
3159 pci_disable_device(pdev); 6931 pci_disable_device(pdev);
3160} 6932}
3161 6933
3162/** 6934/**
3163 * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management 6935 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
3164 * @pdev: pointer to PCI device 6936 * @pdev: pointer to PCI device
3165 * @msg: power management message 6937 * @msg: power management message
3166 * 6938 *
3167 * This routine is to be registered to the kernel's PCI subsystem to support 6939 * This routine is to be called from the kernel's PCI subsystem to support
3168 * system Power Management (PM). When PM invokes this method, it quiesces the 6940 * system Power Management (PM) to device with SLI-3 interface spec. When
3169 * device by stopping the driver's worker thread for the device, turning off 6941 * PM invokes this method, it quiesces the device by stopping the driver's
3170 * device's interrupt and DMA, and bringing the device offline. Note that as the 6942 * worker thread for the device, turning off device's interrupt and DMA,
3171 * driver implements the minimum PM requirements to a power-aware driver's PM 6943 * and bringing the device offline. Note that as the driver implements the
3172 * support for suspend/resume -- all the possible PM messages (SUSPEND, 6944 * minimum PM requirements to a power-aware driver's PM support for the
3173 * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND 6945 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
3174 * and the driver will fully reinitialize its device during resume() method 6946 * to the suspend() method call will be treated as SUSPEND and the driver will
3175 * call, the driver will set device to PCI_D3hot state in PCI config space 6947 * fully reinitialize its device during resume() method call, the driver will
3176 * instead of setting it according to the @msg provided by the PM. 6948 * set device to PCI_D3hot state in PCI config space instead of setting it
6949 * according to the @msg provided by the PM.
3177 * 6950 *
3178 * Return code 6951 * Return code
3179 * 0 - driver suspended the device 6952 * 0 - driver suspended the device
3180 * Error otherwise 6953 * Error otherwise
3181 **/ 6954 **/
3182static int 6955static int
3183lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 6956lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
3184{ 6957{
3185 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6958 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3186 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6959 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3194,7 +6967,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3194 kthread_stop(phba->worker_thread); 6967 kthread_stop(phba->worker_thread);
3195 6968
3196 /* Disable interrupt from device */ 6969 /* Disable interrupt from device */
3197 lpfc_disable_intr(phba); 6970 lpfc_sli_disable_intr(phba);
3198 6971
3199 /* Save device state to PCI config space */ 6972 /* Save device state to PCI config space */
3200 pci_save_state(pdev); 6973 pci_save_state(pdev);
@@ -3204,25 +6977,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3204} 6977}
3205 6978
3206/** 6979/**
3207 * lpfc_pci_resume_one - lpfc PCI func to resume device for power management 6980 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
3208 * @pdev: pointer to PCI device 6981 * @pdev: pointer to PCI device
3209 * 6982 *
3210 * This routine is to be registered to the kernel's PCI subsystem to support 6983 * This routine is to be called from the kernel's PCI subsystem to support
3211 * system Power Management (PM). When PM invokes this method, it restores 6984 * system Power Management (PM) to device with SLI-3 interface spec. When PM
3212 * the device's PCI config space state and fully reinitializes the device 6985 * invokes this method, it restores the device's PCI config space state and
3213 * and brings it online. Note that as the driver implements the minimum PM 6986 * fully reinitializes the device and brings it online. Note that as the
3214 * requirements to a power-aware driver's PM for suspend/resume -- all 6987 * driver implements the minimum PM requirements to a power-aware driver's
3215 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 6988 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
3216 * method call will be treated as SUSPEND and the driver will fully 6989 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
3217 * reinitialize its device during resume() method call, the device will be 6990 * driver will fully reinitialize its device during resume() method call,
3218 * set to PCI_D0 directly in PCI config space before restoring the state. 6991 * the device will be set to PCI_D0 directly in PCI config space before
6992 * restoring the state.
3219 * 6993 *
3220 * Return code 6994 * Return code
3221 * 0 - driver resumed the device 6995 * 0 - driver resumed the device
3222 * Error otherwise 6996 * Error otherwise
3223 **/ 6997 **/
3224static int 6998static int
3225lpfc_pci_resume_one(struct pci_dev *pdev) 6999lpfc_pci_resume_one_s3(struct pci_dev *pdev)
3226{ 7000{
3227 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7001 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3228 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7002 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3250,7 +7024,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3250 } 7024 }
3251 7025
3252 /* Configure and enable interrupt */ 7026 /* Configure and enable interrupt */
3253 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7027 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3254 if (intr_mode == LPFC_INTR_ERROR) { 7028 if (intr_mode == LPFC_INTR_ERROR) {
3255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3256 "0430 PM resume Failed to enable interrupt\n"); 7030 "0430 PM resume Failed to enable interrupt\n");
@@ -3269,23 +7043,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3269} 7043}
3270 7044
3271/** 7045/**
3272 * lpfc_io_error_detected - Driver method for handling PCI I/O error detected 7046 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
3273 * @pdev: pointer to PCI device. 7047 * @pdev: pointer to PCI device.
3274 * @state: the current PCI connection state. 7048 * @state: the current PCI connection state.
3275 * 7049 *
3276 * This routine is registered to the PCI subsystem for error handling. This 7050 * This routine is called from the PCI subsystem for I/O error handling to
3277 * function is called by the PCI subsystem after a PCI bus error affecting 7051 * device with SLI-3 interface spec. This function is called by the PCI
3278 * this device has been detected. When this function is invoked, it will 7052 * subsystem after a PCI bus error affecting this device has been detected.
3279 * need to stop all the I/Os and interrupt(s) to the device. Once that is 7053 * When this function is invoked, it will need to stop all the I/Os and
3280 * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to 7054 * interrupt(s) to the device. Once that is done, it will return
3281 * perform proper recovery as desired. 7055 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7056 * as desired.
3282 * 7057 *
3283 * Return codes 7058 * Return codes
3284 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7059 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
3285 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7060 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3286 **/ 7061 **/
3287static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 7062static pci_ers_result_t
3288 pci_channel_state_t state) 7063lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
3289{ 7064{
3290 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7065 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3291 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7066 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3312,30 +7087,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
3312 lpfc_sli_abort_iocb_ring(phba, pring); 7087 lpfc_sli_abort_iocb_ring(phba, pring);
3313 7088
3314 /* Disable interrupt */ 7089 /* Disable interrupt */
3315 lpfc_disable_intr(phba); 7090 lpfc_sli_disable_intr(phba);
3316 7091
3317 /* Request a slot reset. */ 7092 /* Request a slot reset. */
3318 return PCI_ERS_RESULT_NEED_RESET; 7093 return PCI_ERS_RESULT_NEED_RESET;
3319} 7094}
3320 7095
3321/** 7096/**
3322 * lpfc_io_slot_reset - Restart a PCI device from scratch 7097 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
3323 * @pdev: pointer to PCI device. 7098 * @pdev: pointer to PCI device.
3324 * 7099 *
3325 * This routine is registered to the PCI subsystem for error handling. This is 7100 * This routine is called from the PCI subsystem for error handling to
3326 * called after PCI bus has been reset to restart the PCI card from scratch, 7101 * device with SLI-3 interface spec. This is called after PCI bus has been
3327 * as if from a cold-boot. During the PCI subsystem error recovery, after the 7102 * reset to restart the PCI card from scratch, as if from a cold-boot.
3328 * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform 7103 * During the PCI subsystem error recovery, after driver returns
3329 * proper error recovery and then call this routine before calling the .resume 7104 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
3330 * method to recover the device. This function will initialize the HBA device, 7105 * recovery and then call this routine before calling the .resume method
3331 * enable the interrupt, but it will just put the HBA to offline state without 7106 * to recover the device. This function will initialize the HBA device,
3332 * passing any I/O traffic. 7107 * enable the interrupt, but it will just put the HBA to offline state
7108 * without passing any I/O traffic.
3333 * 7109 *
3334 * Return codes 7110 * Return codes
3335 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7111 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
3336 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7112 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3337 */ 7113 */
3338static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 7114static pci_ers_result_t
7115lpfc_io_slot_reset_s3(struct pci_dev *pdev)
3339{ 7116{
3340 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7117 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3341 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7118 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3354,11 +7131,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3354 pci_set_master(pdev); 7131 pci_set_master(pdev);
3355 7132
3356 spin_lock_irq(&phba->hbalock); 7133 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 7134 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock); 7135 spin_unlock_irq(&phba->hbalock);
3359 7136
3360 /* Configure and enable interrupt */ 7137 /* Configure and enable interrupt */
3361 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7138 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3362 if (intr_mode == LPFC_INTR_ERROR) { 7139 if (intr_mode == LPFC_INTR_ERROR) {
3363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3364 "0427 Cannot re-enable interrupt after " 7141 "0427 Cannot re-enable interrupt after "
@@ -3378,20 +7155,713 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3378} 7155}
3379 7156
3380/** 7157/**
3381 * lpfc_io_resume - Resume PCI I/O operation 7158 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
3382 * @pdev: pointer to PCI device 7159 * @pdev: pointer to PCI device
3383 * 7160 *
3384 * This routine is registered to the PCI subsystem for error handling. It is 7161 * This routine is called from the PCI subsystem for error handling to device
3385 * called when kernel error recovery tells the lpfc driver that it is ok to 7162 * with SLI-3 interface spec. It is called when kernel error recovery tells
3386 * resume normal PCI operation after PCI bus error recovery. After this call, 7163 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
3387 * traffic can start to flow from this device again. 7164 * error recovery. After this call, traffic can start to flow from this device
7165 * again.
3388 */ 7166 */
3389static void lpfc_io_resume(struct pci_dev *pdev) 7167static void
7168lpfc_io_resume_s3(struct pci_dev *pdev)
7169{
7170 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7171 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7172
7173 lpfc_online(phba);
7174}
7175
7176/**
7177 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7178 * @phba: pointer to lpfc hba data structure.
7179 *
7180 * returns the number of ELS/CT IOCBs to reserve
7181 **/
7182int
7183lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7184{
7185 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7186
7187 if (max_xri <= 100)
7188 return 4;
7189 else if (max_xri <= 256)
7190 return 8;
7191 else if (max_xri <= 512)
7192 return 16;
7193 else if (max_xri <= 1024)
7194 return 32;
7195 else
7196 return 48;
7197}
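
The tiered reservation above maps the configured XRI count to an ELS/CT IOCB reserve. As a minimal, self-contained sketch (not part of the patch), the same policy can be written table-driven:

static const struct {
	int max_xri;
	int els_iocbs;
} els_iocb_tier[] = {
	{  100,  4 },
	{  256,  8 },
	{  512, 16 },
	{ 1024, 32 },
};

static int els_iocb_cnt_sketch(int max_xri)
{
	int i;

	/* The first tier whose XRI ceiling covers max_xri wins. */
	for (i = 0; i < 4; i++)
		if (max_xri <= els_iocb_tier[i].max_xri)
			return els_iocb_tier[i].els_iocbs;
	return 48;	/* anything above 1024 XRIs */
}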
7198
7199/**
7200 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7201 * @pdev: pointer to PCI device
7202 * @pid: pointer to PCI device identifier
7203 *
7204 * This routine is called from the kernel's PCI subsystem to device with
7205 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7206 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7207 * information of the device and driver to see whether the driver can
7208 * support this kind of device. If the match is successful, the driver
7209 * core invokes this routine. If this routine determines it can claim the HBA,
7210 * it does all the initialization that it needs to do to handle the HBA
7211 * properly.
7212 *
7213 * Return code
7214 * 0 - driver can claim the device
7215 * negative value - driver can not claim the device
7216 **/
7217static int __devinit
7218lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7219{
7220 struct lpfc_hba *phba;
7221 struct lpfc_vport *vport = NULL;
7222 int error;
7223 uint32_t cfg_mode, intr_mode;
7224 int mcnt;
7225
7226 /* Allocate memory for HBA structure */
7227 phba = lpfc_hba_alloc(pdev);
7228 if (!phba)
7229 return -ENOMEM;
7230
7231 /* Perform generic PCI device enabling operation */
7232 error = lpfc_enable_pci_dev(phba);
7233 if (error) {
7234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7235 "1409 Failed to enable pci device.\n");
7236 goto out_free_phba;
7237 }
7238
7239 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
7240 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7241 if (error)
7242 goto out_disable_pci_dev;
7243
7244 /* Set up SLI-4 specific device PCI memory space */
7245 error = lpfc_sli4_pci_mem_setup(phba);
7246 if (error) {
7247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7248 "1410 Failed to set up pci memory space.\n");
7249 goto out_disable_pci_dev;
7250 }
7251
7252 /* Set up phase-1 common device driver resources */
7253 error = lpfc_setup_driver_resource_phase1(phba);
7254 if (error) {
7255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7256 "1411 Failed to set up driver resource.\n");
7257 goto out_unset_pci_mem_s4;
7258 }
7259
7260 /* Set up SLI-4 Specific device driver resources */
7261 error = lpfc_sli4_driver_resource_setup(phba);
7262 if (error) {
7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7264 "1412 Failed to set up driver resource.\n");
7265 goto out_unset_pci_mem_s4;
7266 }
7267
7268 /* Initialize and populate the iocb list per host */
7269 error = lpfc_init_iocb_list(phba,
7270 phba->sli4_hba.max_cfg_param.max_xri);
7271 if (error) {
7272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7273 "1413 Failed to initialize iocb list.\n");
7274 goto out_unset_driver_resource_s4;
7275 }
7276
7277 /* Set up common device driver resources */
7278 error = lpfc_setup_driver_resource_phase2(phba);
7279 if (error) {
7280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7281 "1414 Failed to set up driver resource.\n");
7282 goto out_free_iocb_list;
7283 }
7284
7285 /* Create SCSI host to the physical port */
7286 error = lpfc_create_shost(phba);
7287 if (error) {
7288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7289 "1415 Failed to create scsi host.\n");
7290 goto out_unset_driver_resource;
7291 }
7292
7293 /* Configure sysfs attributes */
7294 vport = phba->pport;
7295 error = lpfc_alloc_sysfs_attr(vport);
7296 if (error) {
7297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7298 "1416 Failed to allocate sysfs attr\n");
7299 goto out_destroy_shost;
7300 }
7301
7302 /* Now, try to enable interrupt and bring up the device */
7303 cfg_mode = phba->cfg_use_msi;
7304 while (true) {
7305 /* Put device to a known state before enabling interrupt */
7306 lpfc_stop_port(phba);
7307 /* Configure and enable interrupt */
7308 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7309 if (intr_mode == LPFC_INTR_ERROR) {
7310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7311 "0426 Failed to enable interrupt.\n");
7312 error = -ENODEV;
7313 goto out_free_sysfs_attr;
7314 }
7315 /* Set up SLI-4 HBA */
7316 if (lpfc_sli4_hba_setup(phba)) {
7317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7318 "1421 Failed to set up hba\n");
7319 error = -ENODEV;
7320 goto out_disable_intr;
7321 }
7322
7323 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
7324 if (intr_mode != 0)
7325 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7326 LPFC_ACT_INTR_CNT);
7327
7328 /* Check active interrupts received only for MSI/MSI-X */
7329 if (intr_mode == 0 ||
7330 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7331 /* Log the current active interrupt mode */
7332 phba->intr_mode = intr_mode;
7333 lpfc_log_intr_mode(phba, intr_mode);
7334 break;
7335 }
7336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7337 "0451 Configure interrupt mode (%d) "
7338 "failed active interrupt test.\n",
7339 intr_mode);
7340 /* Unset the previous SLI-4 HBA setup */
7341 lpfc_sli4_unset_hba(phba);
7342 /* Try next level of interrupt mode */
7343 cfg_mode = --intr_mode;
7344 }
7345
7346 /* Perform post initialization setup */
7347 lpfc_post_init_setup(phba);
7348
7349 return 0;
7350
7351out_disable_intr:
7352 lpfc_sli4_disable_intr(phba);
7353out_free_sysfs_attr:
7354 lpfc_free_sysfs_attr(vport);
7355out_destroy_shost:
7356 lpfc_destroy_shost(phba);
7357out_unset_driver_resource:
7358 lpfc_unset_driver_resource_phase2(phba);
7359out_free_iocb_list:
7360 lpfc_free_iocb_list(phba);
7361out_unset_driver_resource_s4:
7362 lpfc_sli4_driver_resource_unset(phba);
7363out_unset_pci_mem_s4:
7364 lpfc_sli4_pci_mem_unset(phba);
7365out_disable_pci_dev:
7366 lpfc_disable_pci_dev(phba);
7367out_free_phba:
7368 lpfc_hba_free(phba);
7369 return error;
7370}
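
The while (true) loop in the probe routine above walks the interrupt modes downward (MSI-X, then MSI, then INTx) until the active-interrupt test passes. A minimal standalone model of that fallback, assuming the 2/1/0 mode encoding implied by cfg_mode = --intr_mode, and that mode 0 (INTx) needs no test:

enum { SKETCH_INTX = 0, SKETCH_MSI = 1, SKETCH_MSIX = 2 };

static int pick_intr_mode_sketch(int cfg_mode, int (*active_test)(int))
{
	int mode;

	for (mode = cfg_mode; mode >= SKETCH_INTX; mode--) {
		/* INTx is accepted as-is; MSI/MSI-X must prove that
		 * interrupts are actually being delivered. */
		if (mode == SKETCH_INTX || active_test(mode))
			return mode;
	}
	return -1;	/* mirrors LPFC_INTR_ERROR */
}

This leaves out the enable/teardown steps (lpfc_sli4_enable_intr, lpfc_sli4_unset_hba) that the real loop performs on each iteration.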
7371
7372/**
7373 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7374 * @pdev: pointer to PCI device
7375 *
7376 * This routine is called from the kernel's PCI subsystem to device with
7377 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7378 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7379 * device to be removed from the PCI subsystem properly.
7380 **/
7381static void __devexit
7382lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7383{
7384 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7385 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7386 struct lpfc_vport **vports;
7387 struct lpfc_hba *phba = vport->phba;
7388 int i;
7389
7390 /* Mark the device unloading flag */
7391 spin_lock_irq(&phba->hbalock);
7392 vport->load_flag |= FC_UNLOADING;
7393 spin_unlock_irq(&phba->hbalock);
7394
7395 /* Free the HBA sysfs attributes */
7396 lpfc_free_sysfs_attr(vport);
7397
7398 /* Release all the vports against this physical port */
7399 vports = lpfc_create_vport_work_array(phba);
7400 if (vports != NULL)
7401 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7402 fc_vport_terminate(vports[i]->fc_vport);
7403 lpfc_destroy_vport_work_array(phba, vports);
7404
7405 /* Remove FC host and then SCSI host with the physical port */
7406 fc_remove_host(shost);
7407 scsi_remove_host(shost);
7408
7409 /* Perform cleanup on the physical port */
7410 lpfc_cleanup(vport);
7411
7412 /*
7413 * Bring down the SLI Layer. This step disables all interrupts,
7414 * clears the rings, discards all mailbox commands, and resets
7415 * the HBA FCoE function.
7416 */
7417 lpfc_debugfs_terminate(vport);
7418 lpfc_sli4_hba_unset(phba);
7419
7420 spin_lock_irq(&phba->hbalock);
7421 list_del_init(&vport->listentry);
7422 spin_unlock_irq(&phba->hbalock);
7423
7424 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7425 * buffers are released to their corresponding pools here.
7426 */
7427 lpfc_scsi_free(phba);
7428 lpfc_sli4_driver_resource_unset(phba);
7429
7430 /* Unmap adapter Control and Doorbell registers */
7431 lpfc_sli4_pci_mem_unset(phba);
7432
7433 /* Release PCI resources and disable device's PCI function */
7434 scsi_host_put(shost);
7435 lpfc_disable_pci_dev(phba);
7436
7437 /* Finally, free the driver's device data structure */
7438 lpfc_hba_free(phba);
7439
7440 return;
7441}
7442
7443/**
7444 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7445 * @pdev: pointer to PCI device
7446 * @msg: power management message
7447 *
7448 * This routine is called from the kernel's PCI subsystem to support system
7449 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7450 * this method, it quiesces the device by stopping the driver's worker
7451 * thread for the device, turning off device's interrupt and DMA, and bringing
7452 * the device offline. Note that as the driver implements the minimum PM
7453 * requirements to a power-aware driver's PM support for suspend/resume -- all
7454 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7455 * method call will be treated as SUSPEND and the driver will fully
7456 * reinitialize its device during resume() method call, the driver will set
7457 * device to PCI_D3hot state in PCI config space instead of setting it
7458 * according to the @msg provided by the PM.
7459 *
7460 * Return code
7461 * 0 - driver suspended the device
7462 * Error otherwise
7463 **/
7464static int
7465lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3390{ 7466{
3391 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7467 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3392 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3393 7469
7470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7471 "0298 PCI device Power Management suspend.\n");
7472
7473 /* Bring down the device */
7474 lpfc_offline_prep(phba);
7475 lpfc_offline(phba);
7476 kthread_stop(phba->worker_thread);
7477
7478 /* Disable interrupt from device */
7479 lpfc_sli4_disable_intr(phba);
7480
7481 /* Save device state to PCI config space */
7482 pci_save_state(pdev);
7483 pci_set_power_state(pdev, PCI_D3hot);
7484
7485 return 0;
7486}
7487
7488/**
7489 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7490 * @pdev: pointer to PCI device
7491 *
7492 * This routine is called from the kernel's PCI subsystem to support system
7493 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7494 * this method, it restores the device's PCI config space state and fully
7495 * reinitializes the device and brings it online. Note that as the driver
7496 * implements the minimum PM requirements to a power-aware driver's PM for
7497 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7498 * to the suspend() method call will be treated as SUSPEND and the driver
7499 * will fully reinitialize its device during resume() method call, the device
7500 * will be set to PCI_D0 directly in PCI config space before restoring the
7501 * state.
7502 *
7503 * Return code
7504 * 0 - driver resumed the device
7505 * Error otherwise
7506 **/
7507static int
7508lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7509{
7510 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7511 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7512 uint32_t intr_mode;
7513 int error;
7514
7515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7516 "0292 PCI device Power Management resume.\n");
7517
7518 /* Restore device state from PCI config space */
7519 pci_set_power_state(pdev, PCI_D0);
7520 pci_restore_state(pdev);
7521 if (pdev->is_busmaster)
7522 pci_set_master(pdev);
7523
7524 /* Startup the kernel thread for this host adapter. */
7525 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7526 "lpfc_worker_%d", phba->brd_no);
7527 if (IS_ERR(phba->worker_thread)) {
7528 error = PTR_ERR(phba->worker_thread);
7529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7530 "0293 PM resume failed to start worker "
7531 "thread: error=x%x.\n", error);
7532 return error;
7533 }
7534
7535 /* Configure and enable interrupt */
7536 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7537 if (intr_mode == LPFC_INTR_ERROR) {
7538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7539 "0294 PM resume Failed to enable interrupt\n");
7540 return -EIO;
7541 } else
7542 phba->intr_mode = intr_mode;
7543
7544 /* Restart HBA and bring it online */
7545 lpfc_sli_brdrestart(phba);
3394 lpfc_online(phba); 7546 lpfc_online(phba);
7547
7548 /* Log the current active interrupt mode */
7549 lpfc_log_intr_mode(phba, phba->intr_mode);
7550
7551 return 0;
7552}
7553
7554/**
7555 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7556 * @pdev: pointer to PCI device.
7557 * @state: the current PCI connection state.
7558 *
7559 * This routine is called from the PCI subsystem for error handling to device
7560 * with SLI-4 interface spec. This function is called by the PCI subsystem
7561 * after a PCI bus error affecting this device has been detected. When this
7562 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7563 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7564 * for the PCI subsystem to perform proper recovery as desired.
7565 *
7566 * Return codes
7567 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7568 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7569 **/
7570static pci_ers_result_t
7571lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7572{
7573 return PCI_ERS_RESULT_NEED_RESET;
7574}
7575
7576/**
7577 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
7578 * @pdev: pointer to PCI device.
7579 *
7580 * This routine is called from the PCI subsystem for error handling to device
7581 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7582 * restart the PCI card from scratch, as if from a cold-boot. During the
7583 * PCI subsystem error recovery, after the driver returns
7584 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7585 * recovery and then call this routine before calling the .resume method to
7586 * recover the device. This function will initialize the HBA device, enable
7587 * the interrupt, but it will just put the HBA to offline state without
7588 * passing any I/O traffic.
7589 *
7590 * Return codes
7591 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7592 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7593 */
7594static pci_ers_result_t
7595lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7596{
7597 return PCI_ERS_RESULT_RECOVERED;
7598}
7599
7600/**
7601 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7602 * @pdev: pointer to PCI device
7603 *
7604 * This routine is called from the PCI subsystem for error handling to device
7605 * with SLI-4 interface spec. It is called when kernel error recovery tells
7606 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7607 * error recovery. After this call, traffic can start to flow from this device
7608 * again.
7609 **/
7610static void
7611lpfc_io_resume_s4(struct pci_dev *pdev)
7612{
7613 return;
7614}
7615
7616/**
7617 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7618 * @pdev: pointer to PCI device
7619 * @pid: pointer to PCI device identifier
7620 *
7621 * This routine is to be registered to the kernel's PCI subsystem. When an
7622 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7623 * at PCI device-specific information of the device and driver to see
7624 * whether the driver can support this kind of device. If the match is
7625 * successful, the driver core invokes this routine. This routine dispatches
7626 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7627 * do all the initialization that it needs to do to handle the HBA device
7628 * properly.
7629 *
7630 * Return code
7631 * 0 - driver can claim the device
7632 * negative value - driver can not claim the device
7633 **/
7634static int __devinit
7635lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7636{
7637 int rc;
7638 uint16_t dev_id;
7639
7640 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7641 return -ENODEV;
7642
7643 switch (dev_id) {
7644 case PCI_DEVICE_ID_TIGERSHARK:
7645 case PCI_DEVICE_ID_TIGERSHARK_S:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break;
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break;
7651 }
7652 return rc;
7653}
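
The probe path keys off the PCI device ID once; every later entry point keys off the device group that probe recorded. As a sketch of that contract (an assumption drawn from the LPFC_PCI_DEV_LP/LPFC_PCI_DEV_OC uses below; the real assignment is expected to happen inside lpfc_api_table_setup()):

static void lpfc_record_dev_grp_sketch(struct lpfc_hba *phba,
				       uint16_t dev_id)
{
	switch (dev_id) {
	case PCI_DEVICE_ID_TIGERSHARK:
	case PCI_DEVICE_ID_TIGERSHARK_S:
		phba->pci_dev_grp = LPFC_PCI_DEV_OC;	/* SLI-4 parts */
		break;
	default:
		phba->pci_dev_grp = LPFC_PCI_DEV_LP;	/* SLI-3 parts */
		break;
	}
}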
7654
7655/**
7656 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7657 * @pdev: pointer to PCI device
7658 *
7659 * This routine is to be registered to the kernel's PCI subsystem. When an
7660 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7661 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7662 * remove routine, which will perform all the necessary cleanup for the
7663 * device to be removed from the PCI subsystem properly.
7664 **/
7665static void __devexit
7666lpfc_pci_remove_one(struct pci_dev *pdev)
7667{
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7670
7671 switch (phba->pci_dev_grp) {
7672 case LPFC_PCI_DEV_LP:
7673 lpfc_pci_remove_one_s3(pdev);
7674 break;
7675 case LPFC_PCI_DEV_OC:
7676 lpfc_pci_remove_one_s4(pdev);
7677 break;
7678 default:
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "1424 Invalid PCI device group: 0x%x\n",
7681 phba->pci_dev_grp);
7682 break;
7683 }
7684 return;
7685}
7686
7687/**
7688 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7689 * @pdev: pointer to PCI device
7690 * @msg: power management message
7691 *
7692 * This routine is to be registered to the kernel's PCI subsystem to support
7693 * system Power Management (PM). When PM invokes this method, it dispatches
7694 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7695 * suspend the device.
7696 *
7697 * Return code
7698 * 0 - driver suspended the device
7699 * Error otherwise
7700 **/
7701static int
7702lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7703{
7704 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7705 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7706 int rc = -ENODEV;
7707
7708 switch (phba->pci_dev_grp) {
7709 case LPFC_PCI_DEV_LP:
7710 rc = lpfc_pci_suspend_one_s3(pdev, msg);
7711 break;
7712 case LPFC_PCI_DEV_OC:
7713 rc = lpfc_pci_suspend_one_s4(pdev, msg);
7714 break;
7715 default:
7716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7717 "1425 Invalid PCI device group: 0x%x\n",
7718 phba->pci_dev_grp);
7719 break;
7720 }
7721 return rc;
7722}
7723
7724/**
7725 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7726 * @pdev: pointer to PCI device
7727 *
7728 * This routine is to be registered to the kernel's PCI subsystem to support
7729 * system Power Management (PM). When PM invokes this method, it dispatches
7730 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7731 * resume the device.
7732 *
7733 * Return code
7734 * 0 - driver resumed the device
7735 * Error otherwise
7736 **/
7737static int
7738lpfc_pci_resume_one(struct pci_dev *pdev)
7739{
7740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7742 int rc = -ENODEV;
7743
7744 switch (phba->pci_dev_grp) {
7745 case LPFC_PCI_DEV_LP:
7746 rc = lpfc_pci_resume_one_s3(pdev);
7747 break;
7748 case LPFC_PCI_DEV_OC:
7749 rc = lpfc_pci_resume_one_s4(pdev);
7750 break;
7751 default:
7752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7753 "1426 Invalid PCI device group: 0x%x\n",
7754 phba->pci_dev_grp);
7755 break;
7756 }
7757 return rc;
7758}
7759
7760/**
7761 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7762 * @pdev: pointer to PCI device.
7763 * @state: the current PCI connection state.
7764 *
7765 * This routine is registered to the PCI subsystem for error handling. This
7766 * function is called by the PCI subsystem after a PCI bus error affecting
7767 * this device has been detected. When this routine is invoked, it dispatches
7768 * the action to the proper SLI-3 or SLI-4 device error detected handling
7769 * routine, which will perform the proper error detected operation.
7770 *
7771 * Return codes
7772 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7773 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7774 **/
7775static pci_ers_result_t
7776lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7777{
7778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7779 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7780 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7781
7782 switch (phba->pci_dev_grp) {
7783 case LPFC_PCI_DEV_LP:
7784 rc = lpfc_io_error_detected_s3(pdev, state);
7785 break;
7786 case LPFC_PCI_DEV_OC:
7787 rc = lpfc_io_error_detected_s4(pdev, state);
7788 break;
7789 default:
7790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7791 "1427 Invalid PCI device group: 0x%x\n",
7792 phba->pci_dev_grp);
7793 break;
7794 }
7795 return rc;
7796}
7797
7798/**
7799 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
7800 * @pdev: pointer to PCI device.
7801 *
7802 * This routine is registered to the PCI subsystem for error handling. This
7803 * function is called after PCI bus has been reset to restart the PCI card
7804 * from scratch, as if from a cold-boot. When this routine is invoked, it
7805 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7806 * routine, which will perform the proper device reset.
7807 *
7808 * Return codes
7809 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7810 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7811 **/
7812static pci_ers_result_t
7813lpfc_io_slot_reset(struct pci_dev *pdev)
7814{
7815 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7816 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7817 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7818
7819 switch (phba->pci_dev_grp) {
7820 case LPFC_PCI_DEV_LP:
7821 rc = lpfc_io_slot_reset_s3(pdev);
7822 break;
7823 case LPFC_PCI_DEV_OC:
7824 rc = lpfc_io_slot_reset_s4(pdev);
7825 break;
7826 default:
7827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7828 "1428 Invalid PCI device group: 0x%x\n",
7829 phba->pci_dev_grp);
7830 break;
7831 }
7832 return rc;
7833}
7834
7835/**
7836 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7837 * @pdev: pointer to PCI device
7838 *
7839 * This routine is registered to the PCI subsystem for error handling. It
7840 * is called when kernel error recovery tells the lpfc driver that it is
7841 * OK to resume normal PCI operation after PCI bus error recovery. When
7842 * this routine is invoked, it dispatches the action to the proper SLI-3
7843 * or SLI-4 device io_resume routine, which will resume the device operation.
7844 **/
7845static void
7846lpfc_io_resume(struct pci_dev *pdev)
7847{
7848 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7850
7851 switch (phba->pci_dev_grp) {
7852 case LPFC_PCI_DEV_LP:
7853 lpfc_io_resume_s3(pdev);
7854 break;
7855 case LPFC_PCI_DEV_OC:
7856 lpfc_io_resume_s4(pdev);
7857 break;
7858 default:
7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7860 "1429 Invalid PCI device group: 0x%x\n",
7861 phba->pci_dev_grp);
7862 break;
7863 }
7864 return;
3395} 7865}
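
The three dispatchers above (error_detected, slot_reset, io_resume) are assumed to be wired into the PCI error-recovery machinery through the standard handler table; the driver's actual lpfc_err_handler referenced in lpfc_driver below is declared elsewhere in this file. A sketch of the expected wiring:

static struct pci_error_handlers lpfc_err_handler_sketch = {
	.error_detected	= lpfc_io_error_detected,
	.slot_reset	= lpfc_io_slot_reset,
	.resume		= lpfc_io_resume,
};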
3396 7866
3397static struct pci_device_id lpfc_id_table[] = { 7867static struct pci_device_id lpfc_id_table[] = {
@@ -3469,6 +7939,10 @@ static struct pci_device_id lpfc_id_table[] = {
3469 PCI_ANY_ID, PCI_ANY_ID, }, 7939 PCI_ANY_ID, PCI_ANY_ID, },
3470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 7940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3471 PCI_ANY_ID, PCI_ANY_ID, }, 7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, },
7944 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7945 PCI_ANY_ID, PCI_ANY_ID, },
3472 { 0 } 7946 { 0 }
3473}; 7947};
3474 7948
@@ -3486,7 +7960,7 @@ static struct pci_driver lpfc_driver = {
3486 .probe = lpfc_pci_probe_one, 7960 .probe = lpfc_pci_probe_one,
3487 .remove = __devexit_p(lpfc_pci_remove_one), 7961 .remove = __devexit_p(lpfc_pci_remove_one),
3488 .suspend = lpfc_pci_suspend_one, 7962 .suspend = lpfc_pci_suspend_one,
3489 .resume = lpfc_pci_resume_one, 7963 .resume = lpfc_pci_resume_one,
3490 .err_handler = &lpfc_err_handler, 7964 .err_handler = &lpfc_err_handler,
3491}; 7965};
3492 7966
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 1aa85709b012..954ba57970a3 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,33 +18,39 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LOG_ELS 0x1 /* ELS events */ 21#define LOG_ELS 0x00000001 /* ELS events */
22#define LOG_DISCOVERY 0x2 /* Link discovery events */ 22#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
23#define LOG_MBOX 0x4 /* Mailbox events */ 23#define LOG_MBOX 0x00000004 /* Mailbox events */
24#define LOG_INIT 0x8 /* Initialization events */ 24#define LOG_INIT 0x00000008 /* Initialization events */
25#define LOG_LINK_EVENT 0x10 /* Link events */ 25#define LOG_LINK_EVENT 0x00000010 /* Link events */
26#define LOG_IP 0x20 /* IP traffic history */ 26#define LOG_IP 0x00000020 /* IP traffic history */
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x00000040 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x00000080 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x00000100 /* Temperature sensor events */
30#define LOG_BG 0x200 /* BlockGuard events */ 30#define LOG_BG 0x00000200 /* BlockGuard events */
31#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x00000400 /* Miscellaneous events */
32#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x00000800 /* SLI events */
33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
34#define LOG_LIBDFC 0x2000 /* Libdfc events */ 34#define LOG_LIBDFC 0x00002000 /* Libdfc events */
35#define LOG_VPORT 0x4000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOG_ALL_MSG 0xffff /* LOG all messages */ 36#define LOG_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
37 39
38#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
39 do { \ 41do { \
40 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 42 { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
41 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 43 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
42 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ 44 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
43 } while (0) 45} while (0)
44 46
45#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 47#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
46 do { \ 48do { \
47 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 49 { uint32_t log_verbose = (phba)->pport ? \
50 (phba)->pport->cfg_log_verbose : \
51 (phba)->cfg_log_verbose; \
52 if (((mask) & log_verbose) || (level[1] <= '3')) \
48 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 53 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
49 fmt, phba->brd_no, ##arg); } \ 54 fmt, phba->brd_no, ##arg); \
50 } while (0) 55 } \
56} while (0)
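
With the masks widened to 32 bits, several event classes can be OR'd into cfg_log_verbose and tested by the macros above. An illustrative use (not part of the patch, with made-up message numbers):

static void lpfc_log_usage_sketch(struct lpfc_hba *phba)
{
	/* Enable two event classes on the physical port. */
	phba->pport->cfg_log_verbose = LOG_ELS | LOG_DISCOVERY;

	/* Emitted: LOG_DISCOVERY is set in the mask. */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0000 example discovery event\n");

	/* Also emitted even though LOG_SLI is masked off: severity 3
	 * (KERN_ERR) or worse bypasses the mask via level[1] <= '3'. */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0000 example error event\n");
}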
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 134fc7fc2127..b9b451c09010 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -39,6 +41,44 @@
39#include "lpfc_compat.h" 41#include "lpfc_compat.h"
40 42
41/** 43/**
44 * lpfc_dump_static_vport - Dump HBA's static vport information.
45 * @phba: pointer to lpfc hba data structure.
46 * @pmb: pointer to the driver internal queue element for mailbox command.
47 * @offset: offset for dumping vport info.
48 *
49 * The dump mailbox command provides a method for the device driver to obtain
50 * various types of information from the HBA device.
51 *
52 * This routine prepares the mailbox command for dumping list of static
53 * vports to be created.
54 **/
55void
56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
57 uint16_t offset)
58{
59 MAILBOX_t *mb;
60 void *ctx;
61
62 mb = &pmb->u.mb;
63 ctx = pmb->context2;
64
65 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.cv = 1;
69 mb->un.varDmp.type = DMP_NV_PARAMS;
70 mb->un.varDmp.entry_index = offset;
71 mb->un.varDmp.region_id = DMP_REGION_VPORT;
72 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
73 mb->un.varDmp.co = 0;
74 mb->un.varDmp.resp_offset = 0;
75 pmb->context2 = ctx;
76 mb->mbxOwner = OWN_HOST;
77
78 return;
79}
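
A hedged sketch of how a caller might issue this command: the mailbox mempool, lpfc_sli_issue_mbox() and MBX_NOWAIT exist elsewhere in the driver, but this exact helper is an illustration, not code from the patch.

static int lpfc_issue_static_vport_dump_sketch(struct lpfc_hba *phba,
					       uint16_t offset)
{
	LPFC_MBOXQ_t *pmb;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_dump_static_vport(phba, pmb, offset);
	/* A real caller would also set pmb->mbox_cmpl to a completion
	 * handler before issuing in MBX_NOWAIT mode. */
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}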
80
81/**
42 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory 82 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
43 * @phba: pointer to lpfc hba data structure. 83 * @phba: pointer to lpfc hba data structure.
44 * @pmb: pointer to the driver internal queue element for mailbox command. 84 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
58 MAILBOX_t *mb; 98 MAILBOX_t *mb;
59 void *ctx; 99 void *ctx;
60 100
61 mb = &pmb->mb; 101 mb = &pmb->u.mb;
62 ctx = pmb->context2; 102 ctx = pmb->context2;
63 103
64 /* Setup to dump VPD region */ 104 /* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
90 MAILBOX_t *mb; 130 MAILBOX_t *mb;
91 void *ctx; 131 void *ctx;
92 132
93 mb = &pmb->mb; 133 mb = &pmb->u.mb;
94 /* Save context so that we can restore after memset */ 134 /* Save context so that we can restore after memset */
95 ctx = pmb->context2; 135 ctx = pmb->context2;
96 136
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
125{ 165{
126 MAILBOX_t *mb; 166 MAILBOX_t *mb;
127 167
128 mb = &pmb->mb; 168 mb = &pmb->u.mb;
129 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 169 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
130 mb->mbxCommand = MBX_READ_NV; 170 mb->mbxCommand = MBX_READ_NV;
131 mb->mbxOwner = OWN_HOST; 171 mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
151{ 191{
152 MAILBOX_t *mb; 192 MAILBOX_t *mb;
153 193
154 mb = &pmb->mb; 194 mb = &pmb->u.mb;
155 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 195 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
156 mb->mbxCommand = MBX_ASYNCEVT_ENABLE; 196 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
157 mb->un.varCfgAsyncEvent.ring = ring; 197 mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
177{ 217{
178 MAILBOX_t *mb; 218 MAILBOX_t *mb;
179 219
180 mb = &pmb->mb; 220 mb = &pmb->u.mb;
181 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 221 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
182 mb->mbxCommand = MBX_HEARTBEAT; 222 mb->mbxCommand = MBX_HEARTBEAT;
183 mb->mbxOwner = OWN_HOST; 223 mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
211 struct lpfc_sli *psli; 251 struct lpfc_sli *psli;
212 252
213 psli = &phba->sli; 253 psli = &phba->sli;
214 mb = &pmb->mb; 254 mb = &pmb->u.mb;
215 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 255 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
216 256
217 INIT_LIST_HEAD(&mp->list); 257 INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
248{ 288{
249 MAILBOX_t *mb; 289 MAILBOX_t *mb;
250 290
251 mb = &pmb->mb; 291 mb = &pmb->u.mb;
252 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 292 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
253 293
254 mb->un.varClearLA.eventTag = phba->fc_eventTag; 294 mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
275lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 315lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
276{ 316{
277 struct lpfc_vport *vport = phba->pport; 317 struct lpfc_vport *vport = phba->pport;
278 MAILBOX_t *mb = &pmb->mb; 318 MAILBOX_t *mb = &pmb->u.mb;
279 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 319 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
280 320
281 /* NEW_FEATURE 321 /* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
321int 361int
322lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 362lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
323{ 363{
324 MAILBOX_t *mb = &pmb->mb; 364 MAILBOX_t *mb = &pmb->u.mb;
325 uint32_t attentionConditions[2]; 365 uint32_t attentionConditions[2];
326 366
327 /* Sanity check */ 367 /* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
405 struct lpfc_sli *psli; 445 struct lpfc_sli *psli;
406 MAILBOX_t *mb; 446 MAILBOX_t *mb;
407 447
408 mb = &pmb->mb; 448 mb = &pmb->u.mb;
409 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 449 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
410 450
411 psli = &phba->sli; 451 psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
492 struct lpfc_sli *psli; 532 struct lpfc_sli *psli;
493 533
494 psli = &phba->sli; 534 psli = &phba->sli;
495 mb = &pmb->mb; 535 mb = &pmb->u.mb;
496 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 536 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
497 537
498 mb->mbxOwner = OWN_HOST; 538 mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
515 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 555 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
516 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 556 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
517 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 557 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
518 mb->un.varRdSparm.vpi = vpi; 558 mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
519 559
520 /* save address for completion */ 560 /* save address for completion */
521 pmb->context1 = mp; 561 pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
544{ 584{
545 MAILBOX_t *mb; 585 MAILBOX_t *mb;
546 586
547 mb = &pmb->mb; 587 mb = &pmb->u.mb;
548 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 588 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
549 589
550 mb->un.varUnregDID.did = did; 590 mb->un.varUnregDID.did = did;
591 if (vpi != 0xffff)
592 vpi += phba->vpi_base;
551 mb->un.varUnregDID.vpi = vpi; 593 mb->un.varUnregDID.vpi = vpi;
552 594
553 mb->mbxCommand = MBX_UNREG_D_ID; 595 mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
573{ 615{
574 MAILBOX_t *mb; 616 MAILBOX_t *mb;
575 617
576 mb = &pmb->mb; 618 mb = &pmb->u.mb;
577 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 619 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
578 620
579 mb->mbxCommand = MBX_READ_CONFIG; 621 mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
598{ 640{
599 MAILBOX_t *mb; 641 MAILBOX_t *mb;
600 642
601 mb = &pmb->mb; 643 mb = &pmb->u.mb;
602 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
603 645
604 mb->mbxCommand = MBX_READ_LNK_STAT; 646 mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
607} 649}
608 650
609/** 651/**
610 * lpfc_reg_login - Prepare a mailbox command for registering remote login 652 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
611 * @phba: pointer to lpfc hba data structure. 653 * @phba: pointer to lpfc hba data structure.
612 * @vpi: virtual N_Port identifier. 654 * @vpi: virtual N_Port identifier.
613 * @did: remote port identifier. 655 * @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
631 * 1 - DMA memory allocation failed 673 * 1 - DMA memory allocation failed
632 **/ 674 **/
633int 675int
634lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 676lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
635 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) 677 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
636{ 678{
637 MAILBOX_t *mb = &pmb->mb; 679 MAILBOX_t *mb = &pmb->u.mb;
638 uint8_t *sparam; 680 uint8_t *sparam;
639 struct lpfc_dmabuf *mp; 681 struct lpfc_dmabuf *mp;
640 682
641 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 683 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
642 684
643 mb->un.varRegLogin.rpi = 0; 685 mb->un.varRegLogin.rpi = 0;
644 mb->un.varRegLogin.vpi = vpi; 686 if (phba->sli_rev == LPFC_SLI_REV4) {
687 mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
688 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
689 return 1;
690 }
691
692 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
645 mb->un.varRegLogin.did = did; 693 mb->un.varRegLogin.did = did;
646 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 694 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
647 695
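
The vpi + phba->vpi_base adjustments in this and the surrounding hunks all follow one rule, summarized here as an assumption drawn from the patch: the host works with zero-based logical VPIs, and the firmware expects them biased into the function's physical VPI range, with 0xffff left untranslated as the "no vpi" sentinel (see lpfc_unreg_did above). A sketch:

static inline uint16_t lpfc_phys_vpi_sketch(struct lpfc_hba *phba,
					    uint16_t vpi)
{
	/* 0xffff means "no vpi" and is passed through unchanged. */
	if (vpi == 0xffff)
		return vpi;
	return vpi + phba->vpi_base;	/* base is 0 on SLI-3 parts */
}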
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
697{ 745{
698 MAILBOX_t *mb; 746 MAILBOX_t *mb;
699 747
700 mb = &pmb->mb; 748 mb = &pmb->u.mb;
701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 749 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702 750
703 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 751 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
704 mb->un.varUnregLogin.rsvd1 = 0; 752 mb->un.varUnregLogin.rsvd1 = 0;
705 mb->un.varUnregLogin.vpi = vpi; 753 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
706 754
707 mb->mbxCommand = MBX_UNREG_LOGIN; 755 mb->mbxCommand = MBX_UNREG_LOGIN;
708 mb->mbxOwner = OWN_HOST; 756 mb->mbxOwner = OWN_HOST;
757
709 return; 758 return;
710} 759}
711 760
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
725 * This routine prepares the mailbox command for registering a virtual N_Port. 774 * This routine prepares the mailbox command for registering a virtual N_Port.
726 **/ 775 **/
727void 776void
728lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, 777lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
729 LPFC_MBOXQ_t *pmb)
730{ 778{
731 MAILBOX_t *mb = &pmb->mb; 779 MAILBOX_t *mb = &pmb->u.mb;
732 780
733 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 781 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
734 782
735 mb->un.varRegVpi.vpi = vpi; 783 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
736 mb->un.varRegVpi.sid = sid; 784 mb->un.varRegVpi.sid = vport->fc_myDID;
785 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
737 786
738 mb->mbxCommand = MBX_REG_VPI; 787 mb->mbxCommand = MBX_REG_VPI;
739 mb->mbxOwner = OWN_HOST; 788 mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
760void 809void
761lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) 810lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
762{ 811{
763 MAILBOX_t *mb = &pmb->mb; 812 MAILBOX_t *mb = &pmb->u.mb;
764 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 813 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
765 814
766 mb->un.varUnregVpi.vpi = vpi; 815 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
767 816
768 mb->mbxCommand = MBX_UNREG_VPI; 817 mb->mbxCommand = MBX_UNREG_VPI;
769 mb->mbxOwner = OWN_HOST; 818 mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
852void 901void
853lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 902lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
854{ 903{
855 MAILBOX_t *mb = &pmb->mb; 904 MAILBOX_t *mb = &pmb->u.mb;
856 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 905 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
857 mb->un.varRdRev.cv = 1; 906 mb->un.varRdRev.cv = 1;
858 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ 907 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
945 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) 994 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
946{ 995{
947 int i; 996 int i;
948 MAILBOX_t *mb = &pmb->mb; 997 MAILBOX_t *mb = &pmb->u.mb;
949 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; 998 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
950 999
951 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1000 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
1020lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 1069lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1021{ 1070{
1022 int i; 1071 int i;
1023 MAILBOX_t *mb = &pmb->mb; 1072 MAILBOX_t *mb = &pmb->u.mb;
1024 struct lpfc_sli *psli; 1073 struct lpfc_sli *psli;
1025 struct lpfc_sli_ring *pring; 1074 struct lpfc_sli_ring *pring;
1026 1075
@@ -1075,7 +1124,7 @@ void
1075lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1124lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1076{ 1125{
1077 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; 1126 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1078 MAILBOX_t *mb = &pmb->mb; 1127 MAILBOX_t *mb = &pmb->u.mb;
1079 dma_addr_t pdma_addr; 1128 dma_addr_t pdma_addr;
1080 uint32_t bar_low, bar_high; 1129 uint32_t bar_low, bar_high;
1081 size_t offset; 1130 size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1099 1148
1100 /* If HBA supports SLI=3 ask for it */ 1149 /* If HBA supports SLI=3 ask for it */
1101 1150
1102 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1151 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1103 if (phba->cfg_enable_bg) 1152 if (phba->cfg_enable_bg)
1104 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ 1153 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1154 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1105 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1155 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1106 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1156 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1107 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ 1157 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
1108 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1158 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1109 if (phba->max_vpi && phba->cfg_enable_npiv && 1159 if (phba->max_vpi && phba->cfg_enable_npiv &&
1110 phba->vpd.sli3Feat.cmv) { 1160 phba->vpd.sli3Feat.cmv) {
1111 mb->un.varCfgPort.max_vpi = phba->max_vpi; 1161 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1112 mb->un.varCfgPort.cmv = 1; 1162 mb->un.varCfgPort.cmv = 1;
1113 } else 1163 } else
1114 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; 1164 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1115 } else 1165 } else
1116 phba->sli_rev = 2; 1166 phba->sli_rev = LPFC_SLI_REV2;
1117 mb->un.varCfgPort.sli_mode = phba->sli_rev; 1167 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1118 1168
1119 /* Now setup pcb */ 1169 /* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1245void 1295void
1246lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1296lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1247{ 1297{
1248 MAILBOX_t *mb = &pmb->mb; 1298 MAILBOX_t *mb = &pmb->u.mb;
1249 1299
1250 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 1300 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1251 mb->mbxCommand = MBX_KILL_BOARD; 1301 mb->mbxCommand = MBX_KILL_BOARD;
@@ -1305,29 +1355,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
1305} 1355}
1306 1356
1307/** 1357/**
1358 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
1359 * @phba: pointer to lpfc hba data structure.
1360 * @mbq: pointer to the driver internal queue element for mailbox command.
1361 *
 1362 * This routine puts the completed mailbox command into the mailbox command
1363 * complete list. This is the unlocked version of the routine. The mailbox
1364 * complete list is used by the driver worker thread to process mailbox
1365 * complete callback functions outside the driver interrupt handler.
1366 **/
1367void
1368__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1369{
1370 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1371}
1372
1373/**
1308 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list 1374 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
1309 * @phba: pointer to lpfc hba data structure. 1375 * @phba: pointer to lpfc hba data structure.
1310 * @mbq: pointer to the driver internal queue element for mailbox command. 1376 * @mbq: pointer to the driver internal queue element for mailbox command.
1311 * 1377 *
1312 * This routine put the completed mailbox command into the mailbox command 1378 * This routine puts the completed mailbox command into the mailbox command
1313 * complete list. This routine is called from driver interrupt handler 1379 * complete list. This is the locked version of the routine. The mailbox
1314 * context.The mailbox complete list is used by the driver worker thread 1380 * complete list is used by the driver worker thread to process mailbox
1315 * to process mailbox complete callback functions outside the driver interrupt 1381 * complete callback functions outside the driver interrupt handler.
1316 * handler.
1317 **/ 1382 **/
1318void 1383void
1319lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1384lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1320{ 1385{
1321 unsigned long iflag; 1386 unsigned long iflag;
1322 1387
1323 /* This function expects to be called from interrupt context */ 1388 /* This function expects to be called from interrupt context */
1324 spin_lock_irqsave(&phba->hbalock, iflag); 1389 spin_lock_irqsave(&phba->hbalock, iflag);
1325 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); 1390 __lpfc_mbox_cmpl_put(phba, mbq);
1326 spin_unlock_irqrestore(&phba->hbalock, iflag); 1391 spin_unlock_irqrestore(&phba->hbalock, iflag);
1327 return; 1392 return;
1328} 1393}
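
The __lpfc_mbox_cmpl_put()/lpfc_mbox_cmpl_put() pair follows the usual kernel locking convention: the underscored variant assumes the caller already holds phba->hbalock. A minimal sketch of a caller batching several completions under one lock hold (illustrative only, not part of this patch):

        /* Sketch: queue several completed mailboxes under one lock hold. */
        static void example_cmpl_put_batch(struct lpfc_hba *phba,
                                           LPFC_MBOXQ_t **done, int cnt)
        {
                unsigned long iflag;
                int i;

                spin_lock_irqsave(&phba->hbalock, iflag);
                for (i = 0; i < cnt; i++)
                        __lpfc_mbox_cmpl_put(phba, done[i]); /* lock held */
                spin_unlock_irqrestore(&phba->hbalock, iflag);
        }
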
1329 1394
1330/** 1395/**
 1396 * lpfc_mbox_cmd_check - Check the validity of a mailbox command
1397 * @phba: pointer to lpfc hba data structure.
1398 * @mboxq: pointer to the driver internal queue element for mailbox command.
1399 *
1400 * This routine is to check whether a mailbox command is valid to be issued.
 1401 * This check will be performed by the mailbox issue API when a client
 1402 * is to issue a mailbox command to the mailbox transport.
1403 *
1404 * Return 0 - pass the check, -ENODEV - fail the check
1405 **/
1406int
1407lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1408{
 1409 /* Mailbox commands that have a completion handler must also have a
1410 * vport specified.
1411 */
1412 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1413 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1414 if (!mboxq->vport) {
1415 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1416 "1814 Mbox x%x failed, no vport\n",
1417 mboxq->u.mb.mbxCommand);
1418 dump_stack();
1419 return -ENODEV;
1420 }
1421 }
1422 return 0;
1423}
1424
1425/**
1426 * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
1427 * @phba: pointer to lpfc hba data structure.
1428 *
1429 * This routine is to check whether the HBA device is ready for posting a
 1430 * mailbox command. It is used by the mailbox transport API at the time it is
 1431 * to post a mailbox command to the device.
1432 *
1433 * Return 0 - pass the check, -ENODEV - fail the check
1434 **/
1435int
1436lpfc_mbox_dev_check(struct lpfc_hba *phba)
1437{
1438 /* If the PCI channel is in offline state, do not issue mbox */
1439 if (unlikely(pci_channel_offline(phba->pcidev)))
1440 return -ENODEV;
1441
1442 /* If the HBA is in error state, do not issue mbox */
1443 if (phba->link_state == LPFC_HBA_ERROR)
1444 return -ENODEV;
1445
1446 return 0;
1447}
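
Taken together, lpfc_mbox_cmd_check() and lpfc_mbox_dev_check() form the gate an issue path runs before handing a command to the transport. A hedged sketch of such a pre-post gate (the wrapper name is hypothetical):

        /* Sketch: gate a mailbox post on command validity and device state. */
        static int example_mbox_pre_post(struct lpfc_hba *phba,
                                         LPFC_MBOXQ_t *mboxq)
        {
                int rc;

                rc = lpfc_mbox_cmd_check(phba, mboxq); /* cmpl needs a vport */
                if (rc)
                        return rc;
                return lpfc_mbox_dev_check(phba); /* PCI channel/HBA state */
        }
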
1448
1449/**
1331 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value 1450 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
1332 * @phba: pointer to lpfc hba data structure. 1451 * @phba: pointer to lpfc hba data structure.
1333 * @cmd: mailbox command code. 1452 * @cmd: mailbox command code.
@@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
1350 case MBX_WRITE_WWN: /* 0x98 */ 1469 case MBX_WRITE_WWN: /* 0x98 */
1351 case MBX_LOAD_EXP_ROM: /* 0x9C */ 1470 case MBX_LOAD_EXP_ROM: /* 0x9C */
1352 return LPFC_MBOX_TMO_FLASH_CMD; 1471 return LPFC_MBOX_TMO_FLASH_CMD;
1472 case MBX_SLI4_CONFIG: /* 0x9b */
1473 return LPFC_MBOX_SLI4_CONFIG_TMO;
1353 } 1474 }
1354 return LPFC_MBOX_TMO; 1475 return LPFC_MBOX_TMO;
1355} 1476}
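
lpfc_mbox_tmo_val() returns a timeout in seconds, so a synchronous waiter scales it before sleeping. A minimal sketch, assuming a caller that waits for the command to complete:

        /* Sketch: derive a jiffies timeout from the per-command value. */
        unsigned long timeout;

        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
                                mboxq->u.mb.mbxCommand) * 1000);
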
1477
1478/**
1479 * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
1480 * @mbox: pointer to lpfc mbox command.
1481 * @sgentry: sge entry index.
1482 * @phyaddr: physical address for the sge
1483 * @length: Length of the sge.
1484 *
1485 * This routine sets up an entry in the non-embedded mailbox command at the sge
1486 * index location.
1487 **/
1488void
1489lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1490 dma_addr_t phyaddr, uint32_t length)
1491{
1492 struct lpfc_mbx_nembed_cmd *nembed_sge;
1493
1494 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1495 &mbox->u.mqe.un.nembed_cmd;
1496 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1497 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1498 nembed_sge->sge[sgentry].length = length;
1499}
1500
1501/**
1502 * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
1503 * @mbox: pointer to lpfc mbox command.
1504 * @sgentry: sge entry index.
1505 *
1506 * This routine gets an entry from the non-embedded mailbox command at the sge
1507 * index location.
1508 **/
1509void
1510lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1511 struct lpfc_mbx_sge *sge)
1512{
1513 struct lpfc_mbx_nembed_cmd *nembed_sge;
1514
1515 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1516 &mbox->u.mqe.un.nembed_cmd;
1517 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1518 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1519 sge->length = nembed_sge->sge[sgentry].length;
1520}
1521
1522/**
1523 * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
1524 * @phba: pointer to lpfc hba data structure.
1525 * @mbox: pointer to lpfc mbox command.
1526 *
 1527 * This routine frees an SLI4 specific mailbox command used for sending an IOCTL command.
1528 **/
1529void
1530lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1531{
1532 struct lpfc_mbx_sli4_config *sli4_cfg;
1533 struct lpfc_mbx_sge sge;
1534 dma_addr_t phyaddr;
1535 uint32_t sgecount, sgentry;
1536
1537 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1538
1539 /* For embedded mbox command, just free the mbox command */
1540 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1541 mempool_free(mbox, phba->mbox_mem_pool);
1542 return;
1543 }
1544
1545 /* For non-embedded mbox command, we need to free the pages first */
1546 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1547 /* There is nothing we can do if there is no sge address array */
1548 if (unlikely(!mbox->sge_array)) {
1549 mempool_free(mbox, phba->mbox_mem_pool);
1550 return;
1551 }
1552 /* Each non-embedded DMA memory was allocated in the length of a page */
1553 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1554 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1555 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1556 dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
1557 mbox->sge_array->addr[sgentry], phyaddr);
1558 }
1559 /* Free the sge address array memory */
1560 kfree(mbox->sge_array);
1561 /* Finally, free the mailbox command itself */
1562 mempool_free(mbox, phba->mbox_mem_pool);
1563}
1564
1565/**
1566 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
1567 * @phba: pointer to lpfc hba data structure.
1568 * @mbox: pointer to lpfc mbox command.
1569 * @subsystem: The sli4 config sub mailbox subsystem.
1570 * @opcode: The sli4 config sub mailbox command opcode.
1571 * @length: Length of the sli4 config mailbox command.
1572 *
 1573 * This routine sets up the header fields of an SLI4 specific mailbox command
 1574 * for sending an IOCTL command.
 1575 *
 1576 * Return: the actual length of the mbox command allocated (mostly useful
 1577 * for non-embedded mailbox commands).
1578 **/
1579int
1580lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1581 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1582{
1583 struct lpfc_mbx_sli4_config *sli4_config;
1584 union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1585 uint32_t alloc_len;
1586 uint32_t resid_len;
1587 uint32_t pagen, pcount;
1588 void *viraddr;
1589 dma_addr_t phyaddr;
1590
1591 /* Set up SLI4 mailbox command header fields */
1592 memset(mbox, 0, sizeof(*mbox));
1593 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1594
1595 /* Set up SLI4 ioctl command header fields */
1596 sli4_config = &mbox->u.mqe.un.sli4_config;
1597
1598 /* Setup for the embedded mbox command */
1599 if (emb) {
1600 /* Set up main header fields */
1601 bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1602 sli4_config->header.cfg_mhdr.payload_length =
1603 LPFC_MBX_CMD_HDR_LENGTH + length;
1604 /* Set up sub-header fields following main header */
1605 bf_set(lpfc_mbox_hdr_opcode,
1606 &sli4_config->header.cfg_shdr.request, opcode);
1607 bf_set(lpfc_mbox_hdr_subsystem,
1608 &sli4_config->header.cfg_shdr.request, subsystem);
1609 sli4_config->header.cfg_shdr.request.request_length = length;
1610 return length;
1611 }
1612
 1613 /* Setup for the non-embedded mbox command */
1614 pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
1615 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1616 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1617 /* Allocate record for keeping SGE virtual addresses */
1618 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1619 GFP_KERNEL);
1620 if (!mbox->sge_array)
1621 return 0;
1622
1623 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
 1624 /* The DMA memory is always allocated as a full page even
 1625 * though the last SGE might not fill up to a page; that
 1626 * fixed PAGE_SIZE is then assumed when the DMA memory is
 1627 * freed later.
1628 */
1629 viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
1630 &phyaddr, GFP_KERNEL);
 1631 /* If the allocation fails, proceed with whatever we have */
1632 if (!viraddr)
1633 break;
1634 mbox->sge_array->addr[pagen] = viraddr;
1635 /* Keep the first page for later sub-header construction */
1636 if (pagen == 0)
1637 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1638 resid_len = length - alloc_len;
1639 if (resid_len > PAGE_SIZE) {
1640 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1641 PAGE_SIZE);
1642 alloc_len += PAGE_SIZE;
1643 } else {
1644 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1645 resid_len);
1646 alloc_len = length;
1647 }
1648 }
1649
1650 /* Set up main header fields in mailbox command */
1651 sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1652 bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1653
1654 /* Set up sub-header fields into the first page */
1655 if (pagen > 0) {
1656 bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1657 bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1658 cfg_shdr->request.request_length =
1659 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1660 }
 1661 /* The sub-header is in DMA memory, which needs endian conversion */
1662 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1663 sizeof(union lpfc_sli4_cfg_shdr));
1664
1665 return alloc_len;
1666}
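
For context, a plausible end-to-end use of the non-embedded path, pairing lpfc_sli4_config() with lpfc_sli4_mbox_cmd_free(); the wrapper and the subsystem/opcode values are illustrative assumptions, not taken from this patch:

        /* Sketch: build and release a non-embedded SLI4 config command. */
        static int example_sli4_config_cycle(struct lpfc_hba *phba,
                                             uint32_t req_len)
        {
                struct lpfcMboxq *mbox;
                uint32_t alloc_len;

                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!mbox)
                        return -ENOMEM;
                /* emb = false selects the SGE/page layout built above */
                alloc_len = lpfc_sli4_config(phba, mbox,
                                             LPFC_MBOX_SUBSYSTEM_COMMON,
                                             LPFC_MBOX_OPCODE_NOP,
                                             req_len, false);
                if (alloc_len < req_len) {
                        /* partial allocation; give the pages back */
                        lpfc_sli4_mbox_cmd_free(phba, mbox);
                        return -ENOMEM;
                }
                /* ... fill payload pages and post via the transport ... */
                lpfc_sli4_mbox_cmd_free(phba, mbox);
                return 0;
        }
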
1667
1668/**
1669 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1670 * @phba: pointer to lpfc hba data structure.
1671 * @mbox: pointer to lpfc mbox command.
1672 *
1673 * This routine gets the opcode from a SLI4 specific mailbox command for
1674 * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
1675 * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
1676 * returned.
1677 **/
1678uint8_t
1679lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1680{
1681 struct lpfc_mbx_sli4_config *sli4_cfg;
1682 union lpfc_sli4_cfg_shdr *cfg_shdr;
1683
1684 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1685 return 0;
1686 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1687
1688 /* For embedded mbox command, get opcode from embedded sub-header*/
1689 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1690 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1691 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1692 }
1693
1694 /* For non-embedded mbox command, get opcode from first dma page */
1695 if (unlikely(!mbox->sge_array))
1696 return 0;
1697 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1698 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1699}
1700
1701/**
 1702 * lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox
1703 * @mboxq: pointer to lpfc mbox command.
1704 *
1705 * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
1706 * mailbox command.
1707 **/
1708void
1709lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1710{
1711 /* Set up SLI4 mailbox command header fields */
1712 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
1713 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
1714
1715 /* Set up host requested features. */
1716 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1717
1718 /* Virtual fabrics and FIPs are not supported yet. */
1719 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1720
1721 /* Enable DIF (block guard) only if configured to do so. */
1722 if (phba->cfg_enable_bg)
1723 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
1724
1725 /* Enable NPIV only if configured to do so. */
1726 if (phba->max_vpi && phba->cfg_enable_npiv)
1727 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
1728
1729 return;
1730}
1731
1732/**
1733 * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
1734 * @mbox: pointer to lpfc mbox command to initialize.
1735 * @vport: Vport associated with the VF.
1736 *
1737 * This routine initializes @mbox to all zeros and then fills in the mailbox
1738 * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
 1739 * in the context of an FCF. The driver issues this command to set up a VFI
1740 * before issuing a FLOGI to login to the VSAN. The driver should also issue a
1741 * REG_VFI after a successful VSAN login.
1742 **/
1743void
1744lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1745{
1746 struct lpfc_mbx_init_vfi *init_vfi;
1747
1748 memset(mbox, 0, sizeof(*mbox));
1749 init_vfi = &mbox->u.mqe.un.init_vfi;
1750 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
1751 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1752 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1753 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
1754 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
1755}
1756
1757/**
1758 * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
1759 * @mbox: pointer to lpfc mbox command to initialize.
1760 * @vport: vport associated with the VF.
1761 * @phys: BDE DMA bus address used to send the service parameters to the HBA.
1762 *
1763 * This routine initializes @mbox to all zeros and then fills in the mailbox
1764 * fields from @vport, and uses @buf as a DMAable buffer to send the vport's
1765 * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
1766 * fabrics identified by VFI in the context of an FCF.
1767 **/
1768void
1769lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1770{
1771 struct lpfc_mbx_reg_vfi *reg_vfi;
1772
1773 memset(mbox, 0, sizeof(*mbox));
1774 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1775 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1776 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1777 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1778 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1779 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1780 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1781 reg_vfi->bde.addrLow = putPaddrLow(phys);
1782 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
1783 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1784 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
1785}
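
The two VFI helpers are meant to bracket fabric login. Condensed from the kernel-doc above, the expected order is roughly (mailbox posting and error handling elided):

        /* Sketch of the VFI bring-up order described above. */
        lpfc_init_vfi(mbox, vport);      /* 1. INIT_VFI against the FCF */
        /* ... issue FLOGI and complete the VSAN login ... */
        lpfc_reg_vfi(mbox, vport, phys); /* 2. REG_VFI w/ svc parameters */
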
1786
1787/**
1788 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
1789 * @mbox: pointer to lpfc mbox command to initialize.
1790 * @vpi: VPI to be initialized.
1791 *
1792 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
1793 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
 1794 * with the virtual N_Port. The SLI Host issues this command before issuing a
 1795 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
 1796 * successful virtual N_Port login.
1797 **/
1798void
1799lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
1800{
1801 memset(mbox, 0, sizeof(*mbox));
1802 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1803 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
1804}
1805
1806/**
1807 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1808 * @mbox: pointer to lpfc mbox command to initialize.
1809 * @vfi: VFI to be unregistered.
1810 *
1811 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1812 * (logical NPort) into the inactive state. The SLI Host must have logged out
1813 * and unregistered all remote N_Ports to abort any activity on the virtual
1814 * fabric. The SLI Port posts the mailbox response after marking the virtual
1815 * fabric inactive.
1816 **/
1817void
1818lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
1819{
1820 memset(mbox, 0, sizeof(*mbox));
1821 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1822 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
1823}
1824
1825/**
 1826 * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
 1827 * @phba: pointer to the hba structure.
 1828 * @mbox: pointer to lpfc mbox command to initialize.
 1829 *
 1830 * This function creates a SLI4 dump mailbox command to dump FCoE
1831 * parameters stored in region 23.
1832 **/
1833int
1834lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1835 struct lpfcMboxq *mbox)
1836{
1837 struct lpfc_dmabuf *mp = NULL;
1838 MAILBOX_t *mb;
1839
1840 memset(mbox, 0, sizeof(*mbox));
1841 mb = &mbox->u.mb;
1842
1843 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1844 if (mp)
1845 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1846
1847 if (!mp || !mp->virt) {
1848 kfree(mp);
1849 /* dump_fcoe_param failed to allocate memory */
1850 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1851 "2569 lpfc_dump_fcoe_param: memory"
1852 " allocation failed \n");
1853 return 1;
1854 }
1855
1856 memset(mp->virt, 0, LPFC_BPL_SIZE);
1857 INIT_LIST_HEAD(&mp->list);
1858
1859 /* save address for completion */
1860 mbox->context1 = (uint8_t *) mp;
1861
1862 mb->mbxCommand = MBX_DUMP_MEMORY;
1863 mb->un.varDmp.type = DMP_NV_PARAMS;
1864 mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
1865 mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
1866 mb->un.varWords[3] = putPaddrLow(mp->phys);
1867 mb->un.varWords[4] = putPaddrHigh(mp->phys);
1868 return 0;
1869}
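
lpfc_dump_fcoe_param() stashes the DMA buffer in mbox->context1 for the completion side; a hedged sketch of that cleanup (the handler name is hypothetical):

        /* Sketch: completion handler releasing the region-23 DMA buffer. */
        static void example_dump_fcoe_cmpl(struct lpfc_hba *phba,
                                           LPFC_MBOXQ_t *mbox)
        {
                struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->context1;

                /* ... parse the FCoE parameters out of mp->virt ... */
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(mbox, phba->mbox_mem_pool);
        }
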
1870
1871/**
1872 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
1873 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
1874 * @mbox: pointer to lpfc mbox command to initialize.
1875 *
1876 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
1877 * SLI Host uses the command to activate an FCF after it has acquired FCF
1878 * information via a READ_FCF mailbox command. This mailbox command also is used
1879 * to indicate where received unsolicited frames from this FCF will be sent. By
1880 * default this routine will set up the FCF to forward all unsolicited frames
1881 * the the RQ ID passed in the @phba. This can be overridden by the caller for
1882 * more complicated setups.
1883 **/
1884void
1885lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1886{
1887 struct lpfc_mbx_reg_fcfi *reg_fcfi;
1888
1889 memset(mbox, 0, sizeof(*mbox));
1890 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
1891 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
1892 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
1893 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1894 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1895 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1896 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
1897 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1898 bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
1899 (~phba->fcf.addr_mode) & 0x3);
1900 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1901 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1902 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
1903 }
1904}
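
Since RQs 1-3 default to REG_FCF_INVALID_QID, a caller wanting the more complicated setup mentioned above would override fields after the call; a sketch (the second queue is an assumption):

        /* Sketch: steer some unsolicited frames to a second RQ. */
        lpfc_reg_fcfi(phba, mbox);
        bf_set(lpfc_reg_fcfi_rq_id1, &mbox->u.mqe.un.reg_fcfi,
               other_hdr_rq->queue_id); /* hypothetical second RQ */
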
1905
1906/**
1907 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
1908 * @mbox: pointer to lpfc mbox command to initialize.
1909 * @fcfi: FCFI to be unregistered.
1910 *
1911 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
1912 * The SLI Host uses the command to inactivate an FCFI.
1913 **/
1914void
1915lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
1916{
1917 memset(mbox, 0, sizeof(*mbox));
1918 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
1919 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
1920}
1921
1922/**
1923 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
1924 * @mbox: pointer to lpfc mbox command to initialize.
1925 * @ndlp: The nodelist structure that describes the RPI to resume.
1926 *
1927 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
1928 * link event.
1929 **/
1930void
1931lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1932{
1933 struct lpfc_mbx_resume_rpi *resume_rpi;
1934
1935 memset(mbox, 0, sizeof(*mbox));
1936 resume_rpi = &mbox->u.mqe.un.resume_rpi;
1937 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
1938 bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
1939 bf_set(lpfc_resume_rpi_vpi, resume_rpi,
1940 ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
1941 bf_set(lpfc_resume_rpi_vfi, resume_rpi,
1942 ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
1943}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 35a976733398..e198c917c13e 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
45 * @phba: HBA to allocate pools for 47 * @phba: HBA to allocate pools for
46 * 48 *
47 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, 49 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
48 * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools 50 * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
49 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. 51 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
50 * 52 *
51 * Notes: Not interrupt-safe. Must be called with no locks held. If any 53 * Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -56,19 +58,30 @@
56 * -ENOMEM on failure (if any memory allocations fail) 58 * -ENOMEM on failure (if any memory allocations fail)
57 **/ 59 **/
58int 60int
59lpfc_mem_alloc(struct lpfc_hba * phba) 61lpfc_mem_alloc(struct lpfc_hba *phba, int align)
60{ 62{
61 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 63 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
62 int longs; 64 int longs;
63 int i; 65 int i;
64 66
65 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 67 if (phba->sli_rev == LPFC_SLI_REV4)
66 phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); 68 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev,
71 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size,
73 0);
74 else
75 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0);
67 if (!phba->lpfc_scsi_dma_buf_pool) 79 if (!phba->lpfc_scsi_dma_buf_pool)
68 goto fail; 80 goto fail;
69 81
70 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, 82 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
71 LPFC_BPL_SIZE, 8,0); 83 LPFC_BPL_SIZE,
84 align, 0);
72 if (!phba->lpfc_mbuf_pool) 85 if (!phba->lpfc_mbuf_pool)
73 goto fail_free_dma_buf_pool; 86 goto fail_free_dma_buf_pool;
74 87
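
The new align parameter lets the SLI4 path demand stricter alignment for the DMA pools than the historical 8 bytes used by SLI3. A sketch of how an init path might choose it (the values here are assumptions, not taken from this patch):

        /* Sketch: choose pool alignment per SLI revision (illustrative). */
        int align = (phba->sli_rev == LPFC_SLI_REV4) ? PAGE_SIZE : 8;

        if (lpfc_mem_alloc(phba, align))
                return -ENOMEM;
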
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
97 sizeof(struct lpfc_nodelist)); 110 sizeof(struct lpfc_nodelist));
98 if (!phba->nlp_mem_pool) 111 if (!phba->nlp_mem_pool)
99 goto fail_free_mbox_pool; 112 goto fail_free_mbox_pool;
100 113 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
101 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, 114 phba->pcidev,
102 LPFC_BPL_SIZE, 8, 0); 115 LPFC_HDR_BUF_SIZE, align, 0);
103 if (!phba->lpfc_hbq_pool) 116 if (!phba->lpfc_hrb_pool)
104 goto fail_free_nlp_mem_pool; 117 goto fail_free_nlp_mem_pool;
118 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
119 phba->pcidev,
120 LPFC_DATA_BUF_SIZE, align, 0);
121 if (!phba->lpfc_drb_pool)
122 goto fail_free_hbq_pool;
105 123
106 /* vpi zero is reserved for the physical port so add 1 to max */ 124 /* vpi zero is reserved for the physical port so add 1 to max */
107 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; 125 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
108 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); 126 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
109 if (!phba->vpi_bmask) 127 if (!phba->vpi_bmask)
110 goto fail_free_hbq_pool; 128 goto fail_free_dbq_pool;
111 129
112 return 0; 130 return 0;
113 131
132 fail_free_dbq_pool:
133 pci_pool_destroy(phba->lpfc_drb_pool);
134 phba->lpfc_drb_pool = NULL;
114 fail_free_hbq_pool: 135 fail_free_hbq_pool:
115 lpfc_sli_hbqbuf_free_all(phba); 136 pci_pool_destroy(phba->lpfc_hrb_pool);
116 pci_pool_destroy(phba->lpfc_hbq_pool); 137 phba->lpfc_hrb_pool = NULL;
117 fail_free_nlp_mem_pool: 138 fail_free_nlp_mem_pool:
118 mempool_destroy(phba->nlp_mem_pool); 139 mempool_destroy(phba->nlp_mem_pool);
119 phba->nlp_mem_pool = NULL; 140 phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
136} 157}
137 158
138/** 159/**
139 * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc 160 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
140 * @phba: HBA to free memory for 161 * @phba: HBA to free memory for
141 * 162 *
142 * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, 163 * Description: Frees the memory allocated by the lpfc_mem_alloc routine. This
143 * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and 164 * routine is the counterpart of lpfc_mem_alloc.
144 * lpfc_nodelist. Also frees the VPI bitmask
145 * 165 *
146 * Returns: None 166 * Returns: None
147 **/ 167 **/
148void 168void
149lpfc_mem_free(struct lpfc_hba * phba) 169lpfc_mem_free(struct lpfc_hba *phba)
150{ 170{
151 struct lpfc_sli *psli = &phba->sli;
152 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
153 LPFC_MBOXQ_t *mbox, *next_mbox;
154 struct lpfc_dmabuf *mp;
155 int i; 171 int i;
172 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
156 173
174 /* Free VPI bitmask memory */
157 kfree(phba->vpi_bmask); 175 kfree(phba->vpi_bmask);
176
177 /* Free HBQ pools */
158 lpfc_sli_hbqbuf_free_all(phba); 178 lpfc_sli_hbqbuf_free_all(phba);
179 pci_pool_destroy(phba->lpfc_drb_pool);
180 phba->lpfc_drb_pool = NULL;
181 pci_pool_destroy(phba->lpfc_hrb_pool);
182 phba->lpfc_hrb_pool = NULL;
183
184 /* Free NLP memory pool */
185 mempool_destroy(phba->nlp_mem_pool);
186 phba->nlp_mem_pool = NULL;
187
188 /* Free mbox memory pool */
189 mempool_destroy(phba->mbox_mem_pool);
190 phba->mbox_mem_pool = NULL;
191
192 /* Free MBUF memory pool */
193 for (i = 0; i < pool->current_count; i++)
194 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
195 pool->elements[i].phys);
196 kfree(pool->elements);
197
198 pci_pool_destroy(phba->lpfc_mbuf_pool);
199 phba->lpfc_mbuf_pool = NULL;
159 200
201 /* Free DMA buffer memory pool */
202 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
203 phba->lpfc_scsi_dma_buf_pool = NULL;
204
205 return;
206}
207
208/**
209 * lpfc_mem_free_all - Frees all PCI and driver memory
210 * @phba: HBA to free memory for
211 *
212 * Description: Frees memory from the PCI and driver memory pools, including
213 * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool and lpfc_hrb_pool. Frees
214 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
215 * the VPI bitmask.
216 *
217 * Returns: None
218 **/
219void
220lpfc_mem_free_all(struct lpfc_hba *phba)
221{
222 struct lpfc_sli *psli = &phba->sli;
223 LPFC_MBOXQ_t *mbox, *next_mbox;
224 struct lpfc_dmabuf *mp;
225
226 /* Free memory used in mailbox queue back to mailbox memory pool */
160 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 227 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
161 mp = (struct lpfc_dmabuf *) (mbox->context1); 228 mp = (struct lpfc_dmabuf *) (mbox->context1);
162 if (mp) { 229 if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
166 list_del(&mbox->list); 233 list_del(&mbox->list);
167 mempool_free(mbox, phba->mbox_mem_pool); 234 mempool_free(mbox, phba->mbox_mem_pool);
168 } 235 }
236 /* Free memory used in mailbox cmpl list back to mailbox memory pool */
169 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { 237 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
170 mp = (struct lpfc_dmabuf *) (mbox->context1); 238 mp = (struct lpfc_dmabuf *) (mbox->context1);
171 if (mp) { 239 if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
175 list_del(&mbox->list); 243 list_del(&mbox->list);
176 mempool_free(mbox, phba->mbox_mem_pool); 244 mempool_free(mbox, phba->mbox_mem_pool);
177 } 245 }
178 246 /* Free the active mailbox command back to the mailbox memory pool */
247 spin_lock_irq(&phba->hbalock);
179 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 248 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
249 spin_unlock_irq(&phba->hbalock);
180 if (psli->mbox_active) { 250 if (psli->mbox_active) {
181 mbox = psli->mbox_active; 251 mbox = psli->mbox_active;
182 mp = (struct lpfc_dmabuf *) (mbox->context1); 252 mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
188 psli->mbox_active = NULL; 258 psli->mbox_active = NULL;
189 } 259 }
190 260
191 for (i = 0; i < pool->current_count; i++) 261 /* Free and destroy all the allocated memory pools */
192 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 262 lpfc_mem_free(phba);
193 pool->elements[i].phys);
194 kfree(pool->elements);
195
196 pci_pool_destroy(phba->lpfc_hbq_pool);
197 mempool_destroy(phba->nlp_mem_pool);
198 mempool_destroy(phba->mbox_mem_pool);
199
200 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
201 pci_pool_destroy(phba->lpfc_mbuf_pool);
202
203 phba->lpfc_hbq_pool = NULL;
204 phba->nlp_mem_pool = NULL;
205 phba->mbox_mem_pool = NULL;
206 phba->lpfc_scsi_dma_buf_pool = NULL;
207 phba->lpfc_mbuf_pool = NULL;
208 263
209 /* Free the iocb lookup array */ 264 /* Free the iocb lookup array */
210 kfree(psli->iocbq_lookup); 265 kfree(psli->iocbq_lookup);
211 psli->iocbq_lookup = NULL; 266 psli->iocbq_lookup = NULL;
267
268 return;
212} 269}
213 270
214/** 271/**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
305 * lpfc_els_hbq_alloc - Allocate an HBQ buffer 362 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
306 * @phba: HBA to allocate HBQ buffer for 363 * @phba: HBA to allocate HBQ buffer for
307 * 364 *
308 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI 365 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
309 * pool along a non-DMA-mapped container for it. 366 * pool along with a non-DMA-mapped container for it.
310 * 367 *
311 * Notes: Not interrupt-safe. Must be called with no locks held. 368 * Notes: Not interrupt-safe. Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
323 if (!hbqbp) 380 if (!hbqbp)
324 return NULL; 381 return NULL;
325 382
326 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, 383 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
327 &hbqbp->dbuf.phys); 384 &hbqbp->dbuf.phys);
328 if (!hbqbp->dbuf.virt) { 385 if (!hbqbp->dbuf.virt) {
329 kfree(hbqbp); 386 kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
334} 391}
335 392
336/** 393/**
337 * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc 394 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
338 * @phba: HBA buffer was allocated for 395 * @phba: HBA buffer was allocated for
339 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc 396 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
340 * 397 *
@@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
348void 405void
349lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 406lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
350{ 407{
351 pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); 408 pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
352 kfree(hbqbp); 409 kfree(hbqbp);
353 return; 410 return;
354} 411}
355 412
356/** 413/**
414 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
415 * @phba: HBA to allocate a receive buffer for
416 *
417 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
 418 * pool along with a non-DMA-mapped container for it.
419 *
420 * Notes: Not interrupt-safe. Must be called with no locks held.
421 *
422 * Returns:
423 * pointer to HBQ on success
424 * NULL on failure
425 **/
426struct hbq_dmabuf *
427lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
428{
429 struct hbq_dmabuf *dma_buf;
430
431 dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
432 if (!dma_buf)
433 return NULL;
434
435 dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
436 &dma_buf->hbuf.phys);
437 if (!dma_buf->hbuf.virt) {
438 kfree(dma_buf);
439 return NULL;
440 }
441 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
442 &dma_buf->dbuf.phys);
443 if (!dma_buf->dbuf.virt) {
444 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
445 dma_buf->hbuf.phys);
446 kfree(dma_buf);
447 return NULL;
448 }
449 dma_buf->size = LPFC_BPL_SIZE;
450 return dma_buf;
451}
452
453/**
454 * lpfc_sli4_rb_free - Frees a receive buffer
455 * @phba: HBA buffer was allocated for
 456 * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
457 *
458 * Description: Frees both the container and the DMA-mapped buffers returned by
459 * lpfc_sli4_rb_alloc.
460 *
461 * Notes: Can be called with or without locks held.
462 *
463 * Returns: None
464 **/
465void
466lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
467{
468 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
469 pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
470 kfree(dmab);
471 return;
472}
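
Each SLI4 receive buffer is a header/data pair drawn from the two pools created in lpfc_mem_alloc(); a minimal acquire-and-release sketch using the two routines above:

        /* Sketch: allocate and free an SLI4 hdr/data receive buffer pair. */
        struct hbq_dmabuf *rb = lpfc_sli4_rb_alloc(phba);

        if (rb) {
                /* rb->hbuf holds the frame header, rb->dbuf the payload */
                lpfc_sli4_rb_free(phba, rb);
        }
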
473
474/**
357 * lpfc_in_buf_free - Free a DMA buffer 475 * lpfc_in_buf_free - Free a DMA buffer
358 * @phba: HBA buffer is associated with 476 * @phba: HBA buffer is associated with
359 * @mp: Buffer to free 477 * @mp: Buffer to free
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 08cdc77af41c..09f659f77bb3 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
361 if (!mbox) 363 if (!mbox)
362 goto out; 364 goto out;
363 365
364 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, 366 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
365 (uint8_t *) sp, mbox, 0); 367 (uint8_t *) sp, mbox, 0);
366 if (rc) { 368 if (rc) {
367 mempool_free(mbox, phba->mbox_mem_pool); 369 mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
495 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
496 else 498 else
497 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
500 if ((ndlp->nlp_type & NLP_FABRIC) &&
501 vport->port_type == LPFC_NPIV_PORT) {
502 lpfc_linkdown_port(vport);
503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
504 spin_lock_irq(shost->host_lock);
505 ndlp->nlp_flag |= NLP_DELAY_TMO;
506 spin_unlock_irq(shost->host_lock);
498 507
499 if ((!(ndlp->nlp_type & NLP_FABRIC) && 508 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
500 ((ndlp->nlp_type & NLP_FCP_TARGET) || 509 } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
501 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 510 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
502 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 511 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
512 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
503 /* Only try to re-login if this is NOT a Fabric Node */ 513 /* Only try to re-login if this is NOT a Fabric Node */
504 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 514 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
505 spin_lock_irq(shost->host_lock); 515 spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
567{ 577{
568 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 578 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
569 579
570 if (!ndlp->nlp_rpi) { 580 if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
571 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 581 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
572 return 0; 582 return 0;
573 } 583 }
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
857 867
858 lpfc_unreg_rpi(vport, ndlp); 868 lpfc_unreg_rpi(vport, ndlp);
859 869
860 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, 870 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
861 (uint8_t *) sp, mbox, 0) == 0) { 871 (uint8_t *) sp, mbox, 0) == 0) {
862 switch (ndlp->nlp_DID) { 872 switch (ndlp->nlp_DID) {
863 case NameServer_DID: 873 case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1068 struct lpfc_iocbq *cmdiocb, *rspiocb; 1078 struct lpfc_iocbq *cmdiocb, *rspiocb;
1069 IOCB_t *irsp; 1079 IOCB_t *irsp;
1070 ADISC *ap; 1080 ADISC *ap;
1081 int rc;
1071 1082
1072 cmdiocb = (struct lpfc_iocbq *) arg; 1083 cmdiocb = (struct lpfc_iocbq *) arg;
1073 rspiocb = cmdiocb->context_un.rsp_iocb; 1084 rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1093 return ndlp->nlp_state; 1104 return ndlp->nlp_state;
1094 } 1105 }
1095 1106
1107 if (phba->sli_rev == LPFC_SLI_REV4) {
1108 rc = lpfc_sli4_resume_rpi(ndlp);
1109 if (rc) {
1110 /* Stay in state and retry. */
1111 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1112 return ndlp->nlp_state;
1113 }
1114 }
1115
1096 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1116 if (ndlp->nlp_type & NLP_FCP_TARGET) {
1097 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1117 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1098 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1118 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1100 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1120 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1102 } 1122 }
1123
1103 return ndlp->nlp_state; 1124 return ndlp->nlp_state;
1104} 1125}
1105 1126
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1190 1211
1191 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1212 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1192 if ((mb = phba->sli.mbox_active)) { 1213 if ((mb = phba->sli.mbox_active)) {
1193 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1214 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1194 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1215 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1195 lpfc_nlp_put(ndlp); 1216 lpfc_nlp_put(ndlp);
1196 mb->context2 = NULL; 1217 mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1200 1221
1201 spin_lock_irq(&phba->hbalock); 1222 spin_lock_irq(&phba->hbalock);
1202 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1203 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1204 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1205 mp = (struct lpfc_dmabuf *) (mb->context1); 1226 mp = (struct lpfc_dmabuf *) (mb->context1);
1206 if (mp) { 1227 if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1251{ 1272{
1252 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1273 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1253 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1274 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1254 MAILBOX_t *mb = &pmb->mb; 1275 MAILBOX_t *mb = &pmb->u.mb;
1255 uint32_t did = mb->un.varWords[1]; 1276 uint32_t did = mb->un.varWords[1];
1256 1277
1257 if (mb->mbxStatus) { 1278 if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1283 } 1304 }
1284 1305
1285 ndlp->nlp_rpi = mb->un.varWords[0]; 1306 ndlp->nlp_rpi = mb->un.varWords[0];
1307 ndlp->nlp_flag |= NLP_RPI_VALID;
1286 1308
1287 /* Only if we are not a fabric nport do we issue PRLI */ 1309 /* Only if we are not a fabric nport do we issue PRLI */
1288 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1310 if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1878 void *arg, uint32_t evt) 1900 void *arg, uint32_t evt)
1879{ 1901{
1880 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1902 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1881 MAILBOX_t *mb = &pmb->mb; 1903 MAILBOX_t *mb = &pmb->u.mb;
1882 1904
1883 if (!mb->mbxStatus) 1905 if (!mb->mbxStatus) {
1884 ndlp->nlp_rpi = mb->un.varWords[0]; 1906 ndlp->nlp_rpi = mb->un.varWords[0];
1885 else { 1907 ndlp->nlp_flag |= NLP_RPI_VALID;
1908 } else {
1886 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1909 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1887 lpfc_drop_node(vport, ndlp); 1910 lpfc_drop_node(vport, ndlp);
1888 return NLP_STE_FREED_NODE; 1911 return NLP_STE_FREED_NODE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 8032c5adb6a9..7991ba1980ae 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -31,8 +31,10 @@
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_version.h" 33#include "lpfc_version.h"
34#include "lpfc_hw4.h"
34#include "lpfc_hw.h" 35#include "lpfc_hw.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_nl.h" 38#include "lpfc_nl.h"
37#include "lpfc_disc.h" 39#include "lpfc_disc.h"
38#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
57 "SCSI_PROT_READ_CONVERT", 59 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT" 60 "SCSI_PROT_WRITE_CONVERT"
59}; 61};
62static void
63lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
60 64
61static void 65static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd) 66lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -325,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
325 329
326 vports = lpfc_create_vport_work_array(phba); 330 vports = lpfc_create_vport_work_array(phba);
327 if (vports != NULL) 331 if (vports != NULL)
328 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 332 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
329 shost = lpfc_shost_from_vport(vports[i]); 333 shost = lpfc_shost_from_vport(vports[i]);
330 shost_for_each_device(sdev, shost) { 334 shost_for_each_device(sdev, shost) {
331 new_queue_depth = 335 new_queue_depth =
@@ -379,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
379 383
380 vports = lpfc_create_vport_work_array(phba); 384 vports = lpfc_create_vport_work_array(phba);
381 if (vports != NULL) 385 if (vports != NULL)
382 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 386 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
383 shost = lpfc_shost_from_vport(vports[i]); 387 shost = lpfc_shost_from_vport(vports[i]);
384 shost_for_each_device(sdev, shost) { 388 shost_for_each_device(sdev, shost) {
385 if (vports[i]->cfg_lun_queue_depth <= 389 if (vports[i]->cfg_lun_queue_depth <=
@@ -427,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
427 431
428 vports = lpfc_create_vport_work_array(phba); 432 vports = lpfc_create_vport_work_array(phba);
429 if (vports != NULL) 433 if (vports != NULL)
430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 434 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
431 shost = lpfc_shost_from_vport(vports[i]); 435 shost = lpfc_shost_from_vport(vports[i]);
432 shost_for_each_device(sdev, shost) { 436 shost_for_each_device(sdev, shost) {
433 rport = starget_to_rport(scsi_target(sdev)); 437 rport = starget_to_rport(scsi_target(sdev));
@@ -438,22 +442,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
438} 442}
439 443
440/** 444/**
441 * lpfc_new_scsi_buf - Scsi buffer allocator 445 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 442 * @vport: The virtual port for which this call is being executed. 446 * @vport: The virtual port for which this call is being executed.
447 * @num_to_allocate: The requested number of buffers to allocate.
443 * 448 *
444 * This routine allocates a scsi buffer, which contains all the necessary 449 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
445 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 450 * the scsi buffer contains all the necessary information needed to initiate
446 * contains information to build the IOCB. The DMAable region contains 451 * a SCSI I/O. The non-DMAable buffer region contains information to build
447 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to 452 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
448 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL 453 * and the initial BPL. In addition to allocating memory, the FCP CMND and
449 * and the BPL BDE is setup in the IOCB. 454 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
450 * 455 *
451 * Return codes: 456 * Return codes:
452 * NULL - Error 457 * int - number of scsi buffers that were allocated.
453 * Pointer to lpfc_scsi_buf data structure - Success 458 * 0 = failure, less than num_to_alloc is a partial failure.
454 **/ 459 **/
455static struct lpfc_scsi_buf * 460static int
456lpfc_new_scsi_buf(struct lpfc_vport *vport) 461lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
457{ 462{
458 struct lpfc_hba *phba = vport->phba; 463 struct lpfc_hba *phba = vport->phba;
459 struct lpfc_scsi_buf *psb; 464 struct lpfc_scsi_buf *psb;
@@ -463,107 +468,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
463 dma_addr_t pdma_phys_fcp_rsp; 468 dma_addr_t pdma_phys_fcp_rsp;
464 dma_addr_t pdma_phys_bpl; 469 dma_addr_t pdma_phys_bpl;
465 uint16_t iotag; 470 uint16_t iotag;
471 int bcnt;
466 472
467 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 473 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
468 if (!psb) 474 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
469 return NULL; 475 if (!psb)
476 break;
477
478 /*
479 * Get memory from the pci pool to map the virt space to pci
480 * bus space for an I/O. The DMA buffer includes space for the
481 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
482 * necessary to support the sg_tablesize.
483 */
484 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
485 GFP_KERNEL, &psb->dma_handle);
486 if (!psb->data) {
487 kfree(psb);
488 break;
489 }
490
491 /* Initialize virtual ptrs to dma_buf region. */
492 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
493
494 /* Allocate iotag for psb->cur_iocbq. */
495 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
496 if (iotag == 0) {
497 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
498 psb->data, psb->dma_handle);
499 kfree(psb);
500 break;
501 }
502 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
503
504 psb->fcp_cmnd = psb->data;
505 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
506 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
507 sizeof(struct fcp_rsp);
508
509 /* Initialize local short-hand pointers. */
510 bpl = psb->fcp_bpl;
511 pdma_phys_fcp_cmd = psb->dma_handle;
512 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
513 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
514 sizeof(struct fcp_rsp);
515
516 /*
517 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
518 * are sg list bdes. Initialize the first two and leave the
519 * rest for queuecommand.
520 */
521 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
522 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
523 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
524 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
526
527 /* Setup the physical region for the FCP RSP */
528 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
529 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
530 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
531 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
532 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
533
534 /*
535 * Since the IOCB for the FCP I/O is built into this
536 * lpfc_scsi_buf, initialize it with all known data now.
537 */
538 iocb = &psb->cur_iocbq.iocb;
539 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
540 if ((phba->sli_rev == 3) &&
541 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
542 /* fill in immediate fcp command BDE */
543 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
544 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
545 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
546 unsli3.fcp_ext.icd);
547 iocb->un.fcpi64.bdl.addrHigh = 0;
548 iocb->ulpBdeCount = 0;
549 iocb->ulpLe = 0;
 550 /* fill in response BDE */
551 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
552 BUFF_TYPE_BDE_64;
553 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
554 sizeof(struct fcp_rsp);
555 iocb->unsli3.fcp_ext.rbde.addrLow =
556 putPaddrLow(pdma_phys_fcp_rsp);
557 iocb->unsli3.fcp_ext.rbde.addrHigh =
558 putPaddrHigh(pdma_phys_fcp_rsp);
559 } else {
560 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
561 iocb->un.fcpi64.bdl.bdeSize =
562 (2 * sizeof(struct ulp_bde64));
563 iocb->un.fcpi64.bdl.addrLow =
564 putPaddrLow(pdma_phys_bpl);
565 iocb->un.fcpi64.bdl.addrHigh =
566 putPaddrHigh(pdma_phys_bpl);
567 iocb->ulpBdeCount = 1;
568 iocb->ulpLe = 1;
569 }
570 iocb->ulpClass = CLASS3;
571 psb->status = IOSTAT_SUCCESS;
572 /* Put it back into the SCSI buffer list */
 573 lpfc_release_scsi_buf_s3(phba, psb);
470 574
471 /*
472 * Get memory from the pci pool to map the virt space to pci bus space
473 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
474 * struct fcp_rsp and the number of bde's necessary to support the
475 * sg_tablesize.
476 */
477 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
478 &psb->dma_handle);
479 if (!psb->data) {
480 kfree(psb);
481 return NULL;
482 } 575 }
483 576
484 /* Initialize virtual ptrs to dma_buf region. */ 577 return bcnt;
485 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 578}
486 579
487 /* Allocate iotag for psb->cur_iocbq. */ 580/**
488 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 581 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
489 if (iotag == 0) { 582 * @phba: pointer to lpfc hba data structure.
490 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 583 * @axri: pointer to the fcp xri abort wcqe structure.
491 psb->data, psb->dma_handle); 584 *
492 kfree (psb); 585 * This routine is invoked by the worker thread to process a SLI4 fast-path
493 return NULL; 586 * FCP aborted xri.
587 **/
588void
589lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
590 struct sli4_wcqe_xri_aborted *axri)
591{
592 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
593 struct lpfc_scsi_buf *psb, *next_psb;
594 unsigned long iflag = 0;
595
596 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
597 list_for_each_entry_safe(psb, next_psb,
598 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
599 if (psb->cur_iocbq.sli4_xritag == xri) {
600 list_del(&psb->list);
601 psb->status = IOSTAT_SUCCESS;
602 spin_unlock_irqrestore(
603 &phba->sli4_hba.abts_scsi_buf_list_lock,
604 iflag);
605 lpfc_release_scsi_buf_s4(phba, psb);
606 return;
607 }
608 }
609 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
610 iflag);
611}
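
The scan above depends on the "safe" flavor of the list iterator, which caches the next pointer so that the matching buffer can be unlinked in the middle of the walk. A stand-alone sketch of that unlink-during-traversal pattern follows; the plain pointers and names are illustrative stand-ins for the kernel list helpers, and the spinlock is deliberately omitted.

#include <stdio.h>

struct node {
	int xri;
	struct node *next;
};

/* Walk the list, caching ->next before any unlink (like the _safe
 * iterator), and splice out the first node whose xri matches. */
static struct node *unlink_xri(struct node **head, int xri)
{
	struct node **pp = head;
	struct node *cur, *next;

	for (cur = *head; cur; cur = next) {
		next = cur->next;	/* saved up front, so unlinking is safe */
		if (cur->xri == xri) {
			*pp = next;	/* splice the node out */
			return cur;
		}
		pp = &cur->next;
	}
	return NULL;
}

int main(void)
{
	struct node c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
	struct node *head = &a;
	struct node *hit = unlink_xri(&head, 20);

	printf("unlinked xri %d; list is now %d -> %d\n",
	       hit ? hit->xri : -1, head->xri, head->next->xri);
	return 0;
}
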
612
613/**
 614 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffer sgl pages as a block
615 * @phba: pointer to lpfc hba data structure.
616 *
617 * This routine walks the list of scsi buffers that have been allocated and
 618 * reposts them to the HBA by using SGL block post. This is needed after a
619 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
620 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
621 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
622 *
623 * Returns: 0 = success, non-zero failure.
624 **/
625int
626lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
627{
628 struct lpfc_scsi_buf *psb;
629 int index, status, bcnt = 0, rcnt = 0, rc = 0;
630 LIST_HEAD(sblist);
631
632 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
633 psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
634 if (psb) {
635 /* Remove from SCSI buffer list */
636 list_del(&psb->list);
637 /* Add it to a local SCSI buffer list */
638 list_add_tail(&psb->list, &sblist);
639 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
640 bcnt = rcnt;
641 rcnt = 0;
642 }
643 } else
644 /* A hole present in the XRI array, need to skip */
645 bcnt = rcnt;
646
647 if (index == phba->sli4_hba.scsi_xri_cnt - 1)
648 /* End of XRI array for SCSI buffer, complete */
649 bcnt = rcnt;
650
 651 /* Continue until we collect up to a nembed page worth of sgls */
652 if (bcnt == 0)
653 continue;
654 /* Now, post the SCSI buffer list sgls as a block */
655 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
656 /* Reset SCSI buffer count for next round of posting */
657 bcnt = 0;
658 while (!list_empty(&sblist)) {
659 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
660 list);
661 if (status) {
662 /* Put this back on the abort scsi list */
663 psb->status = IOSTAT_LOCAL_REJECT;
664 psb->result = IOERR_ABORT_REQUESTED;
665 rc++;
666 } else
667 psb->status = IOSTAT_SUCCESS;
668 /* Put it back into the SCSI buffer list */
669 lpfc_release_scsi_buf_s4(phba, psb);
670 }
494 } 671 }
495 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 672 return rc;
673}
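
The bcnt/rcnt bookkeeping in the routine above reduces to a single rule: collect buffers until a full block, a hole in the XRI array, or the end of the array forces a post. Below is a stand-alone model of that batching rule; BLOCK_MAX, post_block() and the sample data are invented for illustration (BLOCK_MAX plays the role of LPFC_NEMBED_MBOX_SGL_CNT).

#include <stdio.h>

#define BLOCK_MAX 4	/* stands in for LPFC_NEMBED_MBOX_SGL_CNT */

static void post_block(const int *batch, int count)
{
	printf("posting block of %d:", count);
	for (int i = 0; i < count; i++)
		printf(" %d", batch[i]);
	printf("\n");
}

int main(void)
{
	int xris[] = { 1, 2, 3, 4, 5, 0, 7, 8 };	/* 0 marks a hole */
	int total = (int)(sizeof(xris) / sizeof(xris[0]));
	int batch[BLOCK_MAX];
	int rcnt = 0;

	for (int i = 0; i < total; i++) {
		if (xris[i])
			batch[rcnt++] = xris[i];
		/* flush on a full batch, on a hole, or at the end of the array */
		if (rcnt && (rcnt == BLOCK_MAX || !xris[i] || i == total - 1)) {
			post_block(batch, rcnt);
			rcnt = 0;
		}
	}
	return 0;
}
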
496 674
497 psb->fcp_cmnd = psb->data; 675/**
498 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 676 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 499 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 677 * @vport: The virtual port for which this call is being executed.
 500 sizeof(struct fcp_rsp); 678 * @num_to_alloc: The requested number of buffers to allocate.
679 *
680 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
681 * the scsi buffer contains all the necessary information needed to initiate
682 * a SCSI I/O.
683 *
684 * Return codes:
685 * int - number of scsi buffers that were allocated.
686 * 0 = failure, less than num_to_alloc is a partial failure.
687 **/
688static int
689lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
690{
691 struct lpfc_hba *phba = vport->phba;
692 struct lpfc_scsi_buf *psb;
693 struct sli4_sge *sgl;
694 IOCB_t *iocb;
695 dma_addr_t pdma_phys_fcp_cmd;
696 dma_addr_t pdma_phys_fcp_rsp;
697 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
698 uint16_t iotag, last_xritag = NO_XRI;
699 int status = 0, index;
700 int bcnt;
701 int non_sequential_xri = 0;
702 int rc = 0;
703 LIST_HEAD(sblist);
704
705 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
706 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
707 if (!psb)
708 break;
501 709
502 /* Initialize local short-hand pointers. */ 710 /*
503 bpl = psb->fcp_bpl; 711 * Get memory from the pci pool to map the virt space to pci bus
504 pdma_phys_fcp_cmd = psb->dma_handle; 712 * space for an I/O. The DMA buffer includes space for the
505 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 713 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
506 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + 714 * necessary to support the sg_tablesize.
507 sizeof(struct fcp_rsp); 715 */
716 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
717 GFP_KERNEL, &psb->dma_handle);
718 if (!psb->data) {
719 kfree(psb);
720 break;
721 }
508 722
509 /* 723 /* Initialize virtual ptrs to dma_buf region. */
510 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg 724 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
511 * list bdes. Initialize the first two and leave the rest for
512 * queuecommand.
513 */
514 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
515 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
516 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
517 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
518 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
519
520 /* Setup the physical region for the FCP RSP */
521 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
522 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
523 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
524 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
526 725
527 /* 726 /* Allocate iotag for psb->cur_iocbq. */
528 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 727 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
529 * initialize it with all known data now. 728 if (iotag == 0) {
530 */ 729 kfree(psb);
531 iocb = &psb->cur_iocbq.iocb; 730 break;
532 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 731 }
533 if ((phba->sli_rev == 3) && 732
534 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 733 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
535 /* fill in immediate fcp command BDE */ 734 if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
536 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 735 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
736 psb->data, psb->dma_handle);
737 kfree(psb);
738 break;
739 }
740 if (last_xritag != NO_XRI
741 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
742 non_sequential_xri = 1;
743 } else
744 list_add_tail(&psb->list, &sblist);
745 last_xritag = psb->cur_iocbq.sli4_xritag;
746
747 index = phba->sli4_hba.scsi_xri_cnt++;
748 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
749
750 psb->fcp_bpl = psb->data;
751 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
752 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
753 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
754 sizeof(struct fcp_cmnd));
755
756 /* Initialize local short-hand pointers. */
757 sgl = (struct sli4_sge *)psb->fcp_bpl;
758 pdma_phys_bpl = psb->dma_handle;
759 pdma_phys_fcp_cmd =
760 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
761 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
762 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
763
764 /*
765 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
766 * are sg list bdes. Initialize the first two and leave the
767 * rest for queuecommand.
768 */
769 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
770 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
771 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
772 bf_set(lpfc_sli4_sge_last, sgl, 0);
773 sgl->word2 = cpu_to_le32(sgl->word2);
774 sgl->word3 = cpu_to_le32(sgl->word3);
775 sgl++;
776
777 /* Setup the physical region for the FCP RSP */
778 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
779 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
780 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
781 bf_set(lpfc_sli4_sge_last, sgl, 1);
782 sgl->word2 = cpu_to_le32(sgl->word2);
783 sgl->word3 = cpu_to_le32(sgl->word3);
784
785 /*
786 * Since the IOCB for the FCP I/O is built into this
787 * lpfc_scsi_buf, initialize it with all known data now.
788 */
789 iocb = &psb->cur_iocbq.iocb;
790 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
791 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
792 /* setting the BLP size to 2 * sizeof BDE may not be correct.
 793 * We are setting the bpl to point to our sgl. An sgl's
 794 * entries are 16 bytes, while bpl entries are 12 bytes.
795 */
537 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 796 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 797 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
539 unsli3.fcp_ext.icd); 798 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
540 iocb->un.fcpi64.bdl.addrHigh = 0;
541 iocb->ulpBdeCount = 0;
542 iocb->ulpLe = 0;
 543 /* fill in response BDE */
544 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546 sizeof(struct fcp_rsp);
547 iocb->unsli3.fcp_ext.rbde.addrLow =
548 putPaddrLow(pdma_phys_fcp_rsp);
549 iocb->unsli3.fcp_ext.rbde.addrHigh =
550 putPaddrHigh(pdma_phys_fcp_rsp);
551 } else {
552 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556 iocb->ulpBdeCount = 1; 799 iocb->ulpBdeCount = 1;
557 iocb->ulpLe = 1; 800 iocb->ulpLe = 1;
801 iocb->ulpClass = CLASS3;
802 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
803 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
804 else
805 pdma_phys_bpl1 = 0;
806 psb->dma_phys_bpl = pdma_phys_bpl;
807 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
808 if (non_sequential_xri) {
809 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
810 pdma_phys_bpl1,
811 psb->cur_iocbq.sli4_xritag);
812 if (status) {
813 /* Put this back on the abort scsi list */
814 psb->status = IOSTAT_LOCAL_REJECT;
815 psb->result = IOERR_ABORT_REQUESTED;
816 rc++;
817 } else
818 psb->status = IOSTAT_SUCCESS;
819 /* Put it back into the SCSI buffer list */
820 lpfc_release_scsi_buf_s4(phba, psb);
821 break;
822 }
823 }
824 if (bcnt) {
825 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
 826 /* Release the posted buffers to the free or abort list */
827 while (!list_empty(&sblist)) {
828 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
829 list);
830 if (status) {
831 /* Put this back on the abort scsi list */
832 psb->status = IOSTAT_LOCAL_REJECT;
833 psb->result = IOERR_ABORT_REQUESTED;
834 rc++;
835 } else
836 psb->status = IOSTAT_SUCCESS;
837 /* Put it back into the SCSI buffer list */
838 lpfc_release_scsi_buf_s4(phba, psb);
839 }
558 } 840 }
559 iocb->ulpClass = CLASS3;
560 841
561 return psb; 842 return bcnt + non_sequential_xri - rc;
562} 843}
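
Note the layout inversion relative to the SLI-3 allocator above: the sgl now occupies the front of the DMA buffer while fcp_cmnd and fcp_rsp are carved from its tail. A stand-alone sketch of that pointer arithmetic follows; the buffer size and struct sizes are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 512			/* stands in for cfg_sg_dma_buf_size */
struct fcp_cmnd { uint8_t bytes[32]; };	/* sizes invented for the example */
struct fcp_rsp  { uint8_t bytes[48]; };

int main(void)
{
	uint8_t *data = calloc(1, BUF_SIZE);
	if (!data)
		return 1;

	void *bpl = data;		/* sgl entries start at offset 0 */
	struct fcp_cmnd *cmnd = (struct fcp_cmnd *)(data + BUF_SIZE
		- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)));
	struct fcp_rsp *rsp = (struct fcp_rsp *)(cmnd + 1);

	printf("bpl at +%td, cmnd at +%td, rsp at +%td\n",
	       (uint8_t *)bpl - data, (uint8_t *)cmnd - data,
	       (uint8_t *)rsp - data);
	free(data);
	return 0;
}
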
563 844
564/** 845/**
 565 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba 846 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 566 * @phba: The Hba for which this call is being executed. 847 * @vport: The virtual port for which this call is being executed.
 848 * @num_to_alloc: The requested number of buffers to allocate.
849 *
850 * This routine wraps the actual SCSI buffer allocator function pointer from
851 * the lpfc_hba struct.
852 *
853 * Return codes:
854 * int - number of scsi buffers that were allocated.
855 * 0 = failure, less than num_to_alloc is a partial failure.
856 **/
857static inline int
858lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
859{
860 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
861}
862
863/**
864 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
865 * @phba: The HBA for which this call is being executed.
567 * 866 *
568 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 867 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
569 * and returns to caller. 868 * and returns to caller.
@@ -591,7 +890,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
591} 890}
592 891
593/** 892/**
594 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list 893 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
595 * @phba: The Hba for which this call is being executed. 894 * @phba: The Hba for which this call is being executed.
596 * @psb: The scsi buffer which is being released. 895 * @psb: The scsi buffer which is being released.
597 * 896 *
@@ -599,7 +898,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
599 * lpfc_scsi_buf_list list. 898 * lpfc_scsi_buf_list list.
600 **/ 899 **/
601static void 900static void
602lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 901lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
603{ 902{
604 unsigned long iflag = 0; 903 unsigned long iflag = 0;
605 904
@@ -610,21 +909,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
610} 909}
611 910
612/** 911/**
 613 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer 912 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
913 * @phba: The Hba for which this call is being executed.
914 * @psb: The scsi buffer which is being released.
915 *
916 * This routine releases @psb scsi buffer by adding it to tail of @phba
 917 * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
918 * and cannot be reused for at least RA_TOV amount of time if it was
919 * aborted.
920 **/
921static void
922lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
923{
924 unsigned long iflag = 0;
925
926 if (psb->status == IOSTAT_LOCAL_REJECT
927 && psb->result == IOERR_ABORT_REQUESTED) {
928 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
929 iflag);
930 psb->pCmd = NULL;
931 list_add_tail(&psb->list,
932 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
933 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
934 iflag);
935 } else {
936
937 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
938 psb->pCmd = NULL;
939 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
940 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
941 }
942}
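
The release policy above routes a buffer one of two ways: if its I/O was aborted it is quarantined on the abts list until the port is done with the XRI, otherwise it goes straight back to the free list. A stand-alone model of that routing decision, with invented status codes and the locking left out:

#include <stdio.h>

enum { STAT_SUCCESS, STAT_LOCAL_REJECT };	/* invented stand-ins */
enum { ERR_NONE, ERR_ABORT_REQUESTED };

struct buf {
	int id, status, result;
};

static const char *release_target(const struct buf *b)
{
	if (b->status == STAT_LOCAL_REJECT && b->result == ERR_ABORT_REQUESTED)
		return "abts list";	/* hold until the abort completes */
	return "free list";		/* immediately reusable */
}

int main(void)
{
	struct buf ok = { 1, STAT_SUCCESS, ERR_NONE };
	struct buf aborted = { 2, STAT_LOCAL_REJECT, ERR_ABORT_REQUESTED };

	printf("buf %d -> %s\n", ok.id, release_target(&ok));
	printf("buf %d -> %s\n", aborted.id, release_target(&aborted));
	return 0;
}
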
943
944/**
 945 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
946 * @phba: The Hba for which this call is being executed.
947 * @psb: The scsi buffer which is being released.
948 *
949 * This routine releases @psb scsi buffer by adding it to tail of @phba
950 * lpfc_scsi_buf_list list.
951 **/
952static void
953lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
954{
955
956 phba->lpfc_release_scsi_buf(phba, psb);
957}
958
959/**
960 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
614 * @phba: The Hba for which this call is being executed. 961 * @phba: The Hba for which this call is being executed.
615 * @lpfc_cmd: The scsi buffer which is going to be mapped. 962 * @lpfc_cmd: The scsi buffer which is going to be mapped.
616 * 963 *
617 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 964 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 618 * field of @lpfc_cmd. This routine scans through sg elements and formats the 965 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 619 * bdes. This routine also initializes all IOCB fields which are dependent on 966 * through sg elements and formats the bdes. This routine also initializes all
620 * scsi command request buffer. 967 * IOCB fields which are dependent on scsi command request buffer.
621 * 968 *
622 * Return codes: 969 * Return codes:
623 * 1 - Error 970 * 1 - Error
624 * 0 - Success 971 * 0 - Success
625 **/ 972 **/
626static int 973static int
627lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 974lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
628{ 975{
629 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 976 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630 struct scatterlist *sgel = NULL; 977 struct scatterlist *sgel = NULL;
@@ -827,8 +1174,8 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
827 * @reftag: out: ref tag (reference tag) 1174 * @reftag: out: ref tag (reference tag)
828 * 1175 *
829 * Description: 1176 * Description:
830 * Extract DIF paramters from the command if possible. Otherwise, 1177 * Extract DIF parameters from the command if possible. Otherwise,
831 * use default paratmers. 1178 * use default parameters.
832 * 1179 *
833 **/ 1180 **/
834static inline void 1181static inline void
@@ -1412,6 +1759,133 @@ out:
1412} 1759}
1413 1760
1414/** 1761/**
1762 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1763 * @phba: The Hba for which this call is being executed.
1764 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1765 *
1766 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1767 * field of @lpfc_cmd for device with SLI-4 interface spec.
1768 *
1769 * Return codes:
1770 * 1 - Error
1771 * 0 - Success
1772 **/
1773static int
1774lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1775{
1776 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1777 struct scatterlist *sgel = NULL;
1778 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1779 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1780 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1781 dma_addr_t physaddr;
1782 uint32_t num_bde = 0;
1783 uint32_t dma_len;
1784 uint32_t dma_offset = 0;
1785 int nseg;
1786
1787 /*
1788 * There are three possibilities here - use scatter-gather segment, use
1789 * the single mapping, or neither. Start the lpfc command prep by
1790 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1791 * data bde entry.
1792 */
1793 if (scsi_sg_count(scsi_cmnd)) {
1794 /*
1795 * The driver stores the segment count returned from pci_map_sg
 1796 * because this is a count of dma-mappings used to map the use_sg
1797 * pages. They are not guaranteed to be the same for those
1798 * architectures that implement an IOMMU.
1799 */
1800
1801 nseg = scsi_dma_map(scsi_cmnd);
1802 if (unlikely(!nseg))
1803 return 1;
1804 sgl += 1;
1805 /* clear the last flag in the fcp_rsp map entry */
1806 sgl->word2 = le32_to_cpu(sgl->word2);
1807 bf_set(lpfc_sli4_sge_last, sgl, 0);
1808 sgl->word2 = cpu_to_le32(sgl->word2);
1809 sgl += 1;
1810
1811 lpfc_cmd->seg_cnt = nseg;
1812 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1813 printk(KERN_ERR "%s: Too many sg segments from "
1814 "dma_map_sg. Config %d, seg_cnt %d\n",
1815 __func__, phba->cfg_sg_seg_cnt,
1816 lpfc_cmd->seg_cnt);
1817 scsi_dma_unmap(scsi_cmnd);
1818 return 1;
1819 }
1820
1821 /*
1822 * The driver established a maximum scatter-gather segment count
1823 * during probe that limits the number of sg elements in any
1824 * single scsi command. Just run through the seg_cnt and format
1825 * the sge's.
 1826 * Note: unlike the SLI-3 path, there is no fallback from
 1827 * IOCB-embedded BDEs to a BPL here; on SLI-4 all data sges are
 1828 * placed in the sgl that follows the fcp_cmnd and fcp_rsp entries.
1829 */
1830 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1831 physaddr = sg_dma_address(sgel);
1832 dma_len = sg_dma_len(sgel);
1833 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1834 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1835 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1836 if ((num_bde + 1) == nseg)
1837 bf_set(lpfc_sli4_sge_last, sgl, 1);
1838 else
1839 bf_set(lpfc_sli4_sge_last, sgl, 0);
1840 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1841 sgl->word2 = cpu_to_le32(sgl->word2);
1842 sgl->word3 = cpu_to_le32(sgl->word3);
1843 dma_offset += dma_len;
1844 sgl++;
1845 }
1846 } else {
1847 sgl += 1;
1848 /* clear the last flag in the fcp_rsp map entry */
1849 sgl->word2 = le32_to_cpu(sgl->word2);
1850 bf_set(lpfc_sli4_sge_last, sgl, 1);
1851 sgl->word2 = cpu_to_le32(sgl->word2);
1852 }
1853
1854 /*
1855 * Finish initializing those IOCB fields that are dependent on the
1856 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
 1857 * explicitly reinitialized since
1858 * all iocb memory resources are reused.
1859 */
1860 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1861
1862 /*
1863 * Due to difference in data length between DIF/non-DIF paths,
1864 * we need to set word 4 of IOCB here
1865 */
1866 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1867 return 0;
1868}
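
Stripped of the bit-field helpers, the data-sge loop above copies each segment's address and length, accumulates the running offset, and sets the last flag only on the final entry. A stand-alone model follows, in which plain struct fields stand in for the bf_set() accessors and the segment data is invented.

#include <stdio.h>

struct sge {
	unsigned long addr;
	unsigned int len;
	int last;			/* stands in for the lpfc_sli4_sge_last bit */
};

int main(void)
{
	unsigned long seg_addr[] = { 0x1000, 0x2000, 0x3000 };
	unsigned int seg_len[] = { 512, 1024, 256 };
	int nseg = 3;
	struct sge sgl[3];
	unsigned int offset = 0;

	for (int i = 0; i < nseg; i++) {
		sgl[i].addr = seg_addr[i];
		sgl[i].len = seg_len[i];
		sgl[i].last = (i + 1 == nseg);	/* only the final data sge */
		offset += seg_len[i];
	}
	printf("mapped %u bytes, last flag set on sge %d\n", offset, nseg - 1);
	return 0;
}
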
1869
1870/**
1871 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1872 * @phba: The Hba for which this call is being executed.
1873 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1874 *
1875 * This routine wraps the actual DMA mapping function pointer from the
1876 * lpfc_hba struct.
1877 *
1878 * Return codes:
1879 * 1 - Error
1880 * 0 - Success
1881 **/
1882static inline int
1883lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1884{
1885 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1886}
1887
1888/**
1415 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 1889 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1416 * @phba: Pointer to hba context object. 1890 * @phba: Pointer to hba context object.
1417 * @vport: Pointer to vport object. 1891 * @vport: Pointer to vport object.
@@ -1504,15 +1978,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1504} 1978}
1505 1979
1506/** 1980/**
1507 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather 1981 * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
1508 * @phba: The Hba for which this call is being executed. 1982 * @phba: The HBA for which this call is being executed.
1509 * @psb: The scsi buffer which is going to be un-mapped. 1983 * @psb: The scsi buffer which is going to be un-mapped.
1510 * 1984 *
1511 * This routine does DMA un-mapping of scatter gather list of scsi command 1985 * This routine does DMA un-mapping of scatter gather list of scsi command
1512 * field of @lpfc_cmd. 1986 * field of @lpfc_cmd for device with SLI-3 interface spec.
1513 **/ 1987 **/
1514static void 1988static void
1515lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 1989lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1516{ 1990{
1517 /* 1991 /*
1518 * There are only two special cases to consider. (1) the scsi command 1992 * There are only two special cases to consider. (1) the scsi command
@@ -1529,6 +2003,36 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
1529} 2003}
1530 2004
1531/** 2005/**
2006 * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
2007 * @phba: The Hba for which this call is being executed.
2008 * @psb: The scsi buffer which is going to be un-mapped.
2009 *
2010 * This routine does DMA un-mapping of scatter gather list of scsi command
2011 * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
2012 * remove the sgl for this scsi buffer then we will do it here. For now
2013 * we should be able to just call the sli3 unprep routine.
2014 **/
2015static void
2016lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2017{
2018 lpfc_scsi_unprep_dma_buf_s3(phba, psb);
2019}
2020
2021/**
2022 * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
2023 * @phba: The Hba for which this call is being executed.
2024 * @psb: The scsi buffer which is going to be un-mapped.
2025 *
 2026 * This routine wraps the actual DMA un-mapping function pointer from
 2027 * the lpfc_hba struct.
2028 **/
2029static void
2030lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2031{
2032 phba->lpfc_scsi_unprep_dma_buf(phba, psb);
2033}
2034
2035/**
 1532 * lpfc_handle_fcp_err - FCP response handler 2036
1533 * @vport: The virtual port for which this call is being executed. 2037 * @vport: The virtual port for which this call is being executed.
1534 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2038 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -1676,7 +2180,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 2180 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677 * @phba: The Hba for which this call is being executed. 2181 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd. 2182 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd . 2183 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680 * 2184 *
1681 * This routine assigns scsi command result by looking into response IOCB 2185 * This routine assigns scsi command result by looking into response IOCB
1682 * status field appropriately. This routine handles QUEUE FULL condition as 2186 * status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,16 +2461,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1957} 2461}
1958 2462
1959/** 2463/**
1960 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit 2464 * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev
1961 * @vport: The virtual port for which this call is being executed. 2465 * @vport: The virtual port for which this call is being executed.
1962 * @lpfc_cmd: The scsi command which needs to send. 2466 * @lpfc_cmd: The scsi command which needs to send.
1963 * @pnode: Pointer to lpfc_nodelist. 2467 * @pnode: Pointer to lpfc_nodelist.
1964 * 2468 *
1965 * This routine initializes fcp_cmnd and iocb data structure from scsi command 2469 * This routine initializes fcp_cmnd and iocb data structure from scsi command
1966 * to transfer. 2470 * to transfer for device with SLI3 interface spec.
1967 **/ 2471 **/
1968static void 2472static void
1969lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2473lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1970 struct lpfc_nodelist *pnode) 2474 struct lpfc_nodelist *pnode)
1971{ 2475{
1972 struct lpfc_hba *phba = vport->phba; 2476 struct lpfc_hba *phba = vport->phba;
@@ -2013,8 +2517,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2013 if (scsi_sg_count(scsi_cmnd)) { 2517 if (scsi_sg_count(scsi_cmnd)) {
2014 if (datadir == DMA_TO_DEVICE) { 2518 if (datadir == DMA_TO_DEVICE) {
2015 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 2519 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016 iocb_cmd->un.fcpi.fcpi_parm = 0; 2520 if (phba->sli_rev < LPFC_SLI_REV4) {
2017 iocb_cmd->ulpPU = 0; 2521 iocb_cmd->un.fcpi.fcpi_parm = 0;
2522 iocb_cmd->ulpPU = 0;
2523 } else
2524 iocb_cmd->ulpPU = PARM_READ_CHECK;
2018 fcp_cmnd->fcpCntl3 = WRITE_DATA; 2525 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019 phba->fc4OutputRequests++; 2526 phba->fc4OutputRequests++;
2020 } else { 2527 } else {
@@ -2051,20 +2558,60 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2051} 2558}
2052 2559
2053/** 2560/**
2054 * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit 2561 * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev
2562 * @vport: The virtual port for which this call is being executed.
2563 * @lpfc_cmd: The scsi command which needs to send.
2564 * @pnode: Pointer to lpfc_nodelist.
2565 *
2566 * This routine initializes fcp_cmnd and iocb data structure from scsi command
2567 * to transfer for device with SLI4 interface spec.
2568 **/
2569static void
2570lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2571 struct lpfc_nodelist *pnode)
2572{
2573 /*
2574 * The prep cmnd routines do not touch the sgl or its
2575 * entries. We may not have to do anything different.
2576 * I will leave this function in place until we can
2577 * run some IO through the driver and determine if changes
2578 * are needed.
2579 */
 2580 lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
2581}
2582
2583/**
2584 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
2585 * @vport: The virtual port for which this call is being executed.
2586 * @lpfc_cmd: The scsi command which needs to send.
2587 * @pnode: Pointer to lpfc_nodelist.
2588 *
2589 * This routine wraps the actual convert SCSI cmnd function pointer from
2590 * the lpfc_hba struct.
2591 **/
2592static inline void
2593lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2594 struct lpfc_nodelist *pnode)
2595{
2596 vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
2597}
2598
2599/**
2600 * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
2055 * @vport: The virtual port for which this call is being executed. 2601 * @vport: The virtual port for which this call is being executed.
2056 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2602 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057 * @lun: Logical unit number. 2603 * @lun: Logical unit number.
2058 * @task_mgmt_cmd: SCSI task management command. 2604 * @task_mgmt_cmd: SCSI task management command.
2059 * 2605 *
2060 * This routine creates FCP information unit corresponding to @task_mgmt_cmd. 2606 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2607 * for device with SLI-3 interface spec.
2061 * 2608 *
2062 * Return codes: 2609 * Return codes:
2063 * 0 - Error 2610 * 0 - Error
2064 * 1 - Success 2611 * 1 - Success
2065 **/ 2612 **/
2066static int 2613static int
2067lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 2614lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
2068 struct lpfc_scsi_buf *lpfc_cmd, 2615 struct lpfc_scsi_buf *lpfc_cmd,
2069 unsigned int lun, 2616 unsigned int lun,
2070 uint8_t task_mgmt_cmd) 2617 uint8_t task_mgmt_cmd)
@@ -2114,6 +2661,107 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2114} 2661}
2115 2662
2116/** 2663/**
 2664 * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
2665 * @vport: The virtual port for which this call is being executed.
2666 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2667 * @lun: Logical unit number.
2668 * @task_mgmt_cmd: SCSI task management command.
2669 *
2670 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2671 * for device with SLI-4 interface spec.
2672 *
2673 * Return codes:
2674 * 0 - Error
2675 * 1 - Success
2676 **/
2677static int
2678lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
2679 struct lpfc_scsi_buf *lpfc_cmd,
2680 unsigned int lun,
2681 uint8_t task_mgmt_cmd)
2682{
2683 /*
2684 * The prep cmnd routines do not touch the sgl or its
2685 * entries. We may not have to do anything different.
2686 * I will leave this function in place until we can
2687 * run some IO through the driver and determine if changes
2688 * are needed.
2689 */
2690 return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
2691 task_mgmt_cmd);
2692}
2693
2694/**
 2695 * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info
2696 * @vport: The virtual port for which this call is being executed.
2697 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2698 * @lun: Logical unit number.
2699 * @task_mgmt_cmd: SCSI task management command.
2700 *
2701 * This routine wraps the actual convert SCSI TM to FCP information unit
2702 * function pointer from the lpfc_hba struct.
2703 *
2704 * Return codes:
2705 * 0 - Error
2706 * 1 - Success
2707 **/
2708static inline int
2709lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2710 struct lpfc_scsi_buf *lpfc_cmd,
2711 unsigned int lun,
2712 uint8_t task_mgmt_cmd)
2713{
2714 struct lpfc_hba *phba = vport->phba;
2715
2716 return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2717 task_mgmt_cmd);
2718}
2719
2720/**
 2721 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2722 * @phba: The hba struct for which this call is being executed.
2723 * @dev_grp: The HBA PCI-Device group number.
2724 *
2725 * This routine sets up the SCSI interface API function jump table in @phba
2726 * struct.
2727 * Returns: 0 - success, -ENODEV - failure.
2728 **/
2729int
2730lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2731{
2732
2733 switch (dev_grp) {
2734 case LPFC_PCI_DEV_LP:
2735 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2736 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2737 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
2738 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
2739 phba->lpfc_scsi_prep_task_mgmt_cmd =
2740 lpfc_scsi_prep_task_mgmt_cmd_s3;
2741 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2742 break;
2743 case LPFC_PCI_DEV_OC:
2744 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2745 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2746 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
2747 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
2748 phba->lpfc_scsi_prep_task_mgmt_cmd =
2749 lpfc_scsi_prep_task_mgmt_cmd_s4;
2750 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2751 break;
2752 default:
2753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2754 "1418 Invalid HBA PCI-device group: 0x%x\n",
2755 dev_grp);
2756 return -ENODEV;
2757 break;
2758 }
2759 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2760 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2761 return 0;
2762}
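
The point of the jump table is that the SLI-rev checks happen exactly once, at probe time; afterwards the hot paths call through the pointers with no further branching. Below is a stand-alone model of the same pattern; the ops structure, group values, and allocator bodies are illustrative only.

#include <stdio.h>

struct hba;

struct hba_ops {
	int (*new_buf)(struct hba *h, int num);
};

struct hba {
	const struct hba_ops *ops;
};

static int new_buf_s3(struct hba *h, int num)
{
	(void)h;
	printf("SLI-3 allocator: %d buffers\n", num);
	return num;
}

static int new_buf_s4(struct hba *h, int num)
{
	(void)h;
	printf("SLI-4 allocator: %d buffers\n", num);
	return num;
}

static const struct hba_ops s3_ops = { .new_buf = new_buf_s3 };
static const struct hba_ops s4_ops = { .new_buf = new_buf_s4 };

/* One switch at setup time, like lpfc_scsi_api_table_setup(). */
static int api_table_setup(struct hba *h, int dev_grp)
{
	switch (dev_grp) {
	case 1: h->ops = &s3_ops; break;	/* plays the LPFC_PCI_DEV_LP role */
	case 2: h->ops = &s4_ops; break;	/* plays the LPFC_PCI_DEV_OC role */
	default: return -1;			/* plays the -ENODEV role */
	}
	return 0;
}

int main(void)
{
	struct hba h;

	if (api_table_setup(&h, 2) == 0)
		h.ops->new_buf(&h, 16);	/* dispatches with no rev check */
	return 0;
}
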
2763
2764/**
2117 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command 2765 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2118 * @phba: The Hba for which this call is being executed. 2766 * @phba: The Hba for which this call is being executed.
2119 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 2767 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
@@ -2178,9 +2826,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2826 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", 2827 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 2828 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2181 status = lpfc_sli_issue_iocb_wait(phba, 2829 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2182 &phba->sli.ring[phba->sli.fcp_ring], 2830 iocbq, iocbqrsp, lpfc_cmd->timeout);
2183 iocbq, iocbqrsp, lpfc_cmd->timeout);
2184 if (status != IOCB_SUCCESS) { 2831 if (status != IOCB_SUCCESS) {
2185 if (status == IOCB_TIMEDOUT) { 2832 if (status == IOCB_TIMEDOUT) {
2186 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 2833 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2305,7 +2952,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2305 struct Scsi_Host *shost = cmnd->device->host; 2952 struct Scsi_Host *shost = cmnd->device->host;
2306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2953 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307 struct lpfc_hba *phba = vport->phba; 2954 struct lpfc_hba *phba = vport->phba;
2308 struct lpfc_sli *psli = &phba->sli;
2309 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2955 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310 struct lpfc_nodelist *ndlp = rdata->pnode; 2956 struct lpfc_nodelist *ndlp = rdata->pnode;
2311 struct lpfc_scsi_buf *lpfc_cmd; 2957 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2427,7 +3073,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2427 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 3073 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2428 3074
2429 atomic_inc(&ndlp->cmd_pending); 3075 atomic_inc(&ndlp->cmd_pending);
2430 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 3076 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
2431 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 3077 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2432 if (err) { 3078 if (err) {
2433 atomic_dec(&ndlp->cmd_pending); 3079 atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +3136,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2490 struct Scsi_Host *shost = cmnd->device->host; 3136 struct Scsi_Host *shost = cmnd->device->host;
2491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3137 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 struct lpfc_hba *phba = vport->phba; 3138 struct lpfc_hba *phba = vport->phba;
2493 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
2494 struct lpfc_iocbq *iocb; 3139 struct lpfc_iocbq *iocb;
2495 struct lpfc_iocbq *abtsiocb; 3140 struct lpfc_iocbq *abtsiocb;
2496 struct lpfc_scsi_buf *lpfc_cmd; 3141 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +3176,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2531 icmd = &abtsiocb->iocb; 3176 icmd = &abtsiocb->iocb;
2532 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 3177 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2533 icmd->un.acxri.abortContextTag = cmd->ulpContext; 3178 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2534 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 3179 if (phba->sli_rev == LPFC_SLI_REV4)
3180 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3181 else
3182 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2535 3183
2536 icmd->ulpLe = 1; 3184 icmd->ulpLe = 1;
2537 icmd->ulpClass = cmd->ulpClass; 3185 icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +3190,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2542 3190
2543 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 3191 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2544 abtsiocb->vport = vport; 3192 abtsiocb->vport = vport;
2545 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 3193 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3194 IOCB_ERROR) {
2546 lpfc_sli_release_iocbq(phba, abtsiocb); 3195 lpfc_sli_release_iocbq(phba, abtsiocb);
2547 ret = FAILED; 3196 ret = FAILED;
2548 goto out; 3197 goto out;
@@ -2668,8 +3317,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
2668 "0703 Issue target reset to TGT %d LUN %d " 3317 "0703 Issue target reset to TGT %d LUN %d "
2669 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 3318 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
2670 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 3319 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
2671 status = lpfc_sli_issue_iocb_wait(phba, 3320 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2672 &phba->sli.ring[phba->sli.fcp_ring],
2673 iocbq, iocbqrsp, lpfc_cmd->timeout); 3321 iocbq, iocbqrsp, lpfc_cmd->timeout);
2674 if (status == IOCB_TIMEDOUT) { 3322 if (status == IOCB_TIMEDOUT) {
2675 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3323 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2825,11 +3473,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2825{ 3473{
2826 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3474 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2827 struct lpfc_hba *phba = vport->phba; 3475 struct lpfc_hba *phba = vport->phba;
2828 struct lpfc_scsi_buf *scsi_buf = NULL;
2829 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 3476 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2830 uint32_t total = 0, i; 3477 uint32_t total = 0;
2831 uint32_t num_to_alloc = 0; 3478 uint32_t num_to_alloc = 0;
2832 unsigned long flags; 3479 int num_allocated = 0;
2833 3480
2834 if (!rport || fc_remote_port_chkready(rport)) 3481 if (!rport || fc_remote_port_chkready(rport))
2835 return -ENXIO; 3482 return -ENXIO;
@@ -2863,20 +3510,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2863 (phba->cfg_hba_queue_depth - total)); 3510 (phba->cfg_hba_queue_depth - total));
2864 num_to_alloc = phba->cfg_hba_queue_depth - total; 3511 num_to_alloc = phba->cfg_hba_queue_depth - total;
2865 } 3512 }
2866 3513 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
2867 for (i = 0; i < num_to_alloc; i++) { 3514 if (num_to_alloc != num_allocated) {
2868 scsi_buf = lpfc_new_scsi_buf(vport); 3515 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2869 if (!scsi_buf) { 3516 "0708 Allocation request of %d "
2870 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3517 "command buffers did not succeed. "
2871 "0706 Failed to allocate " 3518 "Allocated %d buffers.\n",
2872 "command buffer\n"); 3519 num_to_alloc, num_allocated);
2873 break;
2874 }
2875
2876 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
2877 phba->total_scsi_bufs++;
2878 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
2879 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
2880 } 3520 }
2881 return 0; 3521 return 0;
2882} 3522}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c7c440d5fa29..65dfc8bd5b49 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
140 struct fcp_rsp *fcp_rsp; 140 struct fcp_rsp *fcp_rsp;
141 struct ulp_bde64 *fcp_bpl; 141 struct ulp_bde64 *fcp_bpl;
142 142
143 dma_addr_t dma_phys_bpl;
144
143 /* cur_iocbq has phys of the dma-able buffer. 145 /* cur_iocbq has phys of the dma-able buffer.
144 * Iotag is in here 146 * Iotag is in here
145 */ 147 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba4..ff04daf18f48 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,9 +29,12 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h>
32 33
34#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 35#include "lpfc_hw.h"
34#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 38#include "lpfc_nl.h"
36#include "lpfc_disc.h" 39#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -40,24 +43,7 @@
40#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
41#include "lpfc_compat.h" 44#include "lpfc_compat.h"
42#include "lpfc_debugfs.h" 45#include "lpfc_debugfs.h"
43 46#include "lpfc_vport.h"
44/*
45 * Define macro to log: Mailbox command x%x cannot issue Data
46 * This allows multiple uses of lpfc_msgBlk0311
47 * w/o perturbing log msg utility.
48 */
49#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
50 lpfc_printf_log(phba, \
51 KERN_INFO, \
52 LOG_MBOX | LOG_SLI, \
53 "(%d):0311 Mailbox command x%x cannot " \
54 "issue Data: x%x x%x x%x\n", \
55 pmbox->vport ? pmbox->vport->vpi : 0, \
56 pmbox->mb.mbxCommand, \
57 phba->pport->port_state, \
58 psli->sli_flag, \
59 flag)
60
61 47
62/* There are only four IOCB completion types. */ 48/* There are only four IOCB completion types. */
63typedef enum _lpfc_iocb_type { 49typedef enum _lpfc_iocb_type {
@@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type {
67 LPFC_ABORT_IOCB 53 LPFC_ABORT_IOCB
68} lpfc_iocb_type; 54} lpfc_iocb_type;
69 55
56
57/* Provide function prototypes local to this module. */
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *);
62
63static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{
66 return &iocbq->iocb;
67}
68
69/**
 70 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 71 * @q: The Work Queue to operate on.
 72 * @wqe: The Work Queue Entry to put on the Work Queue.
73 *
74 * This routine will copy the contents of @wqe to the next available entry on
75 * the @q. This function will then ring the Work Queue Doorbell to signal the
76 * HBA to start processing the Work Queue Entry. This function returns 0 if
77 * successful. If no entries are available on @q then this function will return
78 * -ENOMEM.
79 * The caller is expected to hold the hbalock when calling this routine.
80 **/
81static uint32_t
82lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
83{
84 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
85 struct lpfc_register doorbell;
86 uint32_t host_index;
87
88 /* If the host has not yet processed the next entry then we are done */
89 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
90 return -ENOMEM;
91 /* set consumption flag every once in a while */
92 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
93 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
94
95 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
96
97 /* Update the host index before invoking device */
98 host_index = q->host_index;
99 q->host_index = ((q->host_index + 1) % q->entry_count);
100
101 /* Ring Doorbell */
102 doorbell.word0 = 0;
103 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
104 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
105 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
106 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
107 readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
108
109 return 0;
110}
111
112/**
113 * lpfc_sli4_wq_release - Updates internal hba index for WQ
114 * @q: The Work Queue to operate on.
115 * @index: The index to advance the hba index to.
116 *
117 * This routine will update the HBA index of a queue to reflect consumption of
118 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
119 * an entry the host calls this function to update the queue's internal
120 * pointers. This routine returns the number of entries that were consumed by
121 * the HBA.
122 **/
123static uint32_t
124lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
125{
126 uint32_t released = 0;
127
128 if (q->hba_index == index)
129 return 0;
130 do {
131 q->hba_index = ((q->hba_index + 1) % q->entry_count);
132 released++;
133 } while (q->hba_index != index);
134 return released;
135}
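
The put and release routines above are two halves of one ring protocol: put refuses to advance host_index onto hba_index, so a full queue always keeps one slot unused, and release walks hba_index forward to the index the hardware reports. A stand-alone model of just that index arithmetic, with the entry copy and doorbell left out:

#include <stdio.h>

#define ENTRY_COUNT 4

struct ring {
	unsigned int host_index;	/* next slot the host will fill */
	unsigned int hba_index;		/* next slot the hardware will consume */
};

static int ring_put(struct ring *q)
{
	/* full when advancing host_index would land on hba_index */
	if (((q->host_index + 1) % ENTRY_COUNT) == q->hba_index)
		return -1;
	q->host_index = (q->host_index + 1) % ENTRY_COUNT;
	return 0;
}

static unsigned int ring_release(struct ring *q, unsigned int index)
{
	unsigned int released = 0;

	while (q->hba_index != index) {
		q->hba_index = (q->hba_index + 1) % ENTRY_COUNT;
		released++;
	}
	return released;
}

int main(void)
{
	struct ring q = { 0, 0 };

	while (ring_put(&q) == 0)
		;	/* fills ENTRY_COUNT - 1 slots, then reports full */
	printf("full at host_index=%u\n", q.host_index);
	printf("released %u entries\n", ring_release(&q, q.host_index));
	return 0;
}
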
136
137/**
 138 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 139 * @q: The Mailbox Queue to operate on.
 140 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 141 *
 142 * This routine will copy the contents of @mqe to the next available entry on
 143 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 144 * the HBA to start processing the Mailbox Queue Entry. This function returns 0 if
145 * successful. If no entries are available on @q then this function will return
146 * -ENOMEM.
147 * The caller is expected to hold the hbalock when calling this routine.
148 **/
149static uint32_t
150lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
151{
152 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
153 struct lpfc_register doorbell;
154 uint32_t host_index;
155
156 /* If the host has not yet processed the next entry then we are done */
157 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
158 return -ENOMEM;
159 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
160 /* Save off the mailbox pointer for completion */
161 q->phba->mbox = (MAILBOX_t *)temp_mqe;
162
163 /* Update the host index before invoking device */
164 host_index = q->host_index;
165 q->host_index = ((q->host_index + 1) % q->entry_count);
166
167 /* Ring Doorbell */
168 doorbell.word0 = 0;
169 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
170 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
171 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
172 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
173 return 0;
174}
175
176/**
177 * lpfc_sli4_mq_release - Updates internal hba index for MQ
178 * @q: The Mailbox Queue to operate on.
179 *
180 * This routine will update the HBA index of a queue to reflect consumption of
181 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
182 * an entry the host calls this function to update the queue's internal
183 * pointers. This routine returns the number of entries that were consumed by
184 * the HBA.
185 **/
186static uint32_t
187lpfc_sli4_mq_release(struct lpfc_queue *q)
188{
189 /* Clear the mailbox pointer for completion */
190 q->phba->mbox = NULL;
191 q->hba_index = ((q->hba_index + 1) % q->entry_count);
192 return 1;
193}
194
195/**
196 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
197 * @q: The Event Queue to get the first valid EQE from
198 *
199 * This routine will get the first valid Event Queue Entry from @q, update
200 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
201 * the Queue (no more work to do), or the Queue is full of EQEs that have been
202 * processed, but not popped back to the HBA then this routine will return NULL.
203 **/
204static struct lpfc_eqe *
205lpfc_sli4_eq_get(struct lpfc_queue *q)
206{
207 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
208
209 /* If the next EQE is not valid then we are done */
210 if (!bf_get(lpfc_eqe_valid, eqe))
211 return NULL;
212 /* If the host has not yet processed the next entry then we are done */
213 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
214 return NULL;
215
216 q->hba_index = ((q->hba_index + 1) % q->entry_count);
217 return eqe;
218}
219
220/**
221 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
222 * @q: The Event Queue that the host has completed processing for.
 223 * @arm: Indicates whether the host wants to arm this EQ.
224 *
225 * This routine will mark all Event Queue Entries on @q, from the last
226 * known completed entry to the last entry that was processed, as completed
227 * by clearing the valid bit for each completion queue entry. Then it will
228 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
229 * The internal host index in the @q will be updated by this routine to indicate
230 * that the host has finished processing the entries. The @arm parameter
231 * indicates that the queue should be rearmed when ringing the doorbell.
232 *
233 * This function will return the number of EQEs that were popped.
234 **/
235uint32_t
236lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
237{
238 uint32_t released = 0;
239 struct lpfc_eqe *temp_eqe;
240 struct lpfc_register doorbell;
241
242 /* while there are valid entries */
243 while (q->hba_index != q->host_index) {
244 temp_eqe = q->qe[q->host_index].eqe;
245 bf_set(lpfc_eqe_valid, temp_eqe, 0);
246 released++;
247 q->host_index = ((q->host_index + 1) % q->entry_count);
248 }
249 if (unlikely(released == 0 && !arm))
250 return 0;
251
252 /* ring doorbell for number popped */
253 doorbell.word0 = 0;
254 if (arm) {
255 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
256 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
257 }
258 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
262 return released;
263}
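
The EQ handshake layers a valid bit on top of the ring indices: hardware marks entries valid as it posts them, the get side consumes only valid entries, and release clears the bits from the last completed entry up to the last one processed. A stand-alone model follows, with the doorbell write replaced by a printf; sizes and contents are invented.

#include <stdio.h>

#define ENTRY_COUNT 4

struct eq {
	int valid[ENTRY_COUNT];		/* set by "hardware", cleared on release */
	unsigned int host_index;	/* next entry to be released */
	unsigned int hba_index;		/* next entry to be fetched */
};

static int eq_get(struct eq *q)
{
	if (!q->valid[q->hba_index])
		return -1;		/* nothing new from the hardware */
	if (((q->hba_index + 1) % ENTRY_COUNT) == q->host_index)
		return -1;		/* processed-but-unreleased entries fill the ring */
	q->hba_index = (q->hba_index + 1) % ENTRY_COUNT;
	return 0;
}

static unsigned int eq_release(struct eq *q)
{
	unsigned int released = 0;

	while (q->host_index != q->hba_index) {
		q->valid[q->host_index] = 0;	/* clear the valid bit */
		released++;
		q->host_index = (q->host_index + 1) % ENTRY_COUNT;
	}
	printf("doorbell: released %u entries\n", released);	/* model only */
	return released;
}

int main(void)
{
	struct eq q = { { 1, 1, 0, 0 }, 0, 0 };

	while (eq_get(&q) == 0)
		;	/* consumes the two valid entries */
	return eq_release(&q) == 2 ? 0 : 1;
}
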
264
265/**
266 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
267 * @q: The Completion Queue to get the first valid CQE from
268 *
269 * This routine will get the first valid Completion Queue Entry from @q, update
270 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
271 * the Queue (no more work to do), or the Queue is full of CQEs that have been
272 * processed, but not popped back to the HBA then this routine will return NULL.
273 **/
274static struct lpfc_cqe *
275lpfc_sli4_cq_get(struct lpfc_queue *q)
276{
277 struct lpfc_cqe *cqe;
278
279 /* If the next CQE is not valid then we are done */
280 if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
281 return NULL;
282 /* If the host has not yet processed the next entry then we are done */
283 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
284 return NULL;
285
286 cqe = q->qe[q->hba_index].cqe;
287 q->hba_index = ((q->hba_index + 1) % q->entry_count);
288 return cqe;
289}
290
291/**
292 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
293 * @q: The Completion Queue that the host has completed processing for.
 294 * @arm: Indicates whether the host wants to arm this CQ.
295 *
296 * This routine will mark all Completion queue entries on @q, from the last
297 * known completed entry to the last entry that was processed, as completed
298 * by clearing the valid bit for each completion queue entry. Then it will
299 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
300 * The internal host index in the @q will be updated by this routine to indicate
301 * that the host has finished processing the entries. The @arm parameter
302 * indicates that the queue should be rearmed when ringing the doorbell.
303 *
304 * This function will return the number of CQEs that were released.
305 **/
306uint32_t
307lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
308{
309 uint32_t released = 0;
310 struct lpfc_cqe *temp_qe;
311 struct lpfc_register doorbell;
312
313 /* while there are valid entries */
314 while (q->hba_index != q->host_index) {
315 temp_qe = q->qe[q->host_index].cqe;
316 bf_set(lpfc_cqe_valid, temp_qe, 0);
317 released++;
318 q->host_index = ((q->host_index + 1) % q->entry_count);
319 }
320 if (unlikely(released == 0 && !arm))
321 return 0;
322
323 /* ring doorbell for number popped */
324 doorbell.word0 = 0;
325 if (arm)
326 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
327 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
328 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
329 bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
330 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
331 return released;
332}
333
334/**
335 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
336 * @hq: The Header Receive Queue to operate on.
337 * @dq: The Data Receive Queue to operate on.
338 * @hrqe: The header Receive Queue Entry to put on @hq.
339 * @drqe: The data Receive Queue Entry to put on @dq.
340 *
341 * This routine copies @hrqe and @drqe to the next available entries on @hq
342 * and @dq, then rings the Receive Queue Doorbell to signal the HBA. It
343 * returns the index that the hrqe was copied to on success, -EINVAL if the
344 * queue types or host indexes disagree, and -EBUSY if no entries are
345 * available. The caller is expected to hold the hbalock when calling this routine.
345 **/
346static int
347lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
348 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
349{
350 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
351 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
352 struct lpfc_register doorbell;
353 int put_index = hq->host_index;
354
355 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
356 return -EINVAL;
357 if (hq->host_index != dq->host_index)
358 return -EINVAL;
359 /* If the host has not yet processed the next entry then we are done */
360 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
361 return -EBUSY;
362 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
363 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
364
365 /* Update the host index to point to the next slot */
366 hq->host_index = ((hq->host_index + 1) % hq->entry_count);
367 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
368
369 /* Ring The Header Receive Queue Doorbell */
370 if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
371 doorbell.word0 = 0;
372 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
373 LPFC_RQ_POST_BATCH);
374 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
375 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
376 }
377 return put_index;
378}
379
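The doorbell at the end of lpfc_sli4_rq_put is deliberately rung only when host_index crosses an LPFC_RQ_POST_BATCH boundary, so a burst of buffer posts costs one MMIO write per batch instead of one per RQE. A reduced sketch of the batching test, with stand-in names for the batch size and the doorbell write:

    #define DEMO_BATCH 8                    /* stand-in for LPFC_RQ_POST_BATCH */

    static void demo_notify_hba(unsigned int n)
    {
            /* stand-in for composing and writing the RQ doorbell register */
    }

    /* Post one entry; ring the doorbell only when host_index crosses a
     * batch boundary, mirroring the "!(host_index % BATCH)" test above. */
    static void demo_post_rqe(unsigned int *host_index, unsigned int entry_count)
    {
            *host_index = (*host_index + 1) % entry_count;
            if (!(*host_index % DEMO_BATCH))
                    demo_notify_hba(DEMO_BATCH);
    }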
380/**
381 * lpfc_sli4_rq_release - Updates internal hba index for RQ
382 * @hq: The Header Receive Queue to operate on.
383 * @dq: The Data Receive Queue to operate on.
384 *
385 * This routine will update the HBA indexes of @hq and @dq to reflect
386 * consumption of one Receive Queue Entry pair by the HBA. When the HBA
387 * indicates that it has consumed an entry the host calls this function
388 * to update the queues' internal pointers. This routine returns the
389 * number of entries that were consumed by the HBA.
389 **/
390static uint32_t
391lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
392{
393 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
394 return 0;
395 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
396 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
397 return 1;
398}
399
70/** 400/**
71 * lpfc_cmd_iocb - Get next command iocb entry in the ring 401 * lpfc_cmd_iocb - Get next command iocb entry in the ring
72 * @phba: Pointer to HBA context object. 402 * @phba: Pointer to HBA context object.
@@ -121,6 +451,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
121} 451}
122 452
123/** 453/**
454 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
455 * @phba: Pointer to HBA context object.
456 * @xritag: XRI value.
457 *
458 * This function clears the sglq pointer from the array of active
459 * sglq's. The xritag that is passed in is used to index into the
460 * array. Before the xritag can be used it needs to be adjusted
461 * by subtracting the xribase.
462 *
463 * Returns sglq pointer = success, NULL = failure.
464 **/
465static struct lpfc_sglq *
466__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
467{
468 uint16_t adj_xri;
469 struct lpfc_sglq *sglq;
470 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
471 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
472 return NULL;
473 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
474 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
475 return sglq;
476}
477
478/**
479 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
480 * @phba: Pointer to HBA context object.
481 * @xritag: XRI value.
482 *
483 * This function returns the sglq pointer from the array of active
484 * sglq's. The xritag that is passed in is used to index into the
485 * array. Before the xritag can be used it needs to be adjusted
486 * by subtracting the xribase.
487 *
488 * Returns sglq pointer = success, NULL = failure.
489 **/
490static struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{
493 uint16_t adj_xri;
494 struct lpfc_sglq *sglq;
495 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
496 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
497 return NULL;
498 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
499 return sglq;
500}
501
502/**
503 * __lpfc_sli_get_sglq - Allocates an sglq object from the sgl pool
504 * @phba: Pointer to HBA context object.
505 *
506 * This function is called with the hbalock held. It gets a new driver
507 * sglq object from the sglq list. If the list is not empty it returns a
508 * pointer to the newly allocated sglq object, else it returns NULL.
510 **/
511static struct lpfc_sglq *
512__lpfc_sli_get_sglq(struct lpfc_hba *phba)
513{
514 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
515 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
518 if (!sglq)
519 return NULL;
520 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
520 return sglq;
521}
522
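All three sglq helpers above perform the same tag translation: an absolute XRI is biased down by the port's xri_base before it can index the active-sglq array. A hedged sketch of that translation and its bounds check, with illustrative field names:

    /* Illustrative form of the XRI-to-slot translation used by the sglq
     * helpers: absolute tags are biased by the port's first valid XRI. */
    struct demo_xri_cfg {
            unsigned short xri_base;        /* first XRI owned by this port  */
            unsigned short max_xri;         /* highest adjusted index allowed */
    };

    static int demo_xri_to_slot(const struct demo_xri_cfg *cfg,
                                unsigned short xritag)
    {
            unsigned short adj = xritag - cfg->xri_base;

            if (adj > cfg->max_xri)         /* tag not in this port's range */
                    return -1;
            return adj;                     /* index into active-sglq array */
    }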
523/**
124 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 524 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
125 * @phba: Pointer to HBA context object. 525 * @phba: Pointer to HBA context object.
126 * 526 *
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
142} 542}
143 543
144/** 544/**
145 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 545 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
146 * @phba: Pointer to HBA context object. 546 * @phba: Pointer to HBA context object.
147 * @iocbq: Pointer to driver iocb object. 547 * @iocbq: Pointer to driver iocb object.
148 * 548 *
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
150 * iocb object to the iocb pool. The iotag in the iocb object 550 * iocb object to the iocb pool. The iotag in the iocb object
151 * does not change for each use of the iocb object. This function 551 * does not change for each use of the iocb object. This function
152 * clears all other fields of the iocb object when it is freed. 552 * clears all other fields of the iocb object when it is freed.
553 * The sglq structure that holds the xritag and the physical and virtual
554 * mappings for the scatter gather list is retrieved from the
555 * active array of sglq. The get of the sglq pointer also clears
556 * the entry in the array. If the status of the IO indicates that
557 * this IO was aborted then the sglq entry is put on the
558 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
559 * IO has good status or fails for any other reason then the sglq
560 * entry is added to the free list (lpfc_sgl_list).
153 **/ 561 **/
154static void 562static void
155__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 563__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
564{
565 struct lpfc_sglq *sglq;
566 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
567 unsigned long iflag;
568
569 if (iocbq->sli4_xritag == NO_XRI)
570 sglq = NULL;
571 else
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
576 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag);
580 list_add(&sglq->list,
581 &phba->sli4_hba.lpfc_abts_els_sgl_list);
582 spin_unlock_irqrestore(
583 &phba->sli4_hba.abts_sgl_list_lock, iflag);
584 } else
585 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
586 }
587
589 /*
590 * Clean all volatile data fields, preserve iotag and node struct.
591 */
592 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
593 iocbq->sli4_xritag = NO_XRI;
594 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
595}
596
597/**
598 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
599 * @phba: Pointer to HBA context object.
600 * @iocbq: Pointer to driver iocb object.
601 *
602 * This function is called with hbalock held to release driver
603 * iocb object to the iocb pool. The iotag in the iocb object
604 * does not change for each use of the iocb object. This function
605 * clears all other fields of the iocb object when it is freed.
606 **/
607static void
608__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
156{ 609{
157 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 610 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
158 611
@@ -160,10 +613,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
160 * Clean all volatile data fields, preserve iotag and node struct. 613 * Clean all volatile data fields, preserve iotag and node struct.
161 */ 614 */
162 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 615 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
616 iocbq->sli4_xritag = NO_XRI;
163 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 617 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
164} 618}
165 619
166/** 620/**
621 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
622 * @phba: Pointer to HBA context object.
623 * @iocbq: Pointer to driver iocb object.
624 *
625 * This function is called with hbalock held to release driver
626 * iocb object to the iocb pool. The iotag in the iocb object
627 * does not change for each use of the iocb object. This function
628 * clears all other fields of the iocb object when it is freed.
629 **/
630static void
631__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
632{
633 phba->__lpfc_sli_release_iocbq(phba, iocbq);
634}
635
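This one-line wrapper is the first of several API jump-table hooks this patch introduces: the hba structure carries a function pointer that is bound to the _s3 or _s4 variant once at setup time, so hot paths never branch on the SLI revision. A minimal sketch of the idiom, with illustrative types in place of the driver's:

    /* Minimal sketch of the per-SLI-revision jump-table idiom; the types
     * and setup routine are illustrative, not the driver's definitions. */
    struct demo_hba;
    typedef void (*demo_release_fn)(struct demo_hba *hba);

    struct demo_hba {
            int sli_rev;                    /* 3 or 4 */
            demo_release_fn release_iocbq;  /* bound once at attach time */
    };

    static void demo_release_s3(struct demo_hba *hba) { /* SLI-3 path */ }
    static void demo_release_s4(struct demo_hba *hba) { /* SLI-4 path */ }

    static void demo_api_setup(struct demo_hba *hba)
    {
            hba->release_iocbq = (hba->sli_rev == 4) ?
                                 demo_release_s4 : demo_release_s3;
    }

    static void demo_release(struct demo_hba *hba)
    {
            hba->release_iocbq(hba);        /* no sli_rev branch here */
    }

The same shape recurs below for lpfc_sli_hbq_to_firmware, lpfc_sli_handle_slow_ring_event, lpfc_sli_brdready and lpfc_sli_brdrestart.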
636/**
167 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 637 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
168 * @phba: Pointer to HBA context object. 638 * @phba: Pointer to HBA context object.
169 * @iocbq: Pointer to driver iocb object. 639 * @iocbq: Pointer to driver iocb object.
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
281 case CMD_GEN_REQUEST64_CR: 751 case CMD_GEN_REQUEST64_CR:
282 case CMD_GEN_REQUEST64_CX: 752 case CMD_GEN_REQUEST64_CX:
283 case CMD_XMIT_ELS_RSP64_CX: 753 case CMD_XMIT_ELS_RSP64_CX:
754 case DSSCMD_IWRITE64_CR:
755 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
284 type = LPFC_SOL_IOCB; 762 type = LPFC_SOL_IOCB;
285 break; 763 break;
286 case CMD_ABORT_XRI_CN: 764 case CMD_ABORT_XRI_CN:
@@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
348 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 826 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
349 if (!pmb) 827 if (!pmb)
350 return -ENOMEM; 828 return -ENOMEM;
351 pmbox = &pmb->mb; 829 pmbox = &pmb->u.mb;
352 phba->link_state = LPFC_INIT_MBX_CMDS; 830 phba->link_state = LPFC_INIT_MBX_CMDS;
353 for (i = 0; i < psli->num_rings; i++) { 831 for (i = 0; i < psli->num_rings; i++) {
354 lpfc_config_ring(phba, i, pmb); 832 lpfc_config_ring(phba, i, pmb);
@@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
779 phba->hbqs[i].buffer_count = 0; 1257 phba->hbqs[i].buffer_count = 0;
780 } 1258 }
781 /* Return all HBQ buffer that are in-fly */ 1259 /* Return all HBQ buffer that are in-fly */
782 list_for_each_entry_safe(dmabuf, next_dmabuf, 1260 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
783 &phba->hbqbuf_in_list, list) { 1261 list) {
784 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1262 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
785 list_del(&hbq_buf->dbuf.list); 1263 list_del(&hbq_buf->dbuf.list);
786 if (hbq_buf->tag == -1) { 1264 if (hbq_buf->tag == -1) {
@@ -814,10 +1292,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
814 * pointer to the hbq entry if it successfully post the buffer 1292 * pointer to the hbq entry if it successfully post the buffer
815 * else it will return NULL. 1293 * else it will return NULL.
816 **/ 1294 **/
817static struct lpfc_hbq_entry * 1295static int
818lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1296lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
819 struct hbq_dmabuf *hbq_buf) 1297 struct hbq_dmabuf *hbq_buf)
820{ 1298{
1299 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1300}
1301
1302/**
1303 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1304 * @phba: Pointer to HBA context object.
1305 * @hbqno: HBQ number.
1306 * @hbq_buf: Pointer to HBQ buffer.
1307 *
1308 * This function is called with the hbalock held to post a hbq buffer to the
1309 * firmware. If the function finds an empty slot in the HBQ, it will post the
1310 * buffer and place it on the hbq_buffer_list. The function will return zero if
1311 * it successfully posts the buffer, otherwise it will return an error.
1312 **/
1313static int
1314lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1315 struct hbq_dmabuf *hbq_buf)
1316{
821 struct lpfc_hbq_entry *hbqe; 1317 struct lpfc_hbq_entry *hbqe;
822 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1318 dma_addr_t physaddr = hbq_buf->dbuf.phys;
823 1319
@@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
838 /* flush */ 1334 /* flush */
839 readl(phba->hbq_put + hbqno); 1335 readl(phba->hbq_put + hbqno);
840 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1336 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
841 } 1337 return 0;
842 return hbqe; 1338 } else
1339 return -ENOMEM;
1340}
1341
1342/**
1343 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1344 * @phba: Pointer to HBA context object.
1345 * @hbqno: HBQ number.
1346 * @hbq_buf: Pointer to HBQ buffer.
1347 *
1348 * This function is called with the hbalock held to post an RQE to the SLI4
1349 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1350 * the hbq_buffer_list and return zero, otherwise it will return an error.
1351 **/
1352static int
1353lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1354 struct hbq_dmabuf *hbq_buf)
1355{
1356 int rc;
1357 struct lpfc_rqe hrqe;
1358 struct lpfc_rqe drqe;
1359
1360 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1361 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1362 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1363 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1364 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1365 &hrqe, &drqe);
1366 if (rc < 0)
1367 return rc;
1368 hbq_buf->tag = rc;
1369 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1370 return 0;
843} 1371}
844 1372
845/* HBQ for ELS and CT traffic. */ 1373/* HBQ for ELS and CT traffic. */
@@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
914 dbuf.list); 1442 dbuf.list);
915 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1443 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
916 (hbqno << 16)); 1444 (hbqno << 16));
917 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1445 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
918 phba->hbqs[hbqno].buffer_count++; 1446 phba->hbqs[hbqno].buffer_count++;
919 posted++; 1447 posted++;
920 } else 1448 } else
@@ -965,6 +1493,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
965} 1493}
966 1494
967/** 1495/**
1496 * lpfc_sli_hbqbuf_get - Remove the first hbq buffer from an hbq list
1497 * @phba: Pointer to HBA context object.
1498 * @hbqno: HBQ number.
1499 *
1500 * This function removes the first hbq buffer on an hbq list and returns a
1501 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1502 **/
1503static struct hbq_dmabuf *
1504lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1505{
1506 struct lpfc_dmabuf *d_buf;
1507
1508 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1509 if (!d_buf)
1510 return NULL;
1511 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1512}
1513
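lpfc_sli_hbqbuf_get leans on the standard container_of idiom: the list threads through the embedded dbuf member, and the surrounding hbq_dmabuf is recovered by subtracting that member's offset. A self-contained sketch with demo types in place of the driver's:

    #include <stddef.h>

    /* Sketch of the container_of recovery in lpfc_sli_hbqbuf_get(); the
     * demo types stand in for lpfc_dmabuf and hbq_dmabuf. */
    struct demo_dmabuf { void *virt; };

    struct demo_hbq_buf {
            unsigned int tag;
            struct demo_dmabuf dbuf;        /* embedded, as in hbq_dmabuf */
    };

    #define demo_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct demo_hbq_buf *demo_recover(struct demo_dmabuf *d)
    {
            /* subtract the member offset to get the enclosing struct */
            return demo_container_of(d, struct demo_hbq_buf, dbuf);
    }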
1514/**
968 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1515 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
969 * @phba: Pointer to HBA context object. 1516 * @phba: Pointer to HBA context object.
970 * @tag: Tag of the hbq buffer. 1517 * @tag: Tag of the hbq buffer.
@@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
985 if (hbqno >= LPFC_MAX_HBQS) 1532 if (hbqno >= LPFC_MAX_HBQS)
986 return NULL; 1533 return NULL;
987 1534
1535 spin_lock_irq(&phba->hbalock);
988 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1536 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
989 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1537 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
990 if (hbq_buf->tag == tag) { 1538 if (hbq_buf->tag == tag) {
1539 spin_unlock_irq(&phba->hbalock);
991 return hbq_buf; 1540 return hbq_buf;
992 } 1541 }
993 } 1542 }
1543 spin_unlock_irq(&phba->hbalock);
994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
995 "1803 Bad hbq tag. Data: x%x x%x\n", 1545 "1803 Bad hbq tag. Data: x%x x%x\n",
996 tag, phba->hbqs[tag >> 16].buffer_count); 1546 tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1013 1563
1014 if (hbq_buffer) { 1564 if (hbq_buffer) {
1015 hbqno = hbq_buffer->tag >> 16; 1565 hbqno = hbq_buffer->tag >> 16;
1016 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1566 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1017 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1567 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1018 }
1019 } 1568 }
1020} 1569}
1021 1570
@@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1086 case MBX_HEARTBEAT: 1635 case MBX_HEARTBEAT:
1087 case MBX_PORT_CAPABILITIES: 1636 case MBX_PORT_CAPABILITIES:
1088 case MBX_PORT_IOV_CONTROL: 1637 case MBX_PORT_IOV_CONTROL:
1638 case MBX_SLI4_CONFIG:
1639 case MBX_SLI4_REQ_FTRS:
1640 case MBX_REG_FCFI:
1641 case MBX_UNREG_FCFI:
1642 case MBX_REG_VFI:
1643 case MBX_UNREG_VFI:
1644 case MBX_INIT_VPI:
1645 case MBX_INIT_VFI:
1646 case MBX_RESUME_RPI:
1089 ret = mbxCommand; 1647 ret = mbxCommand;
1090 break; 1648 break;
1091 default: 1649 default:
@@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1106 * will wake up thread waiting on the wait queue pointed by context1 1664 * will wake up thread waiting on the wait queue pointed by context1
1107 * of the mailbox. 1665 * of the mailbox.
1108 **/ 1666 **/
1109static void 1667void
1110lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1668lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1111{ 1669{
1112 wait_queue_head_t *pdone_q; 1670 wait_queue_head_t *pdone_q;
@@ -1140,7 +1698,7 @@ void
1140lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1698lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1141{ 1699{
1142 struct lpfc_dmabuf *mp; 1700 struct lpfc_dmabuf *mp;
1143 uint16_t rpi; 1701 uint16_t rpi, vpi;
1144 int rc; 1702 int rc;
1145 1703
1146 mp = (struct lpfc_dmabuf *) (pmb->context1); 1704 mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1150 kfree(mp); 1708 kfree(mp);
1151 } 1709 }
1152 1710
1711 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1712 (phba->sli_rev == LPFC_SLI_REV4))
1713 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1714
1153 /* 1715 /*
1154 * If a REG_LOGIN succeeded after node is destroyed or node 1716 * If a REG_LOGIN succeeded after node is destroyed or node
1155 * is in re-discovery driver need to cleanup the RPI. 1717 * is in re-discovery driver need to cleanup the RPI.
1156 */ 1718 */
1157 if (!(phba->pport->load_flag & FC_UNLOADING) && 1719 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1158 pmb->mb.mbxCommand == MBX_REG_LOGIN64 && 1720 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1159 !pmb->mb.mbxStatus) { 1721 !pmb->u.mb.mbxStatus) {
1160 1722 rpi = pmb->u.mb.un.varWords[0];
1161 rpi = pmb->mb.un.varWords[0]; 1723 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1162 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); 1724 lpfc_unreg_login(phba, vpi, rpi, pmb);
1163 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1725 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1164 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1726 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1165 if (rc != MBX_NOT_FINISHED) 1727 if (rc != MBX_NOT_FINISHED)
1166 return; 1728 return;
1167 } 1729 }
1168 1730
1169 mempool_free(pmb, phba->mbox_mem_pool); 1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1170 return; 1732 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else
1734 mempool_free(pmb, phba->mbox_mem_pool);
1171} 1735}
1172 1736
1173/** 1737/**
@@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1204 if (pmb == NULL) 1768 if (pmb == NULL)
1205 break; 1769 break;
1206 1770
1207 pmbox = &pmb->mb; 1771 pmbox = &pmb->u.mb;
1208 1772
1209 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 1773 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1210 if (pmb->vport) { 1774 if (pmb->vport) {
@@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1233 /* Unknow mailbox command compl */ 1797 /* Unknow mailbox command compl */
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1235 "(%d):0323 Unknown Mailbox command " 1799 "(%d):0323 Unknown Mailbox command "
1236 "%x Cmpl\n", 1800 "x%x (x%x) Cmpl\n",
1237 pmb->vport ? pmb->vport->vpi : 0, 1801 pmb->vport ? pmb->vport->vpi : 0,
1238 pmbox->mbxCommand); 1802 pmbox->mbxCommand,
1803 lpfc_sli4_mbox_opcode_get(phba, pmb));
1239 phba->link_state = LPFC_HBA_ERROR; 1804 phba->link_state = LPFC_HBA_ERROR;
1240 phba->work_hs = HS_FFER3; 1805 phba->work_hs = HS_FFER3;
1241 lpfc_handle_eratt(phba); 1806 lpfc_handle_eratt(phba);
@@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1250 LOG_MBOX | LOG_SLI, 1815 LOG_MBOX | LOG_SLI,
1251 "(%d):0305 Mbox cmd cmpl " 1816 "(%d):0305 Mbox cmd cmpl "
1252 "error - RETRYing Data: x%x " 1817 "error - RETRYing Data: x%x "
1253 "x%x x%x x%x\n", 1818 "(x%x) x%x x%x x%x\n",
1254 pmb->vport ? pmb->vport->vpi :0, 1819 pmb->vport ? pmb->vport->vpi :0,
1255 pmbox->mbxCommand, 1820 pmbox->mbxCommand,
1821 lpfc_sli4_mbox_opcode_get(phba,
1822 pmb),
1256 pmbox->mbxStatus, 1823 pmbox->mbxStatus,
1257 pmbox->un.varWords[0], 1824 pmbox->un.varWords[0],
1258 pmb->vport->port_state); 1825 pmb->vport->port_state);
1259 pmbox->mbxStatus = 0; 1826 pmbox->mbxStatus = 0;
1260 pmbox->mbxOwner = OWN_HOST; 1827 pmbox->mbxOwner = OWN_HOST;
1261 spin_lock_irq(&phba->hbalock);
1262 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1263 spin_unlock_irq(&phba->hbalock);
1264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1828 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1265 if (rc == MBX_SUCCESS) 1829 if (rc != MBX_NOT_FINISHED)
1266 continue; 1830 continue;
1267 } 1831 }
1268 } 1832 }
1269 1833
1270 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 1834 /* Mailbox cmd <cmd> Cmpl <cmpl> */
1271 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 1835 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1272 "(%d):0307 Mailbox cmd x%x Cmpl x%p " 1836 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1273 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 1837 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1274 pmb->vport ? pmb->vport->vpi : 0, 1838 pmb->vport ? pmb->vport->vpi : 0,
1275 pmbox->mbxCommand, 1839 pmbox->mbxCommand,
1840 lpfc_sli4_mbox_opcode_get(phba, pmb),
1276 pmb->mbox_cmpl, 1841 pmb->mbox_cmpl,
1277 *((uint32_t *) pmbox), 1842 *((uint32_t *) pmbox),
1278 pmbox->un.varWords[0], 1843 pmbox->un.varWords[0],
@@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
1317 return &hbq_entry->dbuf; 1882 return &hbq_entry->dbuf;
1318} 1883}
1319 1884
1885/**
1886 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1887 * @phba: Pointer to HBA context object.
1888 * @pring: Pointer to driver SLI ring object.
1889 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1890 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1891 * @fch_type: the type for the first frame of the sequence.
1892 *
1893 * This function is called with no lock held. It uses the r_ctl and type
1894 * of the received sequence to find the correct callback function to call
1895 * to process the sequence.
1896 **/
1897static int
1898lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1899 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1900 uint32_t fch_type)
1901{
1902 int i;
1903
1904 /* Unsolicited Responses */
1905 if (pring->prt[0].profile) {
1906 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1907 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1908 saveq);
1909 return 1;
1910 }
1911 /* We must search, based on rctl / type
1912 for the right routine */
1913 for (i = 0; i < pring->num_mask; i++) {
1914 if ((pring->prt[i].rctl == fch_r_ctl) &&
1915 (pring->prt[i].type == fch_type)) {
1916 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1917 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1918 (phba, pring, saveq);
1919 return 1;
1920 }
1921 }
1922 return 0;
1923}
1320 1924
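The helper factors out what was previously open-coded in lpfc_sli_process_unsol_iocb: a linear scan of the ring's registered (rctl, type) masks, calling the first matching unsolicited-event handler. The match loop, reduced to an illustrative form:

    /* Reduced sketch of the (rctl, type) match loop in
     * lpfc_complete_unsol_iocb(); the table type is illustrative. */
    struct demo_prt {
            unsigned int rctl;
            unsigned int type;
            void (*rcv_unsol)(void *seq);
    };

    static int demo_dispatch(struct demo_prt *prt, int num_mask,
                             unsigned int rctl, unsigned int type, void *seq)
    {
            int i;

            for (i = 0; i < num_mask; i++) {
                    if (prt[i].rctl == rctl && prt[i].type == type) {
                            if (prt[i].rcv_unsol)
                                    prt[i].rcv_unsol(seq);
                            return 1;       /* handled */
                    }
            }
            return 0;       /* caller logs the unexpected rctl/type pair */
    }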
1321/** 1925/**
1322 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1926 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 IOCB_t * irsp; 1943 IOCB_t * irsp;
1340 WORD5 * w5p; 1944 WORD5 * w5p;
1341 uint32_t Rctl, Type; 1945 uint32_t Rctl, Type;
1342 uint32_t match, i; 1946 uint32_t match;
1343 struct lpfc_iocbq *iocbq; 1947 struct lpfc_iocbq *iocbq;
1344 struct lpfc_dmabuf *dmzbuf; 1948 struct lpfc_dmabuf *dmzbuf;
1345 1949
@@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1482 } 2086 }
1483 } 2087 }
1484 2088
1485 /* unSolicited Responses */ 2089 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
1486 if (pring->prt[0].profile) {
1487 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1488 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1489 saveq);
1490 match = 1;
1491 } else {
1492 /* We must search, based on rctl / type
1493 for the right routine */
1494 for (i = 0; i < pring->num_mask; i++) {
1495 if ((pring->prt[i].rctl == Rctl)
1496 && (pring->prt[i].type == Type)) {
1497 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1498 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1499 (phba, pring, saveq);
1500 match = 1;
1501 break;
1502 }
1503 }
1504 }
1505 if (match == 0) {
1506 /* Unexpected Rctl / Type received */
1507 /* Ring <ringno> handler: unexpected
1508 Rctl <Rctl> Type <Type> received */
1509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1510 "0313 Ring %d handler: unexpected Rctl x%x " 2091 "0313 Ring %d handler: unexpected Rctl x%x "
1511 "Type x%x received\n", 2092 "Type x%x received\n",
1512 pring->ringno, Rctl, Type); 2093 pring->ringno, Rctl, Type);
1513 } 2094
1514 return 1; 2095 return 1;
1515} 2096}
1516 2097
@@ -1552,6 +2133,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1552} 2133}
1553 2134
1554/** 2135/**
2136 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2137 * @phba: Pointer to HBA context object.
2138 * @pring: Pointer to driver SLI ring object.
2139 * @iotag: IOCB tag.
2140 *
2141 * This function looks up the iocb_lookup table to get the command iocb
2142 * corresponding to the given iotag. This function is called with the
2143 * hbalock held.
2144 * This function returns the command iocb object if it finds the command
2145 * iocb else returns NULL.
2146 **/
2147static struct lpfc_iocbq *
2148lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2149 struct lpfc_sli_ring *pring, uint16_t iotag)
2150{
2151 struct lpfc_iocbq *cmd_iocb;
2152
2153 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2154 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2155 list_del_init(&cmd_iocb->list);
2156 pring->txcmplq_cnt--;
2157 return cmd_iocb;
2158 }
2159
2160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2161 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2162 iotag, phba->sli.last_iotag);
2163 return NULL;
2164}
2165
2166/**
1555 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2167 * lpfc_sli_process_sol_iocb - process solicited iocb completion
1556 * @phba: Pointer to HBA context object. 2168 * @phba: Pointer to HBA context object.
1557 * @pring: Pointer to driver SLI ring object. 2169 * @pring: Pointer to driver SLI ring object.
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1954 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2566 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1955 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2567 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1956 spin_unlock_irqrestore(&phba->hbalock, iflag); 2568 spin_unlock_irqrestore(&phba->hbalock, iflag);
1957 lpfc_rampdown_queue_depth(phba); 2569 phba->lpfc_rampdown_queue_depth(phba);
1958 spin_lock_irqsave(&phba->hbalock, iflag); 2570 spin_lock_irqsave(&phba->hbalock, iflag);
1959 } 2571 }
1960 2572
@@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2068} 2680}
2069 2681
2070/** 2682/**
2071 * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings 2683 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2684 * @phba: Pointer to HBA context object.
2685 * @pring: Pointer to driver SLI ring object.
2686 * @rspiocbp: Pointer to driver response IOCB object.
2687 *
2688 * This function is called from the worker thread when there is a slow-path
2689 * response IOCB to process. This function chains all the response iocbs until
2690 * seeing the iocb with the LE bit set. The function will call
2691 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2692 * completion of a command iocb. The function will call the
2693 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2694 * The function frees the resources or calls the completion handler if this
2695 * iocb is an abort completion. The function returns NULL when the response
2696 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2697 * this function shall chain the iocb on to the iocb_continueq and return the
2698 * response iocb passed in.
2699 **/
2700static struct lpfc_iocbq *
2701lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2702 struct lpfc_iocbq *rspiocbp)
2703{
2704 struct lpfc_iocbq *saveq;
2705 struct lpfc_iocbq *cmdiocbp;
2706 struct lpfc_iocbq *next_iocb;
2707 IOCB_t *irsp = NULL;
2708 uint32_t free_saveq;
2709 uint8_t iocb_cmd_type;
2710 lpfc_iocb_type type;
2711 unsigned long iflag;
2712 int rc;
2713
2714 spin_lock_irqsave(&phba->hbalock, iflag);
2715 /* First add the response iocb to the continueq list */
2716 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2717 pring->iocb_continueq_cnt++;
2718
2719 /* Now, determine whether the list is completed for processing */
2720 irsp = &rspiocbp->iocb;
2721 if (irsp->ulpLe) {
2722 /*
2723 * By default, the driver expects to free all resources
2724 * associated with this iocb completion.
2725 */
2726 free_saveq = 1;
2727 saveq = list_get_first(&pring->iocb_continueq,
2728 struct lpfc_iocbq, list);
2729 irsp = &(saveq->iocb);
2730 list_del_init(&pring->iocb_continueq);
2731 pring->iocb_continueq_cnt = 0;
2732
2733 pring->stats.iocb_rsp++;
2734
2735 /*
2736 * If resource errors reported from HBA, reduce
2737 * queuedepths of the SCSI device.
2738 */
2739 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2740 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2741 spin_unlock_irqrestore(&phba->hbalock, iflag);
2742 phba->lpfc_rampdown_queue_depth(phba);
2743 spin_lock_irqsave(&phba->hbalock, iflag);
2744 }
2745
2746 if (irsp->ulpStatus) {
2747 /* Rsp ring <ringno> error: IOCB */
2748 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2749 "0328 Rsp Ring %d error: "
2750 "IOCB Data: "
2751 "x%x x%x x%x x%x "
2752 "x%x x%x x%x x%x "
2753 "x%x x%x x%x x%x "
2754 "x%x x%x x%x x%x\n",
2755 pring->ringno,
2756 irsp->un.ulpWord[0],
2757 irsp->un.ulpWord[1],
2758 irsp->un.ulpWord[2],
2759 irsp->un.ulpWord[3],
2760 irsp->un.ulpWord[4],
2761 irsp->un.ulpWord[5],
2762 *(((uint32_t *) irsp) + 6),
2763 *(((uint32_t *) irsp) + 7),
2764 *(((uint32_t *) irsp) + 8),
2765 *(((uint32_t *) irsp) + 9),
2766 *(((uint32_t *) irsp) + 10),
2767 *(((uint32_t *) irsp) + 11),
2768 *(((uint32_t *) irsp) + 12),
2769 *(((uint32_t *) irsp) + 13),
2770 *(((uint32_t *) irsp) + 14),
2771 *(((uint32_t *) irsp) + 15));
2772 }
2773
2774 /*
2775 * Fetch the IOCB command type and call the correct completion
2776 * routine. Solicited and Unsolicited IOCBs on the ELS ring
2777 * get freed back to the lpfc_iocb_list by the discovery
2778 * kernel thread.
2779 */
2780 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2781 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2782 switch (type) {
2783 case LPFC_SOL_IOCB:
2784 spin_unlock_irqrestore(&phba->hbalock, iflag);
2785 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2786 spin_lock_irqsave(&phba->hbalock, iflag);
2787 break;
2788
2789 case LPFC_UNSOL_IOCB:
2790 spin_unlock_irqrestore(&phba->hbalock, iflag);
2791 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2792 spin_lock_irqsave(&phba->hbalock, iflag);
2793 if (!rc)
2794 free_saveq = 0;
2795 break;
2796
2797 case LPFC_ABORT_IOCB:
2798 cmdiocbp = NULL;
2799 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2800 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2801 saveq);
2802 if (cmdiocbp) {
2803 /* Call the specified completion routine */
2804 if (cmdiocbp->iocb_cmpl) {
2805 spin_unlock_irqrestore(&phba->hbalock,
2806 iflag);
2807 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2808 saveq);
2809 spin_lock_irqsave(&phba->hbalock,
2810 iflag);
2811 } else
2812 __lpfc_sli_release_iocbq(phba,
2813 cmdiocbp);
2814 }
2815 break;
2816
2817 case LPFC_UNKNOWN_IOCB:
2818 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2819 char adaptermsg[LPFC_MAX_ADPTMSG];
2820 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2821 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2822 MAX_MSG_DATA);
2823 dev_warn(&((phba->pcidev)->dev),
2824 "lpfc%d: %s\n",
2825 phba->brd_no, adaptermsg);
2826 } else {
2827 /* Unknown IOCB command */
2828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2829 "0335 Unknown IOCB "
2830 "command Data: x%x "
2831 "x%x x%x x%x\n",
2832 irsp->ulpCommand,
2833 irsp->ulpStatus,
2834 irsp->ulpIoTag,
2835 irsp->ulpContext);
2836 }
2837 break;
2838 }
2839
2840 if (free_saveq) {
2841 list_for_each_entry_safe(rspiocbp, next_iocb,
2842 &saveq->list, list) {
2843 list_del(&rspiocbp->list);
2844 __lpfc_sli_release_iocbq(phba, rspiocbp);
2845 }
2846 __lpfc_sli_release_iocbq(phba, saveq);
2847 }
2848 rspiocbp = NULL;
2849 }
2850 spin_unlock_irqrestore(&phba->hbalock, iflag);
2851 return rspiocbp;
2852}
2853
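The contract of lpfc_sli_sp_handle_rspiocb is easiest to see stripped of locking and logging: iocbs are parked on the continue queue until one arrives with the LE (list end) bit, at which point the whole chain is dispatched and freed. A control-flow skeleton with stand-in types:

    /* Control-flow skeleton of the LE-bit accumulation in
     * lpfc_sli_sp_handle_rspiocb(); types and helpers are stand-ins. */
    struct demo_iocb {
            int ulp_le;                     /* last entry of the sequence? */
            struct demo_iocb *next;
    };

    static struct demo_iocb *demo_continueq;        /* pending partial chain */

    static void demo_process_chain(struct demo_iocb *chain)
    {
            /* stand-in for the SOL/UNSOL/ABORT type dispatch and freeing */
    }

    /* Returns the iocb while still accumulating, NULL once a complete
     * chain has been handed off -- matching the wrapper's contract. */
    static struct demo_iocb *demo_handle_rsp(struct demo_iocb *rsp)
    {
            rsp->next = demo_continueq;
            demo_continueq = rsp;
            if (!rsp->ulp_le)
                    return rsp;             /* wait for the rest of the chain */
            demo_process_chain(demo_continueq);
            demo_continueq = NULL;
            return NULL;
    }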
2854/**
2855 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2072 * @phba: Pointer to HBA context object. 2856 * @phba: Pointer to HBA context object.
2073 * @pring: Pointer to driver SLI ring object. 2857 * @pring: Pointer to driver SLI ring object.
2074 * @mask: Host attention register mask for this ring. 2858 * @mask: Host attention register mask for this ring.
2075 * 2859 *
2076 * This function is called from the worker thread when there is a ring 2860 * This routine wraps the actual slow-ring event process routine, invoked
2077 * event for non-fcp rings. The caller does not hold any lock . 2861 * through the API jump table function pointer in the lpfc_hba struct.
2078 * The function processes each response iocb in the response ring until it
2079 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2080 * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
2081 * response iocb indicates a completion of a command iocb. The function
2082 * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
2083 * iocb. The function frees the resources or calls the completion handler if
2084 * this iocb is an abort completion. The function returns 0 when the allocated
2085 * iocbs are not freed, otherwise returns 1.
2086 **/ 2862 **/
2087int 2863void
2088lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2864lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2089 struct lpfc_sli_ring *pring, uint32_t mask) 2865 struct lpfc_sli_ring *pring, uint32_t mask)
2090{ 2866{
2867 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2868}
2869
2870/**
2871 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2872 * @phba: Pointer to HBA context object.
2873 * @pring: Pointer to driver SLI ring object.
2874 * @mask: Host attention register mask for this ring.
2875 *
2876 * This function is called from the worker thread when there is a ring event
2877 * for non-fcp rings. The caller does not hold any lock. The function will
2878 * remove each response iocb from the response ring and call the handle
2879 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2880 **/
2881static void
2882lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2883 struct lpfc_sli_ring *pring, uint32_t mask)
2884{
2091 struct lpfc_pgp *pgp; 2885 struct lpfc_pgp *pgp;
2092 IOCB_t *entry; 2886 IOCB_t *entry;
2093 IOCB_t *irsp = NULL; 2887 IOCB_t *irsp = NULL;
2094 struct lpfc_iocbq *rspiocbp = NULL; 2888 struct lpfc_iocbq *rspiocbp = NULL;
2095 struct lpfc_iocbq *next_iocb;
2096 struct lpfc_iocbq *cmdiocbp;
2097 struct lpfc_iocbq *saveq;
2098 uint8_t iocb_cmd_type;
2099 lpfc_iocb_type type;
2100 uint32_t status, free_saveq;
2101 uint32_t portRspPut, portRspMax; 2889 uint32_t portRspPut, portRspMax;
2102 int rc = 1;
2103 unsigned long iflag; 2890 unsigned long iflag;
2891 uint32_t status;
2104 2892
2105 pgp = &phba->port_gp[pring->ringno]; 2893 pgp = &phba->port_gp[pring->ringno];
2106 spin_lock_irqsave(&phba->hbalock, iflag); 2894 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2128 phba->work_hs = HS_FFER3; 2916 phba->work_hs = HS_FFER3;
2129 lpfc_handle_eratt(phba); 2917 lpfc_handle_eratt(phba);
2130 2918
2131 return 1; 2919 return;
2132 } 2920 }
2133 2921
2134 rmb(); 2922 rmb();
@@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2173 2961
2174 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2962 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2175 2963
2176 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2964 spin_unlock_irqrestore(&phba->hbalock, iflag);
2177 2965 /* Handle the response IOCB */
2178 pring->iocb_continueq_cnt++; 2966 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2179 if (irsp->ulpLe) { 2967 spin_lock_irqsave(&phba->hbalock, iflag);
2180 /*
2181 * By default, the driver expects to free all resources
2182 * associated with this iocb completion.
2183 */
2184 free_saveq = 1;
2185 saveq = list_get_first(&pring->iocb_continueq,
2186 struct lpfc_iocbq, list);
2187 irsp = &(saveq->iocb);
2188 list_del_init(&pring->iocb_continueq);
2189 pring->iocb_continueq_cnt = 0;
2190
2191 pring->stats.iocb_rsp++;
2192
2193 /*
2194 * If resource errors reported from HBA, reduce
2195 * queuedepths of the SCSI device.
2196 */
2197 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2198 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2199 spin_unlock_irqrestore(&phba->hbalock, iflag);
2200 lpfc_rampdown_queue_depth(phba);
2201 spin_lock_irqsave(&phba->hbalock, iflag);
2202 }
2203
2204 if (irsp->ulpStatus) {
2205 /* Rsp ring <ringno> error: IOCB */
2206 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2207 "0328 Rsp Ring %d error: "
2208 "IOCB Data: "
2209 "x%x x%x x%x x%x "
2210 "x%x x%x x%x x%x "
2211 "x%x x%x x%x x%x "
2212 "x%x x%x x%x x%x\n",
2213 pring->ringno,
2214 irsp->un.ulpWord[0],
2215 irsp->un.ulpWord[1],
2216 irsp->un.ulpWord[2],
2217 irsp->un.ulpWord[3],
2218 irsp->un.ulpWord[4],
2219 irsp->un.ulpWord[5],
2220 *(((uint32_t *) irsp) + 6),
2221 *(((uint32_t *) irsp) + 7),
2222 *(((uint32_t *) irsp) + 8),
2223 *(((uint32_t *) irsp) + 9),
2224 *(((uint32_t *) irsp) + 10),
2225 *(((uint32_t *) irsp) + 11),
2226 *(((uint32_t *) irsp) + 12),
2227 *(((uint32_t *) irsp) + 13),
2228 *(((uint32_t *) irsp) + 14),
2229 *(((uint32_t *) irsp) + 15));
2230 }
2231
2232 /*
2233 * Fetch the IOCB command type and call the correct
2234 * completion routine. Solicited and Unsolicited
2235 * IOCBs on the ELS ring get freed back to the
2236 * lpfc_iocb_list by the discovery kernel thread.
2237 */
2238 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2239 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2240 if (type == LPFC_SOL_IOCB) {
2241 spin_unlock_irqrestore(&phba->hbalock, iflag);
2242 rc = lpfc_sli_process_sol_iocb(phba, pring,
2243 saveq);
2244 spin_lock_irqsave(&phba->hbalock, iflag);
2245 } else if (type == LPFC_UNSOL_IOCB) {
2246 spin_unlock_irqrestore(&phba->hbalock, iflag);
2247 rc = lpfc_sli_process_unsol_iocb(phba, pring,
2248 saveq);
2249 spin_lock_irqsave(&phba->hbalock, iflag);
2250 if (!rc)
2251 free_saveq = 0;
2252 } else if (type == LPFC_ABORT_IOCB) {
2253 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
2254 ((cmdiocbp =
2255 lpfc_sli_iocbq_lookup(phba, pring,
2256 saveq)))) {
2257 /* Call the specified completion
2258 routine */
2259 if (cmdiocbp->iocb_cmpl) {
2260 spin_unlock_irqrestore(
2261 &phba->hbalock,
2262 iflag);
2263 (cmdiocbp->iocb_cmpl) (phba,
2264 cmdiocbp, saveq);
2265 spin_lock_irqsave(
2266 &phba->hbalock,
2267 iflag);
2268 } else
2269 __lpfc_sli_release_iocbq(phba,
2270 cmdiocbp);
2271 }
2272 } else if (type == LPFC_UNKNOWN_IOCB) {
2273 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2274
2275 char adaptermsg[LPFC_MAX_ADPTMSG];
2276
2277 memset(adaptermsg, 0,
2278 LPFC_MAX_ADPTMSG);
2279 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2280 MAX_MSG_DATA);
2281 dev_warn(&((phba->pcidev)->dev),
2282 "lpfc%d: %s\n",
2283 phba->brd_no, adaptermsg);
2284 } else {
2285 /* Unknown IOCB command */
2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2287 "0335 Unknown IOCB "
2288 "command Data: x%x "
2289 "x%x x%x x%x\n",
2290 irsp->ulpCommand,
2291 irsp->ulpStatus,
2292 irsp->ulpIoTag,
2293 irsp->ulpContext);
2294 }
2295 }
2296
2297 if (free_saveq) {
2298 list_for_each_entry_safe(rspiocbp, next_iocb,
2299 &saveq->list, list) {
2300 list_del(&rspiocbp->list);
2301 __lpfc_sli_release_iocbq(phba,
2302 rspiocbp);
2303 }
2304 __lpfc_sli_release_iocbq(phba, saveq);
2305 }
2306 rspiocbp = NULL;
2307 }
2308 2968
2309 /* 2969 /*
2310 * If the port response put pointer has not been updated, sync 2970 * If the port response put pointer has not been updated, sync
@@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2338 } 2998 }
2339 2999
2340 spin_unlock_irqrestore(&phba->hbalock, iflag); 3000 spin_unlock_irqrestore(&phba->hbalock, iflag);
2341 return rc; 3001 return;
3002}
3003
3004/**
3005 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3006 * @phba: Pointer to HBA context object.
3007 * @pring: Pointer to driver SLI ring object.
3008 * @mask: Host attention register mask for this ring.
3009 *
3010 * This function is called from the worker thread when there is a pending
3011 * ELS response iocb on the driver internal slow-path response iocb worker
3012 * queue. The caller does not hold any lock. The function will remove each
3013 * response iocb from the response worker queue and call the handle
3014 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3015 **/
3016static void
3017lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask)
3019{
3020 struct lpfc_iocbq *irspiocbq;
3021 unsigned long iflag;
3022
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
3024 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
3027 irspiocbq, struct lpfc_iocbq, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
3031 }
2342} 3032}
2343 3033
2344/** 3034/**
@@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2420} 3110}
2421 3111
2422/** 3112/**
2423 * lpfc_sli_brdready - Check for host status bits 3113 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
2424 * @phba: Pointer to HBA context object. 3114 * @phba: Pointer to HBA context object.
2425 * @mask: Bit mask to be checked. 3115 * @mask: Bit mask to be checked.
2426 * 3116 *
@@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2432 * function returns 1 when HBA fail to restart otherwise returns 3122 * function returns 1 when HBA fail to restart otherwise returns
2433 * zero. 3123 * zero.
2434 **/ 3124 **/
2435int 3125static int
2436lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3126lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2437{ 3127{
2438 uint32_t status; 3128 uint32_t status;
2439 int i = 0; 3129 int i = 0;
@@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
2477 return retval; 3167 return retval;
2478} 3168}
2479 3169
3170/**
3171 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3172 * @phba: Pointer to HBA context object.
3173 * @mask: Bit mask to be checked.
3174 *
3175 * This function checks the host status register to check if the HBA is
3176 * ready. This function will wait in a loop for the HBA to be ready.
3177 * If the HBA is not ready, the function will reset the HBA PCI
3178 * function again. The function returns 1 when the HBA fails to be
3179 * ready; otherwise it returns zero.
3180 **/
3181static int
3182lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3183{
3184 uint32_t status;
3185 int retval = 0;
3186
3187 /* Read the HBA Host Status Register */
3188 status = lpfc_sli4_post_status_check(phba);
3189
3190 if (status) {
3191 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3192 lpfc_sli_brdrestart(phba);
3193 status = lpfc_sli4_post_status_check(phba);
3194 }
3195
3196 /* Check to see if any errors occurred during init */
3197 if (status) {
3198 phba->link_state = LPFC_HBA_ERROR;
3199 retval = 1;
3200 } else
3201 phba->sli4_hba.intr_enable = 0;
3202
3203 return retval;
3204}
3205
3206/**
3207 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3208 * @phba: Pointer to HBA context object.
3209 * @mask: Bit mask to be checked.
3210 *
3211 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
3212 * invoked through the API jump table function pointer in the lpfc_hba struct.
3213 **/
3214int
3215lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3216{
3217 return phba->lpfc_sli_brdready(phba, mask);
3218}
3219
2480#define BARRIER_TEST_PATTERN (0xdeadbeef) 3220#define BARRIER_TEST_PATTERN (0xdeadbeef)
2481 3221
2482/** 3222/**
@@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
2532 mdelay(1); 3272 mdelay(1);
2533 3273
2534 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3274 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
2535 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 3275 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2536 phba->pport->stopped) 3276 phba->pport->stopped)
2537 goto restore_hc; 3277 goto restore_hc;
2538 else 3278 else
@@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2613 return 1; 3353 return 1;
2614 } 3354 }
2615 3355
2616 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 3356 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock);
2617 3359
2618 mempool_free(pmb, phba->mbox_mem_pool); 3360 mempool_free(pmb, phba->mbox_mem_pool);
2619 3361
@@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2636 } 3378 }
2637 spin_lock_irq(&phba->hbalock); 3379 spin_lock_irq(&phba->hbalock);
2638 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3380 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3381 psli->mbox_active = NULL;
2639 phba->link_flag &= ~LS_IGNORE_ERATT; 3382 phba->link_flag &= ~LS_IGNORE_ERATT;
2640 spin_unlock_irq(&phba->hbalock); 3383 spin_unlock_irq(&phba->hbalock);
2641 3384
2642 psli->mbox_active = NULL;
2643 lpfc_hba_down_post(phba); 3385 lpfc_hba_down_post(phba);
2644 phba->link_state = LPFC_HBA_ERROR; 3386 phba->link_state = LPFC_HBA_ERROR;
2645 3387
@@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2647} 3389}
2648 3390
2649/** 3391/**
2650 * lpfc_sli_brdreset - Reset the HBA 3392 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
2651 * @phba: Pointer to HBA context object. 3393 * @phba: Pointer to HBA context object.
2652 * 3394 *
2653 * This function resets the HBA by writing HC_INITFF to the control 3395 * This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2683 (cfg_value & 3425 (cfg_value &
2684 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3426 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2685 3427
2686 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 3428 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3429
2687 /* Now toggle INITFF bit in the Host Control Register */ 3430 /* Now toggle INITFF bit in the Host Control Register */
2688 writel(HC_INITFF, phba->HCregaddr); 3431 writel(HC_INITFF, phba->HCregaddr);
2689 mdelay(1); 3432 mdelay(1);
@@ -2710,7 +3453,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2710} 3453}
2711 3454
2712/** 3455/**
2713 * lpfc_sli_brdrestart - Restart the HBA 3456 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3457 * @phba: Pointer to HBA context object.
3458 *
3459 * This function resets a SLI4 HBA. It disables PCI layer parity
3460 * checking while resetting the device. The caller is not required to
3461 * hold any locks.
3462 *
3463 * This function always returns 0.
3464 **/
3465int
3466lpfc_sli4_brdreset(struct lpfc_hba *phba)
3467{
3468 struct lpfc_sli *psli = &phba->sli;
3469 uint16_t cfg_value;
3470 uint8_t qindx;
3471
3472 /* Reset HBA */
3473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3474 "0295 Reset HBA Data: x%x x%x\n",
3475 phba->pport->port_state, psli->sli_flag);
3476
3477 /* perform board reset */
3478 phba->fc_eventTag = 0;
3479 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0;
3481
3482 /* Turn off parity checking and serr during the physical reset */
3483 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3484 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3485 (cfg_value &
3486 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3487
3488 spin_lock_irq(&phba->hbalock);
3489 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3490 phba->fcf.fcf_flag = 0;
3491 /* Clean up the child queue list for the CQs */
3492 list_del_init(&phba->sli4_hba.mbx_wq->list);
3493 list_del_init(&phba->sli4_hba.els_wq->list);
3494 list_del_init(&phba->sli4_hba.hdr_rq->list);
3495 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3502 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3503 spin_unlock_irq(&phba->hbalock);
3504
3505 /* Now physically reset the device */
3506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3507 "0389 Performing PCI function reset!\n");
3508 /* Perform FCoE PCI function reset */
3509 lpfc_pci_function_reset(phba);
3510
3511 return 0;
3512}
3513
3514/**
3515 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
2714 * @phba: Pointer to HBA context object. 3516 * @phba: Pointer to HBA context object.
2715 * 3517 *
2716 * This function is called in the SLI initialization code path to 3518 * This function is called in the SLI initialization code path to
@@ -2722,8 +3524,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2722 * The function does not guarantee completion of MBX_RESTART mailbox 3524 * The function does not guarantee completion of MBX_RESTART mailbox
2723 * command before the return of this function. 3525 * command before the return of this function.
2724 **/ 3526 **/
2725int 3527static int
2726lpfc_sli_brdrestart(struct lpfc_hba *phba) 3528lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
2727{ 3529{
2728 MAILBOX_t *mb; 3530 MAILBOX_t *mb;
2729 struct lpfc_sli *psli; 3531 struct lpfc_sli *psli;
@@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2762 lpfc_sli_brdreset(phba); 3564 lpfc_sli_brdreset(phba);
2763 phba->pport->stopped = 0; 3565 phba->pport->stopped = 0;
2764 phba->link_state = LPFC_INIT_START; 3566 phba->link_state = LPFC_INIT_START;
2765 3567 phba->hba_flag = 0;
2766 spin_unlock_irq(&phba->hbalock); 3568 spin_unlock_irq(&phba->hbalock);
2767 3569
2768 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3570 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2777,6 +3579,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2777} 3579}
2778 3580
2779/** 3581/**
3582 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3583 * @phba: Pointer to HBA context object.
3584 *
3585 * This function is called in the SLI initialization code path to restart
3586 * a SLI4 HBA. The caller is not required to hold any lock.
3587 * At the end of the function, it calls lpfc_hba_down_post function to
3588 * free any pending commands.
3589 **/
3590static int
3591lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3592{
3593 struct lpfc_sli *psli = &phba->sli;
3594
3595
3596 /* Restart HBA */
3597 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3598 "0296 Restart HBA Data: x%x x%x\n",
3599 phba->pport->port_state, psli->sli_flag);
3600
3601 lpfc_sli4_brdreset(phba);
3602
3603 spin_lock_irq(&phba->hbalock);
3604 phba->pport->stopped = 0;
3605 phba->link_state = LPFC_INIT_START;
3606 phba->hba_flag = 0;
3607 spin_unlock_irq(&phba->hbalock);
3608
3609 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3610 psli->stats_start = get_seconds();
3611
3612 lpfc_hba_down_post(phba);
3613
3614 return 0;
3615}
3616
3617/**
3618 * lpfc_sli_brdrestart - Wrapper func for restarting hba
3619 * @phba: Pointer to HBA context object.
3620 *
3621 * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
3622 * through the API jump table function pointer in the lpfc_hba struct.
3623 **/
3624int
3625lpfc_sli_brdrestart(struct lpfc_hba *phba)
3626{
3627 return phba->lpfc_sli_brdrestart(phba);
3628}
3629
3630/**
2780 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 3631 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
2781 * @phba: Pointer to HBA context object. 3632 * @phba: Pointer to HBA context object.
2782 * 3633 *
@@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2940 if (!pmb) 3791 if (!pmb)
2941 return -ENOMEM; 3792 return -ENOMEM;
2942 3793
2943 pmbox = &pmb->mb; 3794 pmbox = &pmb->u.mb;
2944 3795
2945 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 3796 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2946 phba->link_state = LPFC_INIT_MBX_CMDS; 3797 phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -2984,6 +3835,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2984} 3835}
2985 3836
2986/** 3837/**
3838 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3839 * @phba: Pointer to HBA context object.
3840 *
3841 * This function is called during SLI-4 initialization to configure
3842 * the receive buffer queue and post buffers to it. The caller is not
3843 * required to hold any locks. This function returns zero if successful,
3844 * otherwise a negative error code.
3845 **/
3846static int
3847lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3848{
3849 phba->hbq_in_use = 1;
3850 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3851 phba->hbq_count = 1;
3852 /* Initially populate or replenish the HBQs */
3853 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3854 return 0;
3855}
3856
3857/**
2987 * lpfc_sli_config_port - Issue config port mailbox command 3858 * lpfc_sli_config_port - Issue config port mailbox command
2988 * @phba: Pointer to HBA context object. 3859 * @phba: Pointer to HBA context object.
2989 * @sli_mode: sli mode - 2/3 3860 * @sli_mode: sli mode - 2/3
@@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3048 "0442 Adapter failed to init, mbxCmd x%x " 3919 "0442 Adapter failed to init, mbxCmd x%x "
3049 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 3920 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3050 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); 3921 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
3051 spin_lock_irq(&phba->hbalock); 3922 spin_lock_irq(&phba->hbalock);
3052 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 3923 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
3053 spin_unlock_irq(&phba->hbalock); 3924 spin_unlock_irq(&phba->hbalock);
3054 rc = -ENXIO; 3925 rc = -ENXIO;
3055 } else 3926 } else {
3927 /* Allow asynchronous mailbox command to go through */
3928 spin_lock_irq(&phba->hbalock);
3929 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3930 spin_unlock_irq(&phba->hbalock);
3056 done = 1; 3931 done = 1;
3932 }
3057 } 3933 }
3058 if (!done) { 3934 if (!done) {
3059 rc = -EINVAL; 3935 rc = -EINVAL;
3060 goto do_prep_failed; 3936 goto do_prep_failed;
3061 } 3937 }
3062 if (pmb->mb.un.varCfgPort.sli_mode == 3) { 3938 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3063 if (!pmb->mb.un.varCfgPort.cMA) { 3939 if (!pmb->u.mb.un.varCfgPort.cMA) {
3064 rc = -ENXIO; 3940 rc = -ENXIO;
3065 goto do_prep_failed; 3941 goto do_prep_failed;
3066 } 3942 }
3067 if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { 3943 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
3068 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3944 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3069 phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; 3945 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3946 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3947 phba->max_vpi : phba->max_vports;
3948
3070 } else 3949 } else
3071 phba->max_vpi = 0; 3950 phba->max_vpi = 0;
3072 if (pmb->mb.un.varCfgPort.gerbm) 3951 if (pmb->u.mb.un.varCfgPort.gdss)
3952 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3953 if (pmb->u.mb.un.varCfgPort.gerbm)
3073 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3954 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3074 if (pmb->mb.un.varCfgPort.gcrp) 3955 if (pmb->u.mb.un.varCfgPort.gcrp)
3075 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3956 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3076 if (pmb->mb.un.varCfgPort.ginb) { 3957 if (pmb->u.mb.un.varCfgPort.ginb) {
3077 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3958 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3078 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; 3959 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3079 phba->port_gp = phba->mbox->us.s3_inb_pgp.port; 3960 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3089 } 3970 }
3090 3971
3091 if (phba->cfg_enable_bg) { 3972 if (phba->cfg_enable_bg) {
3092 if (pmb->mb.un.varCfgPort.gbg) 3973 if (pmb->u.mb.un.varCfgPort.gbg)
3093 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 3974 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3094 else 3975 else
3095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
3184 if (rc) 4065 if (rc)
3185 goto lpfc_sli_hba_setup_error; 4066 goto lpfc_sli_hba_setup_error;
3186 } 4067 }
3187 4068 spin_lock_irq(&phba->hbalock);
3188 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4069 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4070 spin_unlock_irq(&phba->hbalock);
3189 4071
3190 rc = lpfc_config_port_post(phba); 4072 rc = lpfc_config_port_post(phba);
3191 if (rc) 4073 if (rc)
@@ -3200,6 +4082,488 @@ lpfc_sli_hba_setup_error:
3200 return rc; 4082 return rc;
3201} 4083}
3202 4084
4085/**
4086 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4087 * @phba: Pointer to HBA context object.
4088 * @mboxq: mailbox pointer.
4089 * This function issues a dump mailbox command to read config region
4090 * 23, parses the records in the region, and populates the driver
4091 * data structure.
4092 **/
4093static int
4094lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4095 LPFC_MBOXQ_t *mboxq)
4096{
4097 struct lpfc_dmabuf *mp;
4098 struct lpfc_mqe *mqe;
4099 uint32_t data_length;
4100 int rc;
4101
4102 /* Program the default value of vlan_id and fc_map */
4103 phba->valid_vlan = 0;
4104 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4105 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4106 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4107
4108 mqe = &mboxq->u.mqe;
4109 if (lpfc_dump_fcoe_param(phba, mboxq))
4110 return -ENOMEM;
4111
4112 mp = (struct lpfc_dmabuf *) mboxq->context1;
4113 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4114
4115 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4116 "(%d):2571 Mailbox cmd x%x Status x%x "
4117 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4118 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4119 "CQ: x%x x%x x%x x%x\n",
4120 mboxq->vport ? mboxq->vport->vpi : 0,
4121 bf_get(lpfc_mqe_command, mqe),
4122 bf_get(lpfc_mqe_status, mqe),
4123 mqe->un.mb_words[0], mqe->un.mb_words[1],
4124 mqe->un.mb_words[2], mqe->un.mb_words[3],
4125 mqe->un.mb_words[4], mqe->un.mb_words[5],
4126 mqe->un.mb_words[6], mqe->un.mb_words[7],
4127 mqe->un.mb_words[8], mqe->un.mb_words[9],
4128 mqe->un.mb_words[10], mqe->un.mb_words[11],
4129 mqe->un.mb_words[12], mqe->un.mb_words[13],
4130 mqe->un.mb_words[14], mqe->un.mb_words[15],
4131 mqe->un.mb_words[16], mqe->un.mb_words[50],
4132 mboxq->mcqe.word0,
4133 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4134 mboxq->mcqe.trailer);
4135
4136 if (rc) {
4137 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4138 kfree(mp);
4139 return -EIO;
4140 }
4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE)
4143 return -EIO;
4144
4145 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4147 kfree(mp);
4148 return 0;
4149}
4150
4151/**
4152 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4153 * @phba: pointer to lpfc hba data structure.
4154 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4155 * @vpd: pointer to the memory to hold resulting port vpd data.
4156 * @vpd_size: On input, the number of bytes allocated to @vpd.
4157 * On output, the number of data bytes in @vpd.
4158 *
4159 * This routine executes a READ_REV SLI4 mailbox command. In
4160 * addition, this routine gets the port vpd data.
4161 *
4162 * Return codes
4163 * 0 - successful
4164 * ENOMEM - could not allocate memory.
4165 **/
4166static int
4167lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4168 uint8_t *vpd, uint32_t *vpd_size)
4169{
4170 int rc = 0;
4171 uint32_t dma_size;
4172 struct lpfc_dmabuf *dmabuf;
4173 struct lpfc_mqe *mqe;
4174
4175 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4176 if (!dmabuf)
4177 return -ENOMEM;
4178
4179 /*
4180 * Get a DMA buffer for the vpd data resulting from the READ_REV
4181 * mailbox command.
4182 */
4183 dma_size = *vpd_size;
4184 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4185 dma_size,
4186 &dmabuf->phys,
4187 GFP_KERNEL);
4188 if (!dmabuf->virt) {
4189 kfree(dmabuf);
4190 return -ENOMEM;
4191 }
4192 memset(dmabuf->virt, 0, dma_size);
4193
4194 /*
4195 * The SLI4 implementation of READ_REV conflicts at word1,
4196 * bits 31:16 and SLI4 adds vpd functionality not present
4197 * in SLI3. This code corrects the conflicts.
4198 */
4199 lpfc_read_rev(phba, mboxq);
4200 mqe = &mboxq->u.mqe;
4201 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4202 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4203 mqe->un.read_rev.word1 &= 0x0000FFFF;
4204 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4205 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4206
4207 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4208 if (rc) {
4209 dma_free_coherent(&phba->pcidev->dev, dma_size,
4210 dmabuf->virt, dmabuf->phys);
4211 return -EIO;
4212 }
4213
4214 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4215 "(%d):0380 Mailbox cmd x%x Status x%x "
4216 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4217 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4218 "CQ: x%x x%x x%x x%x\n",
4219 mboxq->vport ? mboxq->vport->vpi : 0,
4220 bf_get(lpfc_mqe_command, mqe),
4221 bf_get(lpfc_mqe_status, mqe),
4222 mqe->un.mb_words[0], mqe->un.mb_words[1],
4223 mqe->un.mb_words[2], mqe->un.mb_words[3],
4224 mqe->un.mb_words[4], mqe->un.mb_words[5],
4225 mqe->un.mb_words[6], mqe->un.mb_words[7],
4226 mqe->un.mb_words[8], mqe->un.mb_words[9],
4227 mqe->un.mb_words[10], mqe->un.mb_words[11],
4228 mqe->un.mb_words[12], mqe->un.mb_words[13],
4229 mqe->un.mb_words[14], mqe->un.mb_words[15],
4230 mqe->un.mb_words[16], mqe->un.mb_words[50],
4231 mboxq->mcqe.word0,
4232 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4233 mboxq->mcqe.trailer);
4234
4235 /*
4236 * The available vpd length cannot be bigger than the
4237 * DMA buffer passed to the port. Catch the less than
4238 * case and update the caller's size.
4239 */
4240 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4241 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4242
4243 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4244 dma_free_coherent(&phba->pcidev->dev, dma_size,
4245 dmabuf->virt, dmabuf->phys);
4246 kfree(dmabuf);
4247 return 0;
4248}
4249
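The READ_REV path above follows a common allocate/issue/copy/free shape: a
coherent DMA buffer of *vpd_size bytes is handed to the port, and on
completion the caller's size is clamped to what the port actually returned
before copying out. A hedged standalone sketch of that clamp-then-copy step
(types simplified, not lpfc code):

    #include <stdint.h>
    #include <string.h>

    /* Copy up to *out_size bytes of vpd from the DMA buffer, shrinking
     * *out_size if the port reported fewer available bytes. */
    static void copy_vpd(uint8_t *out, uint32_t *out_size,
                         const uint8_t *dma_buf, uint32_t avail_len)
    {
        if (avail_len < *out_size)
            *out_size = avail_len;      /* never read past the valid data */
        memcpy(out, dma_buf, *out_size);
    }
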
4250/**
4251 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4252 * @phba: pointer to lpfc hba data structure.
4253 *
4254 * This routine is called to explicitly arm the SLI4 device's completion and
 4255 * event queues.
4256 **/
4257static void
4258lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4259{
4260 uint8_t fcp_eqidx;
4261
4262 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4263 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4264 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4265 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4266 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4267 LPFC_QUEUE_REARM);
4268 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4269 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4270 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4271 LPFC_QUEUE_REARM);
4272}
4273
4274/**
 4275 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4276 * @phba: Pointer to HBA context object.
4277 *
 4278 * This function is the main SLI4 device initialization PCI function. This
 4279 * function is called by the HBA initialization code, HBA reset code and
4280 * HBA error attention handler code. Caller is not required to hold any
4281 * locks.
4282 **/
4283int
4284lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4285{
4286 int rc;
4287 LPFC_MBOXQ_t *mboxq;
4288 struct lpfc_mqe *mqe;
4289 uint8_t *vpd;
4290 uint32_t vpd_size;
4291 uint32_t ftr_rsp = 0;
4292 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4293 struct lpfc_vport *vport = phba->pport;
4294 struct lpfc_dmabuf *mp;
4295
4296 /* Perform a PCI function reset to start from clean */
4297 rc = lpfc_pci_function_reset(phba);
4298 if (unlikely(rc))
4299 return -ENODEV;
4300
 4301	/* Check the HBA Host Status Register for readiness */
4302 rc = lpfc_sli4_post_status_check(phba);
4303 if (unlikely(rc))
4304 return -ENODEV;
4305 else {
4306 spin_lock_irq(&phba->hbalock);
4307 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4308 spin_unlock_irq(&phba->hbalock);
4309 }
4310
4311 /*
4312 * Allocate a single mailbox container for initializing the
4313 * port.
4314 */
4315 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4316 if (!mboxq)
4317 return -ENOMEM;
4318
4319 /*
4320 * Continue initialization with default values even if driver failed
4321 * to read FCoE param config regions
4322 */
4323 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4324 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4325 "2570 Failed to read FCoE parameters \n");
4326
4327 /* Issue READ_REV to collect vpd and FW information. */
4328 vpd_size = PAGE_SIZE;
4329 vpd = kzalloc(vpd_size, GFP_KERNEL);
4330 if (!vpd) {
4331 rc = -ENOMEM;
4332 goto out_free_mbox;
4333 }
4334
4335 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4336 if (unlikely(rc))
4337 goto out_free_vpd;
4338
4339 mqe = &mboxq->u.mqe;
4340 if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
4341 &mqe->un.read_rev) != LPFC_SLI_REV4) ||
4342 (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
4343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4344 "0376 READ_REV Error. SLI Level %d "
4345 "FCoE enabled %d\n",
4346 bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
4347 bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
4348 rc = -EIO;
4349 goto out_free_vpd;
4350 }
 4351	/* Single threaded at this point, so the lock is not strictly needed */
4352 spin_lock_irq(&phba->hbalock);
4353 phba->hba_flag |= HBA_FCOE_SUPPORT;
4354 spin_unlock_irq(&phba->hbalock);
4355 /*
4356 * Evaluate the read rev and vpd data. Populate the driver
4357 * state with the results. If this routine fails, the failure
4358 * is not fatal as the driver will use generic values.
4359 */
4360 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4361 if (unlikely(!rc)) {
4362 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4363 "0377 Error %d parsing vpd. "
4364 "Using defaults.\n", rc);
4365 rc = 0;
4366 }
4367
 4368	/* By now, the SLI revision should be determined; hard code for now */
4369 phba->sli_rev = LPFC_SLI_REV4;
4370
4371 /*
4372 * Discover the port's supported feature set and match it against the
 4373	 * host's requests.
4374 */
4375 lpfc_request_features(phba, mboxq);
4376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4377 if (unlikely(rc)) {
4378 rc = -EIO;
4379 goto out_free_vpd;
4380 }
4381
4382 /*
4383 * The port must support FCP initiator mode as this is the
4384 * only mode running in the host.
4385 */
4386 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4387 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4388 "0378 No support for fcpi mode.\n");
4389 ftr_rsp++;
4390 }
4391
4392 /*
4393 * If the port cannot support the host's requested features
4394 * then turn off the global config parameters to disable the
4395 * feature in the driver. This is not a fatal error.
4396 */
4397 if ((phba->cfg_enable_bg) &&
4398 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4399 ftr_rsp++;
4400
4401 if (phba->max_vpi && phba->cfg_enable_npiv &&
4402 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4403 ftr_rsp++;
4404
4405 if (ftr_rsp) {
4406 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4407 "0379 Feature Mismatch Data: x%08x %08x "
4408 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4409 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4410 phba->cfg_enable_npiv, phba->max_vpi);
4411 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4412 phba->cfg_enable_bg = 0;
4413 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4414 phba->cfg_enable_npiv = 0;
4415 }
4416
4417 /* These SLI3 features are assumed in SLI4 */
4418 spin_lock_irq(&phba->hbalock);
4419 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4420 spin_unlock_irq(&phba->hbalock);
4421
4422 /* Read the port's service parameters. */
4423 lpfc_read_sparam(phba, mboxq, vport->vpi);
4424 mboxq->vport = vport;
4425 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4426 mp = (struct lpfc_dmabuf *) mboxq->context1;
4427 if (rc == MBX_SUCCESS) {
4428 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4429 rc = 0;
4430 }
4431
4432 /*
4433 * This memory was allocated by the lpfc_read_sparam routine. Release
4434 * it to the mbuf pool.
4435 */
4436 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4437 kfree(mp);
4438 mboxq->context1 = NULL;
4439 if (unlikely(rc)) {
4440 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4441 "0382 READ_SPARAM command failed "
4442 "status %d, mbxStatus x%x\n",
4443 rc, bf_get(lpfc_mqe_status, mqe));
4444 phba->link_state = LPFC_HBA_ERROR;
4445 rc = -EIO;
4446 goto out_free_vpd;
4447 }
4448
4449 if (phba->cfg_soft_wwnn)
4450 u64_to_wwn(phba->cfg_soft_wwnn,
4451 vport->fc_sparam.nodeName.u.wwn);
4452 if (phba->cfg_soft_wwpn)
4453 u64_to_wwn(phba->cfg_soft_wwpn,
4454 vport->fc_sparam.portName.u.wwn);
4455 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4456 sizeof(struct lpfc_name));
4457 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4458 sizeof(struct lpfc_name));
4459
4460 /* Update the fc_host data structures with new wwn. */
4461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4463
4464 /* Register SGL pool to the device using non-embedded mailbox command */
4465 rc = lpfc_sli4_post_sgl_list(phba);
4466 if (unlikely(rc)) {
4467 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4468 "0582 Error %d during sgl post operation", rc);
4469 rc = -ENODEV;
4470 goto out_free_vpd;
4471 }
4472
4473 /* Register SCSI SGL pool to the device */
4474 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4475 if (unlikely(rc)) {
4476 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4477 "0383 Error %d during scsi sgl post opeation",
4478 rc);
4479 /* Some Scsi buffers were moved to the abort scsi list */
4480 /* A pci function reset will repost them */
4481 rc = -ENODEV;
4482 goto out_free_vpd;
4483 }
4484
4485 /* Post the rpi header region to the device. */
4486 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4487 if (unlikely(rc)) {
4488 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4489 "0393 Error %d during rpi post operation\n",
4490 rc);
4491 rc = -ENODEV;
4492 goto out_free_vpd;
4493 }
4494 /* Temporary initialization of lpfc_fip_flag to non-fip */
4495 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4496
4497 /* Set up all the queues to the device */
4498 rc = lpfc_sli4_queue_setup(phba);
4499 if (unlikely(rc)) {
4500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4501 "0381 Error %d during queue setup.\n ", rc);
4502 goto out_stop_timers;
4503 }
4504
4505 /* Arm the CQs and then EQs on device */
4506 lpfc_sli4_arm_cqeq_intr(phba);
4507
4508 /* Indicate device interrupt mode */
4509 phba->sli4_hba.intr_enable = 1;
4510
4511 /* Allow asynchronous mailbox command to go through */
4512 spin_lock_irq(&phba->hbalock);
4513 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4514 spin_unlock_irq(&phba->hbalock);
4515
4516 /* Post receive buffers to the device */
4517 lpfc_sli4_rb_setup(phba);
4518
4519 /* Start the ELS watchdog timer */
4520 /*
4521 * The driver for SLI4 is not yet ready to process timeouts
4522 * or interrupts. Once it is, the comment bars can be removed.
4523 */
4524 /* mod_timer(&vport->els_tmofunc,
4525 * jiffies + HZ * (phba->fc_ratov*2)); */
4526
4527 /* Start heart beat timer */
4528 mod_timer(&phba->hb_tmofunc,
4529 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4530 phba->hb_outstanding = 0;
4531 phba->last_completion_time = jiffies;
4532
4533 /* Start error attention (ERATT) polling timer */
4534 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4535
4536 /*
4537 * The port is ready, set the host's link state to LINK_DOWN
4538 * in preparation for link interrupts.
4539 */
4540 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4541 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4542 lpfc_set_loopback_flag(phba);
4543 /* Change driver state to LPFC_LINK_DOWN right before init link */
4544 spin_lock_irq(&phba->hbalock);
4545 phba->link_state = LPFC_LINK_DOWN;
4546 spin_unlock_irq(&phba->hbalock);
4547 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4548 if (unlikely(rc != MBX_NOT_FINISHED)) {
4549 kfree(vpd);
4550 return 0;
4551 } else
4552 rc = -EIO;
4553
4554 /* Unset all the queues set up in this routine when error out */
4555 if (rc)
4556 lpfc_sli4_queue_unset(phba);
4557
4558out_stop_timers:
4559 if (rc)
4560 lpfc_stop_hba_timers(phba);
4561out_free_vpd:
4562 kfree(vpd);
4563out_free_mbox:
4564 mempool_free(mboxq, phba->mbox_mem_pool);
4565 return rc;
4566}
3203 4567
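
lpfc_sli4_hba_setup() above uses the kernel's usual goto-based unwind: each
failure jumps to the label that releases exactly what has been acquired so
far (queues, timers, vpd buffer, mailbox), in reverse order of acquisition.
A minimal standalone sketch of the idiom, with plain heap buffers standing
in for the real queues and DMA regions:

    #include <stdlib.h>

    static int bring_up(void) { return 0; }   /* stand-in for queue setup */

    int setup(void)
    {
        char *vpd, *mbox;
        int rc = -1;

        mbox = malloc(64);
        if (!mbox)
            return -1;
        vpd = malloc(256);
        if (!vpd)
            goto out_free_mbox;

        if (bring_up())
            goto out_free_vpd;

        rc = 0;                 /* success also falls through the unwind */
    out_free_vpd:
        free(vpd);
    out_free_mbox:
        free(mbox);
        return rc;
    }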
3204/** 4568/**
3205 * lpfc_mbox_timeout - Timeout call back function for mbox timer 4569 * lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3244,7 +4608,7 @@ void
3244lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 4608lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3245{ 4609{
3246 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 4610 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
3247 MAILBOX_t *mb = &pmbox->mb; 4611 MAILBOX_t *mb = &pmbox->u.mb;
3248 struct lpfc_sli *psli = &phba->sli; 4612 struct lpfc_sli *psli = &phba->sli;
3249 struct lpfc_sli_ring *pring; 4613 struct lpfc_sli_ring *pring;
3250 4614
@@ -3281,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3281 spin_unlock_irq(&phba->pport->work_port_lock); 4645 spin_unlock_irq(&phba->pport->work_port_lock);
3282 spin_lock_irq(&phba->hbalock); 4646 spin_lock_irq(&phba->hbalock);
3283 phba->link_state = LPFC_LINK_UNKNOWN; 4647 phba->link_state = LPFC_LINK_UNKNOWN;
3284 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 4648 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3285 spin_unlock_irq(&phba->hbalock); 4649 spin_unlock_irq(&phba->hbalock);
3286 4650
3287 pring = &psli->ring[psli->fcp_ring]; 4651 pring = &psli->ring[psli->fcp_ring];
@@ -3289,32 +4653,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3289 4653
3290 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4654 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3291 "0345 Resetting board due to mailbox timeout\n"); 4655 "0345 Resetting board due to mailbox timeout\n");
3292 /* 4656
3293 * lpfc_offline calls lpfc_sli_hba_down which will clean up 4657 /* Reset the HBA device */
3294 * on oustanding mailbox commands. 4658 lpfc_reset_hba(phba);
3295 */
3296 /* If resets are disabled then set error state and return. */
3297 if (!phba->cfg_enable_hba_reset) {
3298 phba->link_state = LPFC_HBA_ERROR;
3299 return;
3300 }
3301 lpfc_offline_prep(phba);
3302 lpfc_offline(phba);
3303 lpfc_sli_brdrestart(phba);
3304 lpfc_online(phba);
3305 lpfc_unblock_mgmt_io(phba);
3306 return;
3307} 4659}
3308 4660
3309/** 4661/**
3310 * lpfc_sli_issue_mbox - Issue a mailbox command to firmware 4662 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
3311 * @phba: Pointer to HBA context object. 4663 * @phba: Pointer to HBA context object.
3312 * @pmbox: Pointer to mailbox object. 4664 * @pmbox: Pointer to mailbox object.
3313 * @flag: Flag indicating how the mailbox need to be processed. 4665 * @flag: Flag indicating how the mailbox need to be processed.
3314 * 4666 *
3315 * This function is called by discovery code and HBA management code 4667 * This function is called by discovery code and HBA management code
3316 * to submit a mailbox command to firmware. This function gets the 4668 * to submit a mailbox command to firmware with SLI-3 interface spec. This
3317 * hbalock to protect the data structures. 4669 * function gets the hbalock to protect the data structures.
3318 * The mailbox command can be submitted in polling mode, in which case 4670 * The mailbox command can be submitted in polling mode, in which case
3319 * this function will wait in a polling loop for the completion of the 4671 * this function will wait in a polling loop for the completion of the
3320 * mailbox. 4672 * mailbox.
@@ -3332,8 +4684,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3332 * return codes the caller owns the mailbox command after the return of 4684 * return codes the caller owns the mailbox command after the return of
3333 * the function. 4685 * the function.
3334 **/ 4686 **/
3335int 4687static int
3336lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 4688lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4689 uint32_t flag)
3337{ 4690{
3338 MAILBOX_t *mb; 4691 MAILBOX_t *mb;
3339 struct lpfc_sli *psli = &phba->sli; 4692 struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +4702,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3349 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4702 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3350 if (!pmbox) { 4703 if (!pmbox) {
3351 /* processing mbox queue from intr_handler */ 4704 /* processing mbox queue from intr_handler */
4705 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4706 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4707 return MBX_SUCCESS;
4708 }
3352 processing_queue = 1; 4709 processing_queue = 1;
3353 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4710 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3354 pmbox = lpfc_mbox_get(phba); 4711 pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +4722,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3365 lpfc_printf_log(phba, KERN_ERR, 4722 lpfc_printf_log(phba, KERN_ERR,
3366 LOG_MBOX | LOG_VPORT, 4723 LOG_MBOX | LOG_VPORT,
3367 "1806 Mbox x%x failed. No vport\n", 4724 "1806 Mbox x%x failed. No vport\n",
3368 pmbox->mb.mbxCommand); 4725 pmbox->u.mb.mbxCommand);
3369 dump_stack(); 4726 dump_stack();
3370 goto out_not_finished; 4727 goto out_not_finished;
3371 } 4728 }
@@ -3385,21 +4742,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3385 4742
3386 psli = &phba->sli; 4743 psli = &phba->sli;
3387 4744
3388 mb = &pmbox->mb; 4745 mb = &pmbox->u.mb;
3389 status = MBX_SUCCESS; 4746 status = MBX_SUCCESS;
3390 4747
3391 if (phba->link_state == LPFC_HBA_ERROR) { 4748 if (phba->link_state == LPFC_HBA_ERROR) {
3392 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4749 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3393 4750
3394 /* Mbox command <mbxCommand> cannot issue */ 4751 /* Mbox command <mbxCommand> cannot issue */
3395 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4752 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4753 "(%d):0311 Mailbox command x%x cannot "
4754 "issue Data: x%x x%x\n",
4755 pmbox->vport ? pmbox->vport->vpi : 0,
4756 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3396 goto out_not_finished; 4757 goto out_not_finished;
3397 } 4758 }
3398 4759
3399 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 4760 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
3400 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 4761 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
3401 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4762 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3402 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4763 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4764 "(%d):2528 Mailbox command x%x cannot "
4765 "issue Data: x%x x%x\n",
4766 pmbox->vport ? pmbox->vport->vpi : 0,
4767 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3403 goto out_not_finished; 4768 goto out_not_finished;
3404 } 4769 }
3405 4770
@@ -3413,14 +4778,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3413 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4778 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3414 4779
3415 /* Mbox command <mbxCommand> cannot issue */ 4780 /* Mbox command <mbxCommand> cannot issue */
3416 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4781 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4782 "(%d):2529 Mailbox command x%x "
4783 "cannot issue Data: x%x x%x\n",
4784 pmbox->vport ? pmbox->vport->vpi : 0,
4785 pmbox->u.mb.mbxCommand,
4786 psli->sli_flag, flag);
3417 goto out_not_finished; 4787 goto out_not_finished;
3418 } 4788 }
3419 4789
3420 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 4790 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
3421 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4791 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3422 /* Mbox command <mbxCommand> cannot issue */ 4792 /* Mbox command <mbxCommand> cannot issue */
3423 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4793 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4794 "(%d):2530 Mailbox command x%x "
4795 "cannot issue Data: x%x x%x\n",
4796 pmbox->vport ? pmbox->vport->vpi : 0,
4797 pmbox->u.mb.mbxCommand,
4798 psli->sli_flag, flag);
3424 goto out_not_finished; 4799 goto out_not_finished;
3425 } 4800 }
3426 4801
@@ -3462,12 +4837,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3462 4837
3463 /* If we are not polling, we MUST be in SLI2 mode */ 4838 /* If we are not polling, we MUST be in SLI2 mode */
3464 if (flag != MBX_POLL) { 4839 if (flag != MBX_POLL) {
3465 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 4840 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
3466 (mb->mbxCommand != MBX_KILL_BOARD)) { 4841 (mb->mbxCommand != MBX_KILL_BOARD)) {
3467 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4842 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3468 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4843 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3469 /* Mbox command <mbxCommand> cannot issue */ 4844 /* Mbox command <mbxCommand> cannot issue */
3470 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4846 "(%d):2531 Mailbox command x%x "
4847 "cannot issue Data: x%x x%x\n",
4848 pmbox->vport ? pmbox->vport->vpi : 0,
4849 pmbox->u.mb.mbxCommand,
4850 psli->sli_flag, flag);
3471 goto out_not_finished; 4851 goto out_not_finished;
3472 } 4852 }
3473 /* timeout active mbox command */ 4853 /* timeout active mbox command */
@@ -3506,7 +4886,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3506 /* next set own bit for the adapter and copy over command word */ 4886 /* next set own bit for the adapter and copy over command word */
3507 mb->mbxOwner = OWN_CHIP; 4887 mb->mbxOwner = OWN_CHIP;
3508 4888
3509 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4889 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3510 /* First copy command data to host SLIM area */ 4890 /* First copy command data to host SLIM area */
3511 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4891 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
3512 } else { 4892 } else {
@@ -3529,7 +4909,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3529 4909
3530 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4910 if (mb->mbxCommand == MBX_CONFIG_PORT) {
3531 /* switch over to host mailbox */ 4911 /* switch over to host mailbox */
3532 psli->sli_flag |= LPFC_SLI2_ACTIVE; 4912 psli->sli_flag |= LPFC_SLI_ACTIVE;
3533 } 4913 }
3534 } 4914 }
3535 4915
@@ -3552,7 +4932,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3552 writel(CA_MBATT, phba->CAregaddr); 4932 writel(CA_MBATT, phba->CAregaddr);
3553 readl(phba->CAregaddr); /* flush */ 4933 readl(phba->CAregaddr); /* flush */
3554 4934
3555 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4935 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3556 /* First read mbox status word */ 4936 /* First read mbox status word */
3557 word0 = *((uint32_t *)phba->mbox); 4937 word0 = *((uint32_t *)phba->mbox);
3558 word0 = le32_to_cpu(word0); 4938 word0 = le32_to_cpu(word0);
@@ -3591,7 +4971,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3591 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4971 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3592 } 4972 }
3593 4973
3594 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4974 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3595 /* First copy command data */ 4975 /* First copy command data */
3596 word0 = *((uint32_t *)phba->mbox); 4976 word0 = *((uint32_t *)phba->mbox);
3597 word0 = le32_to_cpu(word0); 4977 word0 = le32_to_cpu(word0);
@@ -3604,7 +4984,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3604 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 4984 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
3605 && slimmb->mbxStatus) { 4985 && slimmb->mbxStatus) {
3606 psli->sli_flag &= 4986 psli->sli_flag &=
3607 ~LPFC_SLI2_ACTIVE; 4987 ~LPFC_SLI_ACTIVE;
3608 word0 = slimword0; 4988 word0 = slimword0;
3609 } 4989 }
3610 } 4990 }
@@ -3616,7 +4996,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3616 ha_copy = readl(phba->HAregaddr); 4996 ha_copy = readl(phba->HAregaddr);
3617 } 4997 }
3618 4998
3619 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4999 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3620 /* copy results back to user */ 5000 /* copy results back to user */
3621 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5001 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
3622 } else { 5002 } else {
@@ -3643,13 +5023,420 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3643 5023
3644out_not_finished: 5024out_not_finished:
3645 if (processing_queue) { 5025 if (processing_queue) {
3646 pmbox->mb.mbxStatus = MBX_NOT_FINISHED; 5026 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
3647 lpfc_mbox_cmpl_put(phba, pmbox); 5027 lpfc_mbox_cmpl_put(phba, pmbox);
3648 } 5028 }
3649 return MBX_NOT_FINISHED; 5029 return MBX_NOT_FINISHED;
3650} 5030}
3651 5031
3652/** 5032/**
5033 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5034 * @phba: Pointer to HBA context object.
5035 * @mboxq: Pointer to mailbox object.
5036 *
5037 * The function posts a mailbox to the port. The mailbox is expected
5038 * to be comletely filled in and ready for the port to operate on it.
5039 * This routine executes a synchronous completion operation on the
5040 * mailbox by polling for its completion.
5041 *
5042 * The caller must not be holding any locks when calling this routine.
5043 *
5044 * Returns:
5045 * MBX_SUCCESS - mailbox posted successfully
5046 * Any of the MBX error values.
5047 **/
5048static int
5049lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5050{
5051 int rc = MBX_SUCCESS;
5052 unsigned long iflag;
5053 uint32_t db_ready;
5054 uint32_t mcqe_status;
5055 uint32_t mbx_cmnd;
5056 unsigned long timeout;
5057 struct lpfc_sli *psli = &phba->sli;
5058 struct lpfc_mqe *mb = &mboxq->u.mqe;
5059 struct lpfc_bmbx_create *mbox_rgn;
5060 struct dma_address *dma_address;
5061 struct lpfc_register bmbx_reg;
5062
5063 /*
5064 * Only one mailbox can be active to the bootstrap mailbox region
5065 * at a time and there is no queueing provided.
5066 */
5067 spin_lock_irqsave(&phba->hbalock, iflag);
5068 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5069 spin_unlock_irqrestore(&phba->hbalock, iflag);
5070 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5071 "(%d):2532 Mailbox command x%x (x%x) "
5072 "cannot issue Data: x%x x%x\n",
5073 mboxq->vport ? mboxq->vport->vpi : 0,
5074 mboxq->u.mb.mbxCommand,
5075 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5076 psli->sli_flag, MBX_POLL);
5077 return MBXERR_ERROR;
5078 }
5079 /* The server grabs the token and owns it until release */
5080 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5081 phba->sli.mbox_active = mboxq;
5082 spin_unlock_irqrestore(&phba->hbalock, iflag);
5083
5084 /*
5085 * Initialize the bootstrap memory region to avoid stale data areas
5086 * in the mailbox post. Then copy the caller's mailbox contents to
5087 * the bmbx mailbox region.
5088 */
5089 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5090 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5091 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5092 sizeof(struct lpfc_mqe));
5093
5094 /* Post the high mailbox dma address to the port and wait for ready. */
5095 dma_address = &phba->sli4_hba.bmbx.dma_address;
5096 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5097
5098 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5099 * 1000) + jiffies;
5100 do {
5101 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5102 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5103 if (!db_ready)
5104 msleep(2);
5105
5106 if (time_after(jiffies, timeout)) {
5107 rc = MBXERR_ERROR;
5108 goto exit;
5109 }
5110 } while (!db_ready);
5111
5112 /* Post the low mailbox dma address to the port. */
5113 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5114 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5115 * 1000) + jiffies;
5116 do {
5117 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5118 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5119 if (!db_ready)
5120 msleep(2);
5121
5122 if (time_after(jiffies, timeout)) {
5123 rc = MBXERR_ERROR;
5124 goto exit;
5125 }
5126 } while (!db_ready);
5127
5128 /*
5129 * Read the CQ to ensure the mailbox has completed.
5130 * If so, update the mailbox status so that the upper layers
5131 * can complete the request normally.
5132 */
5133 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5134 sizeof(struct lpfc_mqe));
5135 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5136 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5137 sizeof(struct lpfc_mcqe));
5138 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5139
5140 /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5141 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5142 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5143 rc = MBXERR_ERROR;
5144 }
5145
5146 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5147 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5148 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5149 " x%x x%x CQ: x%x x%x x%x x%x\n",
5150 mboxq->vport ? mboxq->vport->vpi : 0,
5151 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5152 bf_get(lpfc_mqe_status, mb),
5153 mb->un.mb_words[0], mb->un.mb_words[1],
5154 mb->un.mb_words[2], mb->un.mb_words[3],
5155 mb->un.mb_words[4], mb->un.mb_words[5],
5156 mb->un.mb_words[6], mb->un.mb_words[7],
5157 mb->un.mb_words[8], mb->un.mb_words[9],
5158 mb->un.mb_words[10], mb->un.mb_words[11],
5159 mb->un.mb_words[12], mboxq->mcqe.word0,
5160 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5161 mboxq->mcqe.trailer);
5162exit:
 5163	/* We are holding the token, no need for the lock when releasing */
5164 spin_lock_irqsave(&phba->hbalock, iflag);
5165 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5166 phba->sli.mbox_active = NULL;
5167 spin_unlock_irqrestore(&phba->hbalock, iflag);
5168 return rc;
5169}
5170
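Both doorbell writes in lpfc_sli4_post_sync_mbox() above use the same idiom:
write half of the bootstrap mailbox address, then poll a ready bit with a
short sleep between reads and a hard deadline. A hedged userspace sketch of
that poll loop (the register read is a stand-in, names hypothetical):

    #include <stdbool.h>
    #include <time.h>
    #include <unistd.h>

    static bool doorbell_ready(void)
    {
        /* stand-in for reading the rdy bit out of the BMBX register */
        return true;
    }

    /* Returns 0 once ready, -1 if timeout_ms elapses first. */
    static int wait_doorbell(unsigned int timeout_ms)
    {
        struct timespec t0, t;

        clock_gettime(CLOCK_MONOTONIC, &t0);
        while (!doorbell_ready()) {
            usleep(2000);                 /* mirrors the driver's msleep(2) */
            clock_gettime(CLOCK_MONOTONIC, &t);
            long elapsed = (t.tv_sec - t0.tv_sec) * 1000L +
                           (t.tv_nsec - t0.tv_nsec) / 1000000L;
            if (elapsed > (long)timeout_ms)
                return -1;                /* analogous to MBXERR_ERROR */
        }
        return 0;
    }
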
5171/**
5172 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5173 * @phba: Pointer to HBA context object.
 5174 * @mboxq: Pointer to mailbox object.
5175 * @flag: Flag indicating how the mailbox need to be processed.
5176 *
5177 * This function is called by discovery code and HBA management code to submit
5178 * a mailbox command to firmware with SLI-4 interface spec.
5179 *
 5180 * Return codes: the caller owns the mailbox command after the return of the
5181 * function.
5182 **/
5183static int
5184lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5185 uint32_t flag)
5186{
5187 struct lpfc_sli *psli = &phba->sli;
5188 unsigned long iflags;
5189 int rc;
5190
5191 /* Detect polling mode and jump to a handler */
5192 if (!phba->sli4_hba.intr_enable) {
5193 if (flag == MBX_POLL)
5194 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5195 else
5196 rc = -EIO;
5197 if (rc != MBX_SUCCESS)
5198 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5199 "(%d):2541 Mailbox command x%x "
5200 "(x%x) cannot issue Data: x%x x%x\n",
5201 mboxq->vport ? mboxq->vport->vpi : 0,
5202 mboxq->u.mb.mbxCommand,
5203 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5204 psli->sli_flag, flag);
5205 return rc;
5206 } else if (flag == MBX_POLL) {
5207 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5208 "(%d):2542 Mailbox command x%x (x%x) "
5209 "cannot issue Data: x%x x%x\n",
5210 mboxq->vport ? mboxq->vport->vpi : 0,
5211 mboxq->u.mb.mbxCommand,
5212 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5213 psli->sli_flag, flag);
5214 return -EIO;
5215 }
5216
 5217	/* Now, interrupt mode asynchronous mailbox command */
5218 rc = lpfc_mbox_cmd_check(phba, mboxq);
5219 if (rc) {
5220 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5221 "(%d):2543 Mailbox command x%x (x%x) "
5222 "cannot issue Data: x%x x%x\n",
5223 mboxq->vport ? mboxq->vport->vpi : 0,
5224 mboxq->u.mb.mbxCommand,
5225 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5226 psli->sli_flag, flag);
5227 goto out_not_finished;
5228 }
5229 rc = lpfc_mbox_dev_check(phba);
5230 if (unlikely(rc)) {
5231 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5232 "(%d):2544 Mailbox command x%x (x%x) "
5233 "cannot issue Data: x%x x%x\n",
5234 mboxq->vport ? mboxq->vport->vpi : 0,
5235 mboxq->u.mb.mbxCommand,
5236 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5237 psli->sli_flag, flag);
5238 goto out_not_finished;
5239 }
5240
5241 /* Put the mailbox command to the driver internal FIFO */
5242 psli->slistat.mbox_busy++;
5243 spin_lock_irqsave(&phba->hbalock, iflags);
5244 lpfc_mbox_put(phba, mboxq);
5245 spin_unlock_irqrestore(&phba->hbalock, iflags);
5246 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5247 "(%d):0354 Mbox cmd issue - Enqueue Data: "
5248 "x%x (x%x) x%x x%x x%x\n",
5249 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5250 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5251 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5252 phba->pport->port_state,
5253 psli->sli_flag, MBX_NOWAIT);
5254 /* Wake up worker thread to transport mailbox command from head */
5255 lpfc_worker_wake_up(phba);
5256
5257 return MBX_BUSY;
5258
5259out_not_finished:
5260 return MBX_NOT_FINISHED;
5261}
5262
5263/**
5264 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5265 * @phba: Pointer to HBA context object.
5266 *
 5267 * This function is called by the worker thread to send a mailbox command to
5268 * SLI4 HBA firmware.
5269 *
5270 **/
5271int
5272lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5273{
5274 struct lpfc_sli *psli = &phba->sli;
5275 LPFC_MBOXQ_t *mboxq;
5276 int rc = MBX_SUCCESS;
5277 unsigned long iflags;
5278 struct lpfc_mqe *mqe;
5279 uint32_t mbx_cmnd;
5280
 5281	/* Check interrupt mode before posting async mailbox command */
5282 if (unlikely(!phba->sli4_hba.intr_enable))
5283 return MBX_NOT_FINISHED;
5284
5285 /* Check for mailbox command service token */
5286 spin_lock_irqsave(&phba->hbalock, iflags);
5287 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5288 spin_unlock_irqrestore(&phba->hbalock, iflags);
5289 return MBX_NOT_FINISHED;
5290 }
5291 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5292 spin_unlock_irqrestore(&phba->hbalock, iflags);
5293 return MBX_NOT_FINISHED;
5294 }
5295 if (unlikely(phba->sli.mbox_active)) {
5296 spin_unlock_irqrestore(&phba->hbalock, iflags);
5297 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5298 "0384 There is pending active mailbox cmd\n");
5299 return MBX_NOT_FINISHED;
5300 }
5301 /* Take the mailbox command service token */
5302 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5303
5304 /* Get the next mailbox command from head of queue */
5305 mboxq = lpfc_mbox_get(phba);
5306
 5307	/* If no more mailbox commands waiting for post, we're done */
5308 if (!mboxq) {
5309 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5310 spin_unlock_irqrestore(&phba->hbalock, iflags);
5311 return MBX_SUCCESS;
5312 }
5313 phba->sli.mbox_active = mboxq;
5314 spin_unlock_irqrestore(&phba->hbalock, iflags);
5315
5316 /* Check device readiness for posting mailbox command */
5317 rc = lpfc_mbox_dev_check(phba);
5318 if (unlikely(rc))
5319 /* Driver clean routine will clean up pending mailbox */
5320 goto out_not_finished;
5321
5322 /* Prepare the mbox command to be posted */
5323 mqe = &mboxq->u.mqe;
5324 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5325
5326 /* Start timer for the mbox_tmo and log some mailbox post messages */
5327 mod_timer(&psli->mbox_tmo, (jiffies +
5328 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5329
5330 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5331 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5332 "x%x x%x\n",
5333 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5334 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5335 phba->pport->port_state, psli->sli_flag);
5336
5337 if (mbx_cmnd != MBX_HEARTBEAT) {
5338 if (mboxq->vport) {
5339 lpfc_debugfs_disc_trc(mboxq->vport,
5340 LPFC_DISC_TRC_MBOX_VPORT,
5341 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5342 mbx_cmnd, mqe->un.mb_words[0],
5343 mqe->un.mb_words[1]);
5344 } else {
5345 lpfc_debugfs_disc_trc(phba->pport,
5346 LPFC_DISC_TRC_MBOX,
5347 "MBOX Send: cmd:x%x mb:x%x x%x",
5348 mbx_cmnd, mqe->un.mb_words[0],
5349 mqe->un.mb_words[1]);
5350 }
5351 }
5352 psli->slistat.mbox_cmd++;
5353
5354 /* Post the mailbox command to the port */
5355 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5356 if (rc != MBX_SUCCESS) {
5357 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5358 "(%d):2533 Mailbox command x%x (x%x) "
5359 "cannot issue Data: x%x x%x\n",
5360 mboxq->vport ? mboxq->vport->vpi : 0,
5361 mboxq->u.mb.mbxCommand,
5362 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5363 psli->sli_flag, MBX_NOWAIT);
5364 goto out_not_finished;
5365 }
5366
5367 return rc;
5368
5369out_not_finished:
5370 spin_lock_irqsave(&phba->hbalock, iflags);
5371 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5372 __lpfc_mbox_cmpl_put(phba, mboxq);
5373 /* Release the token */
5374 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5375 phba->sli.mbox_active = NULL;
5376 spin_unlock_irqrestore(&phba->hbalock, iflags);
5377
5378 return MBX_NOT_FINISHED;
5379}
5380
5381/**
5382 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5383 * @phba: Pointer to HBA context object.
5384 * @pmbox: Pointer to mailbox object.
5385 * @flag: Flag indicating how the mailbox need to be processed.
5386 *
5387 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
5388 * the API jump table function pointer from the lpfc_hba struct.
5389 *
 5390 * Return codes: the caller owns the mailbox command after the return of the
5391 * function.
5392 **/
5393int
5394lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5395{
5396 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5397}
5398
5399/**
 5400 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
5401 * @phba: The hba struct for which this call is being executed.
5402 * @dev_grp: The HBA PCI-Device group number.
5403 *
5404 * This routine sets up the mbox interface API function jump table in @phba
5405 * struct.
5406 * Returns: 0 - success, -ENODEV - failure.
5407 **/
5408int
5409lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5410{
5411
5412 switch (dev_grp) {
5413 case LPFC_PCI_DEV_LP:
5414 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5415 phba->lpfc_sli_handle_slow_ring_event =
5416 lpfc_sli_handle_slow_ring_event_s3;
5417 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5418 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5419 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5420 break;
5421 case LPFC_PCI_DEV_OC:
5422 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5423 phba->lpfc_sli_handle_slow_ring_event =
5424 lpfc_sli_handle_slow_ring_event_s4;
5425 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5426 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5427 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5428 break;
5429 default:
5430 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5431 "1420 Invalid HBA PCI-device group: 0x%x\n",
5432 dev_grp);
5433 return -ENODEV;
5434 break;
5435 }
5436 return 0;
5437}
5438
5439/**
3653 * __lpfc_sli_ringtx_put - Add an iocb to the txq 5440 * __lpfc_sli_ringtx_put - Add an iocb to the txq
3654 * @phba: Pointer to HBA context object. 5441 * @phba: Pointer to HBA context object.
3655 * @pring: Pointer to driver SLI ring object. 5442 * @pring: Pointer to driver SLI ring object.
@@ -3701,35 +5488,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3701} 5488}
3702 5489
3703/** 5490/**
3704 * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb 5491 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
3705 * @phba: Pointer to HBA context object. 5492 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object. 5493 * @ring_number: SLI ring number to issue iocb on.
3707 * @piocb: Pointer to command iocb. 5494 * @piocb: Pointer to command iocb.
3708 * @flag: Flag indicating if this command can be put into txq. 5495 * @flag: Flag indicating if this command can be put into txq.
3709 * 5496 *
3710 * __lpfc_sli_issue_iocb is used by other functions in the driver 5497 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
3711 * to issue an iocb command to the HBA. If the PCI slot is recovering 5498 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
3712 * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT 5499 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
3713 * flag is turned on, the function returns IOCB_ERROR. 5500 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
3714 * When the link is down, this function allows only iocbs for 5501 * this function allows only iocbs for posting buffers. This function finds
3715 * posting buffers. 5502 * next available slot in the command ring and posts the command to the
3716 * This function finds next available slot in the command ring and 5503 * available slot and writes the port attention register to request HBA start
3717 * posts the command to the available slot and writes the port 5504 * processing new iocb. If there is no slot available in the ring and
3718 * attention register to request HBA start processing new iocb. 5505 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
3719 * If there is no slot available in the ring and 5506 * the function returns IOCB_BUSY.
3720 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the 5507 *
3721 * txq, otherwise the function returns IOCB_BUSY. 5508 * This function is called with hbalock held. The function will return success
3722 * 5509 * after it successfully submit the iocb to firmware or after adding to the
3723 * This function is called with hbalock held. 5510 * txq.
3724 * The function will return success after it successfully submit the
3725 * iocb to firmware or after adding to the txq.
3726 **/ 5511 **/
3727static int 5512static int
3728__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5513__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
3729 struct lpfc_iocbq *piocb, uint32_t flag) 5514 struct lpfc_iocbq *piocb, uint32_t flag)
3730{ 5515{
3731 struct lpfc_iocbq *nextiocb; 5516 struct lpfc_iocbq *nextiocb;
3732 IOCB_t *iocb; 5517 IOCB_t *iocb;
5518 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
3733 5519
3734 if (piocb->iocb_cmpl && (!piocb->vport) && 5520 if (piocb->iocb_cmpl && (!piocb->vport) &&
3735 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 5521 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +5619,498 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3833 return IOCB_BUSY; 5619 return IOCB_BUSY;
3834} 5620}
3835 5621
5622/**
5623 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
5624 * @phba: Pointer to HBA context object.
5625 * @piocb: Pointer to command iocb.
5626 * @sglq: Pointer to the scatter gather queue object.
5627 *
5628 * This routine converts the bpl or bde that is in the IOCB
5629 * to a sgl list for the sli4 hardware. The physical address
5630 * of the bpl/bde is converted back to a virtual address.
5631 * If the IOCB contains a BPL then the list of BDE's is
5632 * converted to sli4_sge's. If the IOCB contains a single
 5633 * BDE then it is converted to a single sli4_sge.
 5634 * The IOCB is still in cpu endianness so the contents of
5635 * the bpl can be used without byte swapping.
5636 *
5637 * Returns valid XRI = Success, NO_XRI = Failure.
 5638 **/
5639static uint16_t
5640lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5641 struct lpfc_sglq *sglq)
5642{
5643 uint16_t xritag = NO_XRI;
5644 struct ulp_bde64 *bpl = NULL;
5645 struct ulp_bde64 bde;
5646 struct sli4_sge *sgl = NULL;
5647 IOCB_t *icmd;
5648 int numBdes = 0;
5649 int i = 0;
5650
5651 if (!piocbq || !sglq)
5652 return xritag;
5653
5654 sgl = (struct sli4_sge *)sglq->sgl;
5655 icmd = &piocbq->iocb;
5656 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5657 numBdes = icmd->un.genreq64.bdl.bdeSize /
5658 sizeof(struct ulp_bde64);
5659 /* The addrHigh and addrLow fields within the IOCB
5660 * have not been byteswapped yet so there is no
5661 * need to swap them back.
5662 */
5663 bpl = (struct ulp_bde64 *)
5664 ((struct lpfc_dmabuf *)piocbq->context3)->virt;
5665
5666 if (!bpl)
5667 return xritag;
5668
5669 for (i = 0; i < numBdes; i++) {
5670 /* Should already be byte swapped. */
5671 sgl->addr_hi = bpl->addrHigh;
5672 sgl->addr_lo = bpl->addrLow;
5673 /* swap the size field back to the cpu so we
5674 * can assign it to the sgl.
5675 */
5676 bde.tus.w = le32_to_cpu(bpl->tus.w);
5677 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5678 if ((i+1) == numBdes)
5679 bf_set(lpfc_sli4_sge_last, sgl, 1);
5680 else
5681 bf_set(lpfc_sli4_sge_last, sgl, 0);
5682 sgl->word2 = cpu_to_le32(sgl->word2);
5683 sgl->word3 = cpu_to_le32(sgl->word3);
5684 bpl++;
5685 sgl++;
5686 }
5687 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
5688 /* The addrHigh and addrLow fields of the BDE have not
5689 * been byteswapped yet so they need to be swapped
5690 * before putting them in the sgl.
5691 */
5692 sgl->addr_hi =
5693 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5694 sgl->addr_lo =
5695 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5696 bf_set(lpfc_sli4_sge_len, sgl,
5697 icmd->un.genreq64.bdl.bdeSize);
5698 bf_set(lpfc_sli4_sge_last, sgl, 1);
5699 sgl->word2 = cpu_to_le32(sgl->word2);
5700 sgl->word3 = cpu_to_le32(sgl->word3);
5701 }
5702 return sglq->sli4_xritag;
5703}
5704
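The conversion above walks the BPL one BDE at a time and sets the "last"
bit only on the final SGE so the hardware knows where the scatter list
ends. A simplified standalone sketch of that walk (struct fields
hypothetical, endianness handling omitted):

    struct sge { unsigned int len; unsigned int last; };

    /* Fill n scatter entries, flagging the final one as the terminator. */
    static void fill_sgl(struct sge *sgl, const unsigned int *lens, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            sgl[i].len  = lens[i];
            sgl[i].last = (i + 1 == n);  /* only the last entry gets the bit */
        }
    }
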
5705/**
5706 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5707 * @phba: Pointer to HBA context object.
5708 * @piocb: Pointer to command iocb.
5709 *
 5710 * This routine performs round-robin distribution of SCSI commands across
 5711 * the SLI4 FCP WQ indexes.
5712 *
5713 * Return: index into SLI4 fast-path FCP queue index.
5714 **/
5715static uint32_t
5716lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
5717{
5718 static uint32_t fcp_qidx;
5719
5720 return fcp_qidx++ % phba->cfg_fcp_wq_count;
5721}
5722
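The distribution routine above keeps a single static counter and takes it
modulo the configured WQ count; the counter is not atomic, so concurrent
callers may occasionally collide on an index, which only skews the spread
slightly rather than breaking correctness. The pattern in isolation (names
hypothetical, not lpfc code):

    /* Round-robin selection over wq_count queues using a shared counter. */
    static unsigned int next_wq_index(unsigned int *counter,
                                      unsigned int wq_count)
    {
        return (*counter)++ % wq_count;
    }
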
5723/**
 5724 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 5725 * @phba: Pointer to HBA context object.
 5726 * @iocbq: Pointer to command iocb.
5727 * @wqe: Pointer to the work queue entry.
5728 *
5729 * This routine converts the iocb command to its Work Queue Entry
5730 * equivalent. The wqe pointer should not have any fields set when
5731 * this routine is called because it will memcpy over them.
5732 * This routine does not set the CQ_ID or the WQEC bits in the
5733 * wqe.
5734 *
5735 * Returns: 0 = Success, IOCB_ERROR = Failure.
5736 **/
5737static int
5738lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5739 union lpfc_wqe *wqe)
5740{
5741 uint32_t payload_len = 0;
5742 uint8_t ct = 0;
5743 uint32_t fip;
5744 uint32_t abort_tag;
5745 uint8_t command_type = ELS_COMMAND_NON_FIP;
5746 uint8_t cmnd;
5747 uint16_t xritag;
5748 struct ulp_bde64 *bpl = NULL;
5749
5750 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5751 /* The fcp commands will set command type */
5752 if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
5753 command_type = ELS_COMMAND_NON_FIP;
5754 else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
5755 command_type = ELS_COMMAND_FIP;
5756 else if (iocbq->iocb_flag & LPFC_IO_FCP)
5757 command_type = FCP_COMMAND;
5758 else {
5759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5760 "2019 Invalid cmd 0x%x\n",
5761 iocbq->iocb.ulpCommand);
5762 return IOCB_ERROR;
5763 }
5764 /* Some of the fields are in the right position already */
5765 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5766 abort_tag = (uint32_t) iocbq->iotag;
5767 xritag = iocbq->sli4_xritag;
5768 wqe->words[7] = 0; /* The ct field has moved so reset */
5769 /* words0-2 bpl convert bde */
5770 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5771 bpl = (struct ulp_bde64 *)
5772 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5773 if (!bpl)
5774 return IOCB_ERROR;
5775
5776 /* Should already be byte swapped. */
5777 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5778 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5779 /* swap the size field back to the cpu so we
5780 * can assign it to the sgl.
5781 */
5782 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5783 payload_len = wqe->generic.bde.tus.f.bdeSize;
5784 } else
5785 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5786
5787 iocbq->iocb.ulpIoTag = iocbq->iotag;
5788 cmnd = iocbq->iocb.ulpCommand;
5789
5790 switch (iocbq->iocb.ulpCommand) {
5791 case CMD_ELS_REQUEST64_CR:
5792 if (!iocbq->iocb.ulpLe) {
5793 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5794 "2007 Only Limited Edition cmd Format"
5795 " supported 0x%x\n",
5796 iocbq->iocb.ulpCommand);
5797 return IOCB_ERROR;
5798 }
5799 wqe->els_req.payload_len = payload_len;
5800 /* Els_request64 has a TMO */
5801 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5802 iocbq->iocb.ulpTimeout);
5803 /* Need a VF for word 4 set the vf bit*/
5804 bf_set(els_req64_vf, &wqe->els_req, 0);
5805 /* And a VFID for word 12 */
5806 bf_set(els_req64_vfid, &wqe->els_req, 0);
5807 /*
5808 * Set ct field to 3, indicates that the context_tag field
5809 * contains the FCFI and remote N_Port_ID is
5810 * in word 5.
5811 */
5812
5813 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5814 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5815 iocbq->iocb.ulpContext);
5816
5817 if (iocbq->vport->fc_myDID != 0) {
5818 bf_set(els_req64_sid, &wqe->els_req,
5819 iocbq->vport->fc_myDID);
5820 bf_set(els_req64_sp, &wqe->els_req, 1);
5821 }
5822 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5823 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5824 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5825 break;
5826 case CMD_XMIT_SEQUENCE64_CR:
5827 /* word3 iocb=io_tag32 wqe=payload_offset */
5828 /* payload offset used for multiple outstanding
5829 * sequences on the same exchange
5830 */
5831 wqe->words[3] = 0;
5832 /* word4 relative_offset memcpy */
5833 /* word5 r_ctl/df_ctl memcpy */
5834 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5835 wqe->xmit_sequence.xmit_len = payload_len;
5836 break;
5837 case CMD_XMIT_BCAST64_CN:
5838 /* word3 iocb=iotag32 wqe=payload_len */
5839 wqe->words[3] = 0; /* no definition for this in wqe */
5840 /* word4 iocb=rsvd wqe=rsvd */
5841 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
5842 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
5843 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5844 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5845 break;
5846 case CMD_FCP_IWRITE64_CR:
5847 command_type = FCP_COMMAND_DATA_OUT;
5848 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
5849 * confusing.
5850 * word3 is payload_len: byte offset to the sgl entry for the
5851 * fcp_command.
5852 * word4 is total xfer len, same as the IOCB->ulpParameter.
5853 * word5 is initial xfer len 0 = wait for xfer-ready
5854 */
5855
5856 /* Always wait for xfer-ready before sending data */
5857 wqe->fcp_iwrite.initial_xfer_len = 0;
5858 /* word 4 (xfer length) should have been set on the memcpy */
5859
5860 /* allow write to fall through to read */
5861 case CMD_FCP_IREAD64_CR:
5862 /* FCP_CMD is always the 1st sgl entry */
5863 wqe->fcp_iread.payload_len =
5864 payload_len + sizeof(struct fcp_rsp);
5865
5866 /* word 4 (xfer length) should have been set on the memcpy */
5867
5868 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5869 iocbq->iocb.ulpFCP2Rcvy);
5870 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5871 /* The XC bit and the XS bit are similar. The driver never
5872 * tracked whether or not the exchange was previously open.
5873 * XC = Exchange create, 0 is create. 1 is already open.
5874 * XS = link cmd: 1 do not close the exchange after command.
5875 * XS = 0 close exchange when command completes.
5876 * The only time we would not set the XC bit is when the XS bit
5877 * is set and we are sending our 2nd or greater command on
5878 * this exchange.
5879 */
5880
5881 /* ALLOW read & write to fall through to ICMD64 */
5882 case CMD_FCP_ICMND64_CR:
5883 /* Always open the exchange */
5884 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5885
5886 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5887 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5888 break;
5889 case CMD_GEN_REQUEST64_CR:
5890 /* word3 command length is described as byte offset to the
5891 * rsp_data. It is always 16, i.e. sizeof(struct sli4_sge):
5892 * sgl[0] = cmnd
5893 * sgl[1] = rsp.
5894 *
5895 */
5896 wqe->gen_req.command_len = payload_len;
5897 /* Word4 parameter copied in the memcpy */
5898 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
5899 /* word6 context tag copied in memcpy */
5900 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
5901 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5902 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5903 "2015 Invalid CT %x command 0x%x\n",
5904 ct, iocbq->iocb.ulpCommand);
5905 return IOCB_ERROR;
5906 }
5907 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
5908 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
5909 iocbq->iocb.ulpTimeout);
5910
5911 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5912 command_type = OTHER_COMMAND;
5913 break;
5914 case CMD_XMIT_ELS_RSP64_CX:
5915 /* words0-2 BDE memcpy */
5916 /* word3 iocb=iotag32 wqe=rsvd */
5917 wqe->words[3] = 0;
5918 /* word4 iocb=did wqe=rsvd. */
5919 wqe->words[4] = 0;
5920 /* word5 iocb=rsvd wqe=did */
5921 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
5922 iocbq->iocb.un.elsreq64.remoteID);
5923
5924 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5925 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5926
5927 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5928 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5929 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
5930 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5931 iocbq->vport->vpi + phba->vpi_base);
5932 command_type = OTHER_COMMAND;
5933 break;
5934 case CMD_CLOSE_XRI_CN:
5935 case CMD_ABORT_XRI_CN:
5936 case CMD_ABORT_XRI_CX:
5937 /* words 0-2 memcpy should be 0, reserved */
5938 /* port will send abts */
5939 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5940 /*
5941 * The link is down so the fw does not need to send abts
5942 * on the wire.
5943 */
5944 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
5945 else
5946 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
5947 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
5948 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5949 wqe->words[5] = 0;
5950 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5951 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5952 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5953 wqe->generic.abort_tag = abort_tag;
5954 /*
5955 * The abort handler will send us CMD_ABORT_XRI_CN or
5956 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
5957 */
5958 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
5959 cmnd = CMD_ABORT_XRI_CX;
5960 command_type = OTHER_COMMAND;
5961 xritag = 0;
5962 break;
5963 case CMD_XRI_ABORTED_CX:
5964 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
5965 /* words0-2 are all 0's no bde */
5966 /* word3 and word4 are reserved */
5967 wqe->words[3] = 0;
5968 wqe->words[4] = 0;
5969 /* word5 iocb=rsvd wqe=did */
5970 /* There is no remote port id in the IOCB? */
5971 /* Let this fall through and fail */
5972 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
5973 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
5974 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
5975 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
5976 default:
5977 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5978 "2014 Invalid command 0x%x\n",
5979 iocbq->iocb.ulpCommand);
5980 return IOCB_ERROR;
5981 break;
5982
5983 }
5984 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
5985 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
5986 wqe->generic.abort_tag = abort_tag;
5987 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
5988 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
5989 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
5990 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
5991
5992 return 0;
5993}
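
The WQE construction above leans heavily on the bf_set()/bf_get() accessors, which read and write sub-word bit fields by mask and shift. A standalone sketch of how such accessors typically work; the field layout, names, and values below are assumptions for illustration, not the driver's actual macro definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative mask/shift bit-field accessors in the spirit of
 * bf_set()/bf_get(): update or extract bits 8..15 of a 32-bit word
 * without disturbing the neighbouring bits.
 */
#define EX_FIELD_SHIFT 8
#define EX_FIELD_MASK  0x000000ff

static inline void ex_set(uint32_t *word, uint32_t value)
{
        *word = (*word & ~(EX_FIELD_MASK << EX_FIELD_SHIFT)) |
                ((value & EX_FIELD_MASK) << EX_FIELD_SHIFT);
}

static inline uint32_t ex_get(uint32_t word)
{
        return (word >> EX_FIELD_SHIFT) & EX_FIELD_MASK;
}

int main(void)
{
        uint32_t w = 0xdeadbeef;

        ex_set(&w, 0x5a);       /* rewrites bits 8..15 only */
        printf("word=0x%08x field=0x%02x\n", w, ex_get(w));
        return 0;
}
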
5994
5995/**
5996 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
5997 * @phba: Pointer to HBA context object.
5998 * @ring_number: SLI ring number to issue iocb on.
5999 * @piocb: Pointer to command iocb.
6000 * @flag: Flag indicating if this command can be put into txq.
6001 *
6002 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6003 * an iocb command to an HBA with SLI-4 interface spec.
6004 *
6005 * This function is called with hbalock held. The function will return success
6006 * after it successfully submits the iocb to firmware or after adding it to
6007 * the txq.
6008 **/
6009static int
6010__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6011 struct lpfc_iocbq *piocb, uint32_t flag)
6012{
6013 struct lpfc_sglq *sglq;
6014 uint16_t xritag;
6015 union lpfc_wqe wqe;
6016 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6017 uint32_t fcp_wqidx;
6018
6019 if (piocb->sli4_xritag == NO_XRI) {
6020 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6021 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6022 sglq = NULL;
6023 else {
6024 sglq = __lpfc_sli_get_sglq(phba);
6025 if (!sglq)
6026 return IOCB_ERROR;
6027 piocb->sli4_xritag = sglq->sli4_xritag;
6028 }
6029 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6030 sglq = NULL; /* These IO's already have an XRI and
6031 * a mapped sgl.
6032 */
6033 } else {
6034 /* This is a continuation of a command (CX), so this
6035 * sglq is on the active list
6036 */
6037 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6038 if (!sglq)
6039 return IOCB_ERROR;
6040 }
6041
6042 if (sglq) {
6043 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
6044 if (xritag != sglq->sli4_xritag)
6045 return IOCB_ERROR;
6046 }
6047
6048 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6049 return IOCB_ERROR;
6050
6051 if (piocb->iocb_flag & LPFC_IO_FCP) {
6052 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
6053 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6054 return IOCB_ERROR;
6055 } else {
6056 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6057 return IOCB_ERROR;
6058 }
6059 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6060
6061 return 0;
6062}
6063
6064/**
6065 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6066 *
6067 * This routine wraps the actual lockless IOCB issue routine, dispatching
6068 * through the function pointer in the lpfc_hba struct.
6069 *
6070 * Return codes:
6071 * IOCB_ERROR - Error
6072 * IOCB_SUCCESS - Success
6073 * IOCB_BUSY - Busy
6074 **/
6075static inline int
6076__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6077 struct lpfc_iocbq *piocb, uint32_t flag)
6078{
6079 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6080}
6081
6082/**
6083 * lpfc_sli_api_table_setup - Set up sli api function jump table
6084 * @phba: The hba struct for which this call is being executed.
6085 * @dev_grp: The HBA PCI-Device group number.
6086 *
6087 * This routine sets up the SLI interface API function jump table in @phba
6088 * struct.
6089 * Returns: 0 - success, -ENODEV - failure.
6090 **/
6091int
6092lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6093{
6094
6095 switch (dev_grp) {
6096 case LPFC_PCI_DEV_LP:
6097 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6098 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6099 break;
6100 case LPFC_PCI_DEV_OC:
6101 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6102 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6103 break;
6104 default:
6105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6106 "1419 Invalid HBA PCI-device group: 0x%x\n",
6107 dev_grp);
6108 return -ENODEV;
6109 break;
6110 }
6111 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6112 return 0;
6113}
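
A minimal sketch of the jump-table pattern used by lpfc_sli_api_table_setup(): install a per-device-group function pointer once at setup time, then dispatch through it on every call without re-checking the device type. All names and group values below are illustrative:

#include <stdio.h>

struct hba {
        int (*issue)(struct hba *hba, int tag); /* per-SLI-rev entry point */
};

static int issue_s3(struct hba *hba, int tag)
{
        printf("SLI-3 style issue, tag %d\n", tag);
        return 0;
}

static int issue_s4(struct hba *hba, int tag)
{
        printf("SLI-4 style issue, tag %d\n", tag);
        return 0;
}

static int api_table_setup(struct hba *hba, int dev_grp)
{
        switch (dev_grp) {
        case 1:                 /* cf. LPFC_PCI_DEV_LP */
                hba->issue = issue_s3;
                return 0;
        case 2:                 /* cf. LPFC_PCI_DEV_OC */
                hba->issue = issue_s4;
                return 0;
        default:                /* unknown PCI-device group */
                return -1;
        }
}

int main(void)
{
        struct hba hba;

        if (api_table_setup(&hba, 2) == 0)
                hba.issue(&hba, 42);    /* dispatches to issue_s4 */
        return 0;
}
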
3836 6114
3837/** 6115/**
3838 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 6116 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +6126,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3848 * functions which do not hold hbalock. 6126 * functions which do not hold hbalock.
3849 **/ 6127 **/
3850int 6128int
3851lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6129lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
3852 struct lpfc_iocbq *piocb, uint32_t flag) 6130 struct lpfc_iocbq *piocb, uint32_t flag)
3853{ 6131{
3854 unsigned long iflags; 6132 unsigned long iflags;
3855 int rc; 6133 int rc;
3856 6134
3857 spin_lock_irqsave(&phba->hbalock, iflags); 6135 spin_lock_irqsave(&phba->hbalock, iflags);
3858 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 6136 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
3859 spin_unlock_irqrestore(&phba->hbalock, iflags); 6137 spin_unlock_irqrestore(&phba->hbalock, iflags);
3860 6138
3861 return rc; 6139 return rc;
@@ -4148,6 +6426,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
4148} 6426}
4149 6427
4150/** 6428/**
6429 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
6430 * @phba: Pointer to HBA context object.
6431 *
6432 * This routine flushes the mailbox command subsystem. It will unconditionally
6433 * flush all the mailbox commands in the three possible stages in the mailbox
6434 * command sub-system: pending mailbox command queue; the outstanding mailbox
6435 * command; and completed mailbox command queue. It is the caller's responsibility
6436 * to make sure that the driver is in the proper state to flush the mailbox
6437 * command sub-system. Namely, the posting of mailbox commands into the
6438 * pending mailbox command queue from the various clients must be stopped;
6439 * either the HBA is in a state that it will never work on the outstanding
6440 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
6441 * mailbox command has been completed.
6442 **/
6443static void
6444lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
6445{
6446 LIST_HEAD(completions);
6447 struct lpfc_sli *psli = &phba->sli;
6448 LPFC_MBOXQ_t *pmb;
6449 unsigned long iflag;
6450
6451 /* Flush all the mailbox commands in the mbox system */
6452 spin_lock_irqsave(&phba->hbalock, iflag);
6453 /* The pending mailbox command queue */
6454 list_splice_init(&phba->sli.mboxq, &completions);
6455 /* The outstanding active mailbox command */
6456 if (psli->mbox_active) {
6457 list_add_tail(&psli->mbox_active->list, &completions);
6458 psli->mbox_active = NULL;
6459 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6460 }
6461 /* The completed mailbox command queue */
6462 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
6463 spin_unlock_irqrestore(&phba->hbalock, iflag);
6464
6465 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
6466 while (!list_empty(&completions)) {
6467 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
6468 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
6469 if (pmb->mbox_cmpl)
6470 pmb->mbox_cmpl(phba, pmb);
6471 }
6472}
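
The flush above relies on list_splice_init() to steal every pending entry onto a private list in one O(1) step while the lock is held, then completes the flushed entries with the lock dropped. A simplified userspace model of that splice-and-complete pattern, using a plain singly linked list in place of the kernel list API:

#include <stdio.h>
#include <stdlib.h>

struct cmd {
        struct cmd *next;
        int status;
        void (*done)(struct cmd *cmd);  /* completion callback */
};

static void cmd_done(struct cmd *cmd)
{
        printf("completed cmd with status %d\n", cmd->status);
}

static void flush_all(struct cmd **pending)
{
        /* "Splice": detach the whole queue at once (lock held in the driver) */
        struct cmd *completions = *pending;
        *pending = NULL;

        /* Complete each flushed command (lock released in the driver) */
        while (completions) {
                struct cmd *cmd = completions;

                completions = cmd->next;
                cmd->status = -1;       /* cf. MBX_NOT_FINISHED */
                if (cmd->done)
                        cmd->done(cmd);
                free(cmd);
        }
}

int main(void)
{
        struct cmd *pending = NULL;

        for (int i = 0; i < 3; i++) {
                struct cmd *c = calloc(1, sizeof(*c));

                c->done = cmd_done;
                c->next = pending;
                pending = c;
        }
        flush_all(&pending);
        return 0;
}
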
6473
6474/**
4151 * lpfc_sli_host_down - Vport cleanup function 6475 * lpfc_sli_host_down - Vport cleanup function
4152 * @vport: Pointer to virtual port object. 6476 * @vport: Pointer to virtual port object.
4153 * 6477 *
@@ -4240,9 +6564,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4240 struct lpfc_sli *psli = &phba->sli; 6564 struct lpfc_sli *psli = &phba->sli;
4241 struct lpfc_sli_ring *pring; 6565 struct lpfc_sli_ring *pring;
4242 struct lpfc_dmabuf *buf_ptr; 6566 struct lpfc_dmabuf *buf_ptr;
4243 LPFC_MBOXQ_t *pmb;
4244 int i;
4245 unsigned long flags = 0; 6567 unsigned long flags = 0;
6568 int i;
6569
6570 /* Shutdown the mailbox command sub-system */
6571 lpfc_sli_mbox_sys_shutdown(phba);
4246 6572
4247 lpfc_hba_down_prep(phba); 6573 lpfc_hba_down_prep(phba);
4248 6574
@@ -4287,28 +6613,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4287 6613
4288 /* Return any active mbox cmds */ 6614 /* Return any active mbox cmds */
4289 del_timer_sync(&psli->mbox_tmo); 6615 del_timer_sync(&psli->mbox_tmo);
4290 spin_lock_irqsave(&phba->hbalock, flags);
4291 6616
4292 spin_lock(&phba->pport->work_port_lock); 6617 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
4293 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6618 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4294 spin_unlock(&phba->pport->work_port_lock); 6619 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
4295 6620
4296 /* Return any pending or completed mbox cmds */ 6621 return 1;
4297 list_splice_init(&phba->sli.mboxq, &completions); 6622}
4298 if (psli->mbox_active) { 6623
4299 list_add_tail(&psli->mbox_active->list, &completions); 6624/**
4300 psli->mbox_active = NULL; 6625 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
4301 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6626 * @phba: Pointer to HBA context object.
4302 } 6627 *
4303 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 6628 * This function cleans up all queues, iocbs, buffers, mailbox commands while
4304 spin_unlock_irqrestore(&phba->hbalock, flags); 6629 * shutting down the SLI4 HBA FCoE function. This function is called with no
6630 * lock held and always returns 1.
6631 *
6632 * This function does the following to cleanup driver FCoE function resources:
6633 * - Free discovery resources for each virtual port
6634 * - Cleanup any pending fabric iocbs
6635 * - Iterate through the iocb txq and free each entry in the list.
6636 * - Free up any buffer posted to the HBA.
6637 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6638 * - Free mailbox commands in the mailbox queue.
6639 **/
6640int
6641lpfc_sli4_hba_down(struct lpfc_hba *phba)
6642{
6643 /* Stop the SLI4 device port */
6644 lpfc_stop_port(phba);
6645
6646 /* Tear down the queues in the HBA */
6647 lpfc_sli4_queue_unset(phba);
6648
6649 /* unregister default FCFI from the HBA */
6650 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4305 6651
4306 while (!list_empty(&completions)) {
4307 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
4308 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4309 if (pmb->mbox_cmpl)
4310 pmb->mbox_cmpl(phba,pmb);
4311 }
4312 return 1; 6652 return 1;
4313} 6653}
4314 6654
@@ -4639,7 +6979,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4639 iabt = &abtsiocbp->iocb; 6979 iabt = &abtsiocbp->iocb;
4640 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 6980 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
4641 iabt->un.acxri.abortContextTag = icmd->ulpContext; 6981 iabt->un.acxri.abortContextTag = icmd->ulpContext;
4642 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 6982 if (phba->sli_rev == LPFC_SLI_REV4)
6983 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
6984 else
6985 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
4643 iabt->ulpLe = 1; 6986 iabt->ulpLe = 1;
4644 iabt->ulpClass = icmd->ulpClass; 6987 iabt->ulpClass = icmd->ulpClass;
4645 6988
@@ -4655,7 +6998,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4655 "abort cmd iotag x%x\n", 6998 "abort cmd iotag x%x\n",
4656 iabt->un.acxri.abortContextTag, 6999 iabt->un.acxri.abortContextTag,
4657 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 7000 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
4658 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 7001 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
4659 7002
4660 if (retval) 7003 if (retval)
4661 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7004 __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -4838,7 +7181,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4838 cmd = &iocbq->iocb; 7181 cmd = &iocbq->iocb;
4839 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 7182 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
4840 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 7183 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
4841 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 7184 if (phba->sli_rev == LPFC_SLI_REV4)
7185 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
7186 else
7187 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
4842 abtsiocb->iocb.ulpLe = 1; 7188 abtsiocb->iocb.ulpLe = 1;
4843 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7189 abtsiocb->iocb.ulpClass = cmd->ulpClass;
4844 abtsiocb->vport = phba->pport; 7190 abtsiocb->vport = phba->pport;
@@ -4850,7 +7196,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4850 7196
4851 /* Setup callback routine and issue the command. */ 7197 /* Setup callback routine and issue the command. */
4852 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 7198 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4853 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 7199 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
7200 abtsiocb, 0);
4854 if (ret_val == IOCB_ERROR) { 7201 if (ret_val == IOCB_ERROR) {
4855 lpfc_sli_release_iocbq(phba, abtsiocb); 7202 lpfc_sli_release_iocbq(phba, abtsiocb);
4856 errcnt++; 7203 errcnt++;
@@ -4931,7 +7278,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
4931 **/ 7278 **/
4932int 7279int
4933lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 7280lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4934 struct lpfc_sli_ring *pring, 7281 uint32_t ring_number,
4935 struct lpfc_iocbq *piocb, 7282 struct lpfc_iocbq *piocb,
4936 struct lpfc_iocbq *prspiocbq, 7283 struct lpfc_iocbq *prspiocbq,
4937 uint32_t timeout) 7284 uint32_t timeout)
@@ -4962,7 +7309,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4962 readl(phba->HCregaddr); /* flush */ 7309 readl(phba->HCregaddr); /* flush */
4963 } 7310 }
4964 7311
4965 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 7312 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
4966 if (retval == IOCB_SUCCESS) { 7313 if (retval == IOCB_SUCCESS) {
4967 timeout_req = timeout * HZ; 7314 timeout_req = timeout * HZ;
4968 timeleft = wait_event_timeout(done_q, 7315 timeleft = wait_event_timeout(done_q,
@@ -5077,53 +7424,156 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
5077} 7424}
5078 7425
5079/** 7426/**
5080 * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function 7427 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
5081 * @phba: Pointer to HBA context. 7428 * @phba: Pointer to HBA context.
5082 * 7429 *
5083 * This function is called to cleanup any pending mailbox 7430 * This function is called to shutdown the driver's mailbox sub-system.
5084 * objects in the driver queue before bringing the HBA offline. 7431 * It first marks the mailbox sub-system as blocked to prevent any
5085 * This function is called while resetting the HBA. 7432 * asynchronous mailbox command from being issued off the pending mailbox
5086 * The function is called without any lock held. The function 7433 * command queue. If the mailbox command sub-system shutdown is due to
5087 * takes hbalock to update SLI data structure. 7434 * HBA error conditions such as EEH or ERATT, this routine shall invoke
5088 * This function returns 1 when there is an active mailbox 7435 * the mailbox sub-system flush routine to forcefully bring down the
5089 * command pending else returns 0. 7436 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
7437 * as with offline or HBA function reset), this routine will wait for the
7438 * outstanding mailbox command to complete before invoking the mailbox
7439 * sub-system flush routine to gracefully bring down the mailbox sub-system.
5090 **/ 7440 **/
5091int 7441void
5092lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 7442lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
5093{ 7443{
5094 struct lpfc_vport *vport = phba->pport; 7444 struct lpfc_sli *psli = &phba->sli;
5095 int i = 0; 7445 uint8_t actcmd = MBX_HEARTBEAT;
5096 uint32_t ha_copy; 7446 unsigned long timeout;
5097 7447
5098 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 7448 spin_lock_irq(&phba->hbalock);
5099 if (i++ > LPFC_MBOX_TMO * 1000) 7449 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5100 return 1; 7450 spin_unlock_irq(&phba->hbalock);
5101 7451
5102 /* 7452 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5103 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
5104 * did finish. This way we won't get the misleading
5105 * "Stray Mailbox Interrupt" message.
5106 */
5107 spin_lock_irq(&phba->hbalock); 7453 spin_lock_irq(&phba->hbalock);
5108 ha_copy = phba->work_ha; 7454 if (phba->sli.mbox_active)
5109 phba->work_ha &= ~HA_MBATT; 7455 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5110 spin_unlock_irq(&phba->hbalock); 7456 spin_unlock_irq(&phba->hbalock);
7457 /* Determine how long we might wait for the active mailbox
7458 * command to be gracefully completed by firmware.
7459 */
7460 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
7461 1000) + jiffies;
7462 while (phba->sli.mbox_active) {
7463 /* Check active mailbox complete status every 2ms */
7464 msleep(2);
7465 if (time_after(jiffies, timeout))
7466 /* Timeout, let the mailbox flush routine
7467 * forcefully release the active mailbox command
7468 */
7469 break;
7470 }
7471 }
7472 lpfc_sli_mbox_sys_flush(phba);
7473}
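
The graceful path above polls the active-command state every 2ms until either it clears or a jiffies-based deadline derived from lpfc_mbox_tmo_val() expires, and only then falls through to the forced flush. A rough userspace analogue of that bounded wait, with a monotonic clock standing in for jiffies and time_after(); all names here are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool mbox_active = true; /* would be cleared by the completion path */

static void wait_for_idle_or_timeout(long timeout_ms)
{
        struct timespec start, now;
        const struct timespec tick = { .tv_sec = 0, .tv_nsec = 2000000 };

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (mbox_active) {
                nanosleep(&tick, NULL);         /* cf. msleep(2) */
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                        break;                  /* cf. time_after(jiffies, t) */
        }
        printf("proceeding to forced flush\n");
}

int main(void)
{
        wait_for_idle_or_timeout(50);
        return 0;
}
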
7474
7475/**
7476 * lpfc_sli_eratt_read - read sli-3 error attention events
7477 * @phba: Pointer to HBA context.
7478 *
7479 * This function is called to read the SLI3 device error attention registers
7480 * for possible error attention events. The caller must hold the hostlock
7481 * with spin_lock_irq().
7482 *
7483 * This function returns 1 when there is Error Attention in the Host Attention
7484 * Register and returns 0 otherwise.
7485 **/
7486static int
7487lpfc_sli_eratt_read(struct lpfc_hba *phba)
7488{
7489 uint32_t ha_copy;
5111 7490
5112 if (ha_copy & HA_MBATT) 7491 /* Read chip Host Attention (HA) register */
5113 if (lpfc_sli_handle_mb_event(phba) == 0) 7492 ha_copy = readl(phba->HAregaddr);
5114 i = 0; 7493 if (ha_copy & HA_ERATT) {
7494 /* Read host status register to retrieve error event */
7495 lpfc_sli_read_hs(phba);
7496
7497 /* Check if a deferred error condition is active */
7498 if ((HS_FFER1 & phba->work_hs) &&
7499 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7500 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7501 spin_lock_irq(&phba->hbalock);
7502 phba->hba_flag |= DEFER_ERATT;
7503 spin_unlock_irq(&phba->hbalock);
7504 /* Clear all interrupt enable conditions */
7505 writel(0, phba->HCregaddr);
7506 readl(phba->HCregaddr);
7507 }
5115 7508
5116 msleep(1); 7509 /* Set the driver HA work bitmap */
7510 spin_lock_irq(&phba->hbalock);
7511 phba->work_ha |= HA_ERATT;
7512 /* Indicate polling handles this ERATT */
7513 phba->hba_flag |= HBA_ERATT_HANDLED;
7514 spin_unlock_irq(&phba->hbalock);
7515 return 1;
5117 } 7516 }
7517 return 0;
7518}
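
The deferred-error test in the routine above requires HS_FFER1 to be set together with at least one of the other firmware-error bits. A compact sketch of that predicate with placeholder bit values (not the real HS_FFERx definitions):

#include <stdint.h>
#include <stdio.h>

#define FFER1 0x01
#define FFER2 0x02
#define FFER3 0x04
#define FFER4 0x08
#define FFER5 0x10
#define FFER6 0x20
#define FFER7 0x40

/* Deferred error: FFER1 plus any other firmware-error bit */
static int deferred_error(uint32_t work_hs)
{
        return (work_hs & FFER1) &&
               (work_hs & (FFER2 | FFER3 | FFER4 | FFER5 | FFER6 | FFER7));
}

int main(void)
{
        printf("%d %d %d\n",
               deferred_error(FFER1),           /* 0: FFER1 alone */
               deferred_error(FFER1 | FFER6),   /* 1: deferred condition */
               deferred_error(FFER6));          /* 0: no FFER1 */
        return 0;
}
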
7519
7520/**
7521 * lpfc_sli4_eratt_read - read sli-4 error attention events
7522 * @phba: Pointer to HBA context.
7523 *
7524 * This function is called to read the SLI4 device error attention registers
7525 * for possible error attention events. The caller must hold the hostlock
7526 * with spin_lock_irq().
7527 *
7528 * This function returns 1 when there is Error Attention in the Host Attention
7529 * Register and returns 0 otherwise.
7530 **/
7531static int
7532lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7533{
7534 uint32_t uerr_sta_hi, uerr_sta_lo;
7535 uint32_t onlnreg0, onlnreg1;
5118 7536
5119 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 7537 /* For now, use the SLI4 device internal unrecoverable error
7538 * registers for error attention. This can be changed later.
7539 */
7540 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
7541 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
7542 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
7543 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7544 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7545 if (uerr_sta_lo || uerr_sta_hi) {
7546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7547 "1423 HBA Unrecoverable error: "
7548 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7549 "online0_reg=0x%x, online1_reg=0x%x\n",
7550 uerr_sta_lo, uerr_sta_hi,
7551 onlnreg0, onlnreg1);
7552 /* TEMP: as the driver error recovery logic is not
7553 * fully developed, we just log the error message
7554 * and the device error attention action is now
7555 * temporarily disabled.
7556 */
7557 return 0;
7558 phba->work_status[0] = uerr_sta_lo;
7559 phba->work_status[1] = uerr_sta_hi;
7560 spin_lock_irq(&phba->hbalock);
7561 /* Set the driver HA work bitmap */
7562 phba->work_ha |= HA_ERATT;
7563 /* Indicate polling handles this ERATT */
7564 phba->hba_flag |= HBA_ERATT_HANDLED;
7565 spin_unlock_irq(&phba->hbalock);
7566 return 1;
7567 }
7568 }
7569 return 0;
5120} 7570}
5121 7571
5122/** 7572/**
5123 * lpfc_sli_check_eratt - check error attention events 7573 * lpfc_sli_check_eratt - check error attention events
5124 * @phba: Pointer to HBA context. 7574 * @phba: Pointer to HBA context.
5125 * 7575 *
5126 * This function is called form timer soft interrupt context to check HBA's 7576 * This function is called from timer soft interrupt context to check HBA's
5127 * error attention register bit for error attention events. 7577 * error attention register bit for error attention events.
5128 * 7578 *
5129 * This function returns 1 when there is Error Attention in the Host Attention 7579 * This function returns 1 when there is Error Attention in the Host Attention
@@ -5134,10 +7584,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5134{ 7584{
5135 uint32_t ha_copy; 7585 uint32_t ha_copy;
5136 7586
5137 /* If PCI channel is offline, don't process it */
5138 if (unlikely(pci_channel_offline(phba->pcidev)))
5139 return 0;
5140
5141 /* If somebody is waiting to handle an eratt, don't process it 7587 /* If somebody is waiting to handle an eratt, don't process it
5142 * here. The brdkill function will do this. 7588 * here. The brdkill function will do this.
5143 */ 7589 */
@@ -5161,56 +7607,84 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5161 return 0; 7607 return 0;
5162 } 7608 }
5163 7609
5164 /* Read chip Host Attention (HA) register */ 7610 /* If PCI channel is offline, don't process it */
5165 ha_copy = readl(phba->HAregaddr); 7611 if (unlikely(pci_channel_offline(phba->pcidev))) {
5166 if (ha_copy & HA_ERATT) {
5167 /* Read host status register to retrieve error event */
5168 lpfc_sli_read_hs(phba);
5169
5170 /* Check if there is a deferred error condition is active */
5171 if ((HS_FFER1 & phba->work_hs) &&
5172 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5173 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5174 phba->hba_flag |= DEFER_ERATT;
5175 /* Clear all interrupt enable conditions */
5176 writel(0, phba->HCregaddr);
5177 readl(phba->HCregaddr);
5178 }
5179
5180 /* Set the driver HA work bitmap */
5181 phba->work_ha |= HA_ERATT;
5182 /* Indicate polling handles this ERATT */
5183 phba->hba_flag |= HBA_ERATT_HANDLED;
5184 spin_unlock_irq(&phba->hbalock); 7612 spin_unlock_irq(&phba->hbalock);
5185 return 1; 7613 return 0;
7614 }
7615
7616 switch (phba->sli_rev) {
7617 case LPFC_SLI_REV2:
7618 case LPFC_SLI_REV3:
7619 /* Read chip Host Attention (HA) register */
7620 ha_copy = lpfc_sli_eratt_read(phba);
7621 break;
7622 case LPFC_SLI_REV4:
7623 /* Read device Unrecoverable Error (UERR) registers */
7624 ha_copy = lpfc_sli4_eratt_read(phba);
7625 break;
7626 default:
7627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7628 "0299 Invalid SLI revision (%d)\n",
7629 phba->sli_rev);
7630 ha_copy = 0;
7631 break;
5186 } 7632 }
5187 spin_unlock_irq(&phba->hbalock); 7633 spin_unlock_irq(&phba->hbalock);
7634
7635 return ha_copy;
7636}
7637
7638/**
7639 * lpfc_intr_state_check - Check device state for interrupt handling
7640 * @phba: Pointer to HBA context.
7641 *
7642 * This inline routine checks whether a device or its PCI slot is in a state
7643 * that the interrupt should be handled.
7644 *
7645 * This function returns 0 if the device or the PCI slot is in a state that
7646 * interrupt should be handled, otherwise -EIO.
7647 */
7648static inline int
7649lpfc_intr_state_check(struct lpfc_hba *phba)
7650{
7651 /* If the pci channel is offline, ignore all the interrupts */
7652 if (unlikely(pci_channel_offline(phba->pcidev)))
7653 return -EIO;
7654
7655 /* Update device level interrupt statistics */
7656 phba->sli.slistat.sli_intr++;
7657
7658 /* Ignore all interrupts during initialization. */
7659 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7660 return -EIO;
7661
5188 return 0; 7662 return 0;
5189} 7663}
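
lpfc_intr_state_check() centralizes the cheap gating that every handler used to repeat inline: bail out if the PCI slot is offline, bump the interrupt statistic, and bail out while the link is still initializing. A sketch of the same gating in isolation, with illustrative state fields:

#include <stdio.h>

struct dev_state {
        int pci_offline;                /* slot in error recovery */
        int link_state;                 /* < 0 models "still initializing" */
        unsigned long intr_count;       /* device-level interrupt statistics */
};

static int intr_state_check(struct dev_state *d)
{
        if (d->pci_offline)
                return -1;              /* cf. -EIO */
        d->intr_count++;
        if (d->link_state < 0)
                return -1;              /* ignore during initialization */
        return 0;
}

int main(void)
{
        struct dev_state d = { .pci_offline = 0, .link_state = 1 };

        printf("handle=%d count=%lu\n", intr_state_check(&d), d.intr_count);
        return 0;
}
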
5190 7664
5191/** 7665/**
5192 * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver 7666 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
5193 * @irq: Interrupt number. 7667 * @irq: Interrupt number.
5194 * @dev_id: The device context pointer. 7668 * @dev_id: The device context pointer.
5195 * 7669 *
5196 * This function is directly called from the PCI layer as an interrupt 7670 * This function is directly called from the PCI layer as an interrupt
5197 * service routine when the device is enabled with MSI-X multi-message 7671 * service routine when device with SLI-3 interface spec is enabled with
5198 * interrupt mode and there are slow-path events in the HBA. However, 7672 * MSI-X multi-message interrupt mode and there are slow-path events in
5199 * when the device is enabled with either MSI or Pin-IRQ interrupt mode, 7673 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
5200 * this function is called as part of the device-level interrupt handler. 7674 * interrupt mode, this function is called as part of the device-level
5201 * When the PCI slot is in error recovery or the HBA is undergoing 7675 * interrupt handler. When the PCI slot is in error recovery or the HBA
5202 * initialization, the interrupt handler will not process the interrupt. 7676 * is undergoing initialization, the interrupt handler will not process
5203 * The link attention and ELS ring attention events are handled by the 7677 * the interrupt. The link attention and ELS ring attention events are
5204 * worker thread. The interrupt handler signals the worker thread and 7678 * handled by the worker thread. The interrupt handler signals the worker
5205 * and returns for these events. This function is called without any 7679 * thread and returns for these events. This function is called without
5206 * lock held. It gets the hbalock to access and update SLI data 7680 * any lock held. It gets the hbalock to access and update SLI data
5207 * structures. 7681 * structures.
5208 * 7682 *
5209 * This function returns IRQ_HANDLED when interrupt is handled else it 7683 * This function returns IRQ_HANDLED when interrupt is handled else it
5210 * returns IRQ_NONE. 7684 * returns IRQ_NONE.
5211 **/ 7685 **/
5212irqreturn_t 7686irqreturn_t
5213lpfc_sp_intr_handler(int irq, void *dev_id) 7687lpfc_sli_sp_intr_handler(int irq, void *dev_id)
5214{ 7688{
5215 struct lpfc_hba *phba; 7689 struct lpfc_hba *phba;
5216 uint32_t ha_copy; 7690 uint32_t ha_copy;
@@ -5240,13 +7714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5240 * individual interrupt handler in MSI-X multi-message interrupt mode 7714 * individual interrupt handler in MSI-X multi-message interrupt mode
5241 */ 7715 */
5242 if (phba->intr_type == MSIX) { 7716 if (phba->intr_type == MSIX) {
5243 /* If the pci channel is offline, ignore all the interrupts */ 7717 /* Check device state for handling interrupt */
5244 if (unlikely(pci_channel_offline(phba->pcidev))) 7718 if (lpfc_intr_state_check(phba))
5245 return IRQ_NONE;
5246 /* Update device-level interrupt statistics */
5247 phba->sli.slistat.sli_intr++;
5248 /* Ignore all interrupts during initialization. */
5249 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5250 return IRQ_NONE; 7719 return IRQ_NONE;
5251 /* Need to read HA REG for slow-path events */ 7720 /* Need to read HA REG for slow-path events */
5252 spin_lock_irqsave(&phba->hbalock, iflag); 7721 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +7740,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5271 * interrupt. 7740 * interrupt.
5272 */ 7741 */
5273 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7742 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5274 spin_unlock_irq(&phba->hbalock); 7743 spin_unlock_irqrestore(&phba->hbalock, iflag);
5275 return IRQ_NONE; 7744 return IRQ_NONE;
5276 } 7745 }
5277 7746
@@ -5364,7 +7833,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5364 7833
5365 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 7834 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
5366 pmb = phba->sli.mbox_active; 7835 pmb = phba->sli.mbox_active;
5367 pmbox = &pmb->mb; 7836 pmbox = &pmb->u.mb;
5368 mbox = phba->mbox; 7837 mbox = phba->mbox;
5369 vport = pmb->vport; 7838 vport = pmb->vport;
5370 7839
@@ -5434,7 +7903,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5434 LOG_MBOX | LOG_SLI, 7903 LOG_MBOX | LOG_SLI,
5435 "0350 rc should have" 7904 "0350 rc should have"
5436 "been MBX_BUSY"); 7905 "been MBX_BUSY");
5437 goto send_current_mbox; 7906 if (rc != MBX_NOT_FINISHED)
7907 goto send_current_mbox;
5438 } 7908 }
5439 } 7909 }
5440 spin_lock_irqsave( 7910 spin_lock_irqsave(
@@ -5471,29 +7941,29 @@ send_current_mbox:
5471 } 7941 }
5472 return IRQ_HANDLED; 7942 return IRQ_HANDLED;
5473 7943
5474} /* lpfc_sp_intr_handler */ 7944} /* lpfc_sli_sp_intr_handler */
5475 7945
5476/** 7946/**
5477 * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver 7947 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
5478 * @irq: Interrupt number. 7948 * @irq: Interrupt number.
5479 * @dev_id: The device context pointer. 7949 * @dev_id: The device context pointer.
5480 * 7950 *
5481 * This function is directly called from the PCI layer as an interrupt 7951 * This function is directly called from the PCI layer as an interrupt
5482 * service routine when the device is enabled with MSI-X multi-message 7952 * service routine when device with SLI-3 interface spec is enabled with
5483 * interrupt mode and there is a fast-path FCP IOCB ring event in the 7953 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
5484 * HBA. However, when the device is enabled with either MSI or Pin-IRQ 7954 * ring event in the HBA. However, when the device is enabled with either
5485 * interrupt mode, this function is called as part of the device-level 7955 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
5486 * interrupt handler. When the PCI slot is in error recovery or the HBA 7956 * device-level interrupt handler. When the PCI slot is in error recovery
5487 * is undergoing initialization, the interrupt handler will not process 7957 * or the HBA is undergoing initialization, the interrupt handler will not
5488 * the interrupt. The SCSI FCP fast-path ring events are handled in the 7958 * process the interrupt. The SCSI FCP fast-path ring events are handled in
5489 * interrupt context. This function is called without any lock held. It 7959 * the interrupt context. This function is called without any lock held.
5490 * gets the hbalock to access and update SLI data structures. 7960 * It gets the hbalock to access and update SLI data structures.
5491 * 7961 *
5492 * This function returns IRQ_HANDLED when interrupt is handled else it 7962 * This function returns IRQ_HANDLED when interrupt is handled else it
5493 * returns IRQ_NONE. 7963 * returns IRQ_NONE.
5494 **/ 7964 **/
5495irqreturn_t 7965irqreturn_t
5496lpfc_fp_intr_handler(int irq, void *dev_id) 7966lpfc_sli_fp_intr_handler(int irq, void *dev_id)
5497{ 7967{
5498 struct lpfc_hba *phba; 7968 struct lpfc_hba *phba;
5499 uint32_t ha_copy; 7969 uint32_t ha_copy;
@@ -5513,13 +7983,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5513 * individual interrupt handler in MSI-X multi-message interrupt mode 7983 * individual interrupt handler in MSI-X multi-message interrupt mode
5514 */ 7984 */
5515 if (phba->intr_type == MSIX) { 7985 if (phba->intr_type == MSIX) {
5516 /* If pci channel is offline, ignore all the interrupts */ 7986 /* Check device state for handling interrupt */
5517 if (unlikely(pci_channel_offline(phba->pcidev))) 7987 if (lpfc_intr_state_check(phba))
5518 return IRQ_NONE;
5519 /* Update device-level interrupt statistics */
5520 phba->sli.slistat.sli_intr++;
5521 /* Ignore all interrupts during initialization. */
5522 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5523 return IRQ_NONE; 7988 return IRQ_NONE;
5524 /* Need to read HA REG for FCP ring and other ring events */ 7989 /* Need to read HA REG for FCP ring and other ring events */
5525 ha_copy = readl(phba->HAregaddr); 7990 ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +7995,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5530 * any interrupt. 7995 * any interrupt.
5531 */ 7996 */
5532 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7997 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5533 spin_unlock_irq(&phba->hbalock); 7998 spin_unlock_irqrestore(&phba->hbalock, iflag);
5534 return IRQ_NONE; 7999 return IRQ_NONE;
5535 } 8000 }
5536 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8001 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +8031,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5566 } 8031 }
5567 } 8032 }
5568 return IRQ_HANDLED; 8033 return IRQ_HANDLED;
5569} /* lpfc_fp_intr_handler */ 8034} /* lpfc_sli_fp_intr_handler */
5570 8035
5571/** 8036/**
5572 * lpfc_intr_handler - The device-level interrupt handler of lpfc driver 8037 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
5573 * @irq: Interrupt number. 8038 * @irq: Interrupt number.
5574 * @dev_id: The device context pointer. 8039 * @dev_id: The device context pointer.
5575 * 8040 *
5576 * This function is the device-level interrupt handler called from the PCI 8041 * This function is the HBA device-level interrupt handler to device with
5577 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is 8042 * SLI-3 interface spec, called from the PCI layer when either MSI or
5578 * an event in the HBA which requires driver attention. This function 8043 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
5579 * invokes the slow-path interrupt attention handling function and fast-path 8044 * requires driver attention. This function invokes the slow-path interrupt
5580 * interrupt attention handling function in turn to process the relevant 8045 * attention handling function and fast-path interrupt attention handling
5581 * HBA attention events. This function is called without any lock held. It 8046 * function in turn to process the relevant HBA attention events. This
5582 * gets the hbalock to access and update SLI data structures. 8047 * function is called without any lock held. It gets the hbalock to access
8048 * and update SLI data structures.
5583 * 8049 *
5584 * This function returns IRQ_HANDLED when interrupt is handled, else it 8050 * This function returns IRQ_HANDLED when interrupt is handled, else it
5585 * returns IRQ_NONE. 8051 * returns IRQ_NONE.
5586 **/ 8052 **/
5587irqreturn_t 8053irqreturn_t
5588lpfc_intr_handler(int irq, void *dev_id) 8054lpfc_sli_intr_handler(int irq, void *dev_id)
5589{ 8055{
5590 struct lpfc_hba *phba; 8056 struct lpfc_hba *phba;
5591 irqreturn_t sp_irq_rc, fp_irq_rc; 8057 irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +8066,8 @@ lpfc_intr_handler(int irq, void *dev_id)
5600 if (unlikely(!phba)) 8066 if (unlikely(!phba))
5601 return IRQ_NONE; 8067 return IRQ_NONE;
5602 8068
5603 /* If the pci channel is offline, ignore all the interrupts. */ 8069 /* Check device state for handling interrupt */
5604 if (unlikely(pci_channel_offline(phba->pcidev))) 8070 if (lpfc_intr_state_check(phba))
5605 return IRQ_NONE;
5606
5607 /* Update device level interrupt statistics */
5608 phba->sli.slistat.sli_intr++;
5609
5610 /* Ignore all interrupts during initialization. */
5611 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5612 return IRQ_NONE; 8071 return IRQ_NONE;
5613 8072
5614 spin_lock(&phba->hbalock); 8073 spin_lock(&phba->hbalock);
@@ -5650,7 +8109,7 @@ lpfc_intr_handler(int irq, void *dev_id)
5650 status2 >>= (4*LPFC_ELS_RING); 8109 status2 >>= (4*LPFC_ELS_RING);
5651 8110
5652 if (status1 || (status2 & HA_RXMASK)) 8111 if (status1 || (status2 & HA_RXMASK))
5653 sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); 8112 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
5654 else 8113 else
5655 sp_irq_rc = IRQ_NONE; 8114 sp_irq_rc = IRQ_NONE;
5656 8115
@@ -5670,10 +8129,3322 @@ lpfc_intr_handler(int irq, void *dev_id)
5670 status2 = 0; 8129 status2 = 0;
5671 8130
5672 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 8131 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
5673 fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); 8132 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
5674 else 8133 else
5675 fp_irq_rc = IRQ_NONE; 8134 fp_irq_rc = IRQ_NONE;
5676 8135
5677 /* Return device-level interrupt handling status */ 8136 /* Return device-level interrupt handling status */
5678 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 8137 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
5679} /* lpfc_intr_handler */ 8138} /* lpfc_sli_intr_handler */
8139
8140/**
8141 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
8142 * @phba: pointer to lpfc hba data structure.
8143 *
8144 * This routine is invoked by the worker thread to process all the pending
8145 * SLI4 FCP abort XRI events.
8146 **/
8147void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
8148{
8149 struct lpfc_cq_event *cq_event;
8150
8151 /* First, declare the fcp xri abort event has been handled */
8152 spin_lock_irq(&phba->hbalock);
8153 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
8154 spin_unlock_irq(&phba->hbalock);
8155 /* Now, handle all the fcp xri abort events */
8156 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
8157 /* Get the first event from the head of the event queue */
8158 spin_lock_irq(&phba->hbalock);
8159 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8160 cq_event, struct lpfc_cq_event, list);
8161 spin_unlock_irq(&phba->hbalock);
8162 /* Notify aborted XRI for FCP work queue */
8163 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8164 /* Free the event processed back to the free pool */
8165 lpfc_sli4_cq_event_release(phba, cq_event);
8166 }
8167}
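
The drain loop above holds the lock only long enough to detach one event from the head of the queue, then processes it with the lock released, so the per-event handler is free to sleep or take other locks. A simplified model using a pthread mutex and a plain list in place of hbalock and list_head:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
        struct event *next;
        int payload;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *queue;

static void drain_events(void)
{
        for (;;) {
                struct event *ev;

                pthread_mutex_lock(&lock);
                ev = queue;                     /* remove head under the lock */
                if (ev)
                        queue = ev->next;
                pthread_mutex_unlock(&lock);
                if (!ev)
                        break;
                /* lock dropped: safe to do heavyweight handling here */
                printf("handling event %d\n", ev->payload);
                free(ev);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct event *ev = malloc(sizeof(*ev));

                ev->payload = i;
                ev->next = queue;
                queue = ev;
        }
        drain_events();
        return 0;
}
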
8168
8169/**
8170 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
8171 * @phba: pointer to lpfc hba data structure.
8172 *
8173 * This routine is invoked by the worker thread to process all the pending
8174 * SLI4 els abort xri events.
8175 **/
8176void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8177{
8178 struct lpfc_cq_event *cq_event;
8179
8180 /* First, declare the els xri abort event has been handled */
8181 spin_lock_irq(&phba->hbalock);
8182 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
8183 spin_unlock_irq(&phba->hbalock);
8184 /* Now, handle all the els xri abort events */
8185 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
8186 /* Get the first event from the head of the event queue */
8187 spin_lock_irq(&phba->hbalock);
8188 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8189 cq_event, struct lpfc_cq_event, list);
8190 spin_unlock_irq(&phba->hbalock);
8191 /* Notify aborted XRI for ELS work queue */
8192 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8193 /* Free the event processed back to the free pool */
8194 lpfc_sli4_cq_event_release(phba, cq_event);
8195 }
8196}
8197
8198static void
8199lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8200 struct lpfc_iocbq *pIocbOut,
8201 struct lpfc_wcqe_complete *wcqe)
8202{
8203 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8204
8205 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8206 sizeof(struct lpfc_iocbq) - offset);
8207 memset(&pIocbIn->sli4_info, 0,
8208 sizeof(struct lpfc_sli4_rspiocb_info));
8209 /* Map WCQE parameters into irspiocb parameters */
8210 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8211 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
8212 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
8213 pIocbIn->iocb.un.fcpi.fcpi_parm =
8214 pIocbOut->iocb.un.fcpi.fcpi_parm -
8215 wcqe->total_data_placed;
8216 else
8217 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8218 else
8219 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8220 /* Load in additional WCQE parameters */
8221 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
8222 pIocbIn->sli4_info.bfield = 0;
8223 if (bf_get(lpfc_wcqe_c_xb, wcqe))
8224 pIocbIn->sli4_info.bfield |= LPFC_XB;
8225 if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
8226 pIocbIn->sli4_info.bfield |= LPFC_PV;
8227 pIocbIn->sli4_info.priority =
8228 bf_get(lpfc_wcqe_c_priority, wcqe);
8229 }
8230}
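
lpfc_sli4_iocb_param_transfer() copies everything from the iocb member onward, deliberately leaving the leading fields of the destination (list linkage, tags) untouched before overlaying the WCQE status. The offsetof() idiom it uses can be shown on a toy struct; the layout below is illustrative, not the real lpfc_iocbq:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct iocb_like {
        void *list_node;        /* must survive the copy */
        int iotag;              /* must survive the copy */
        int status;             /* copied from here onward */
        int params[4];
};

/* Copy the tail of *src into *dst, starting at the "status" member */
static void param_transfer(struct iocb_like *dst, const struct iocb_like *src)
{
        size_t off = offsetof(struct iocb_like, status);

        memcpy((char *)dst + off, (const char *)src + off, sizeof(*dst) - off);
}

int main(void)
{
        struct iocb_like src = { .status = 7, .params = {1, 2, 3, 4} };
        struct iocb_like dst = { .iotag = 99 };

        param_transfer(&dst, &src);
        printf("iotag=%d (preserved) status=%d (copied)\n",
               dst.iotag, dst.status);
        return 0;
}
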
8231
8232/**
8233 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
8234 * @phba: Pointer to HBA context object.
8235 * @mcqe: Pointer to mailbox completion queue entry.
8236 *
8237 * This routine processes a mailbox completion queue entry with an
8238 * asynchronous event.
8239 *
8240 * Return: true if work posted to worker thread, otherwise false.
8241 **/
8242static bool
8243lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8244{
8245 struct lpfc_cq_event *cq_event;
8246 unsigned long iflags;
8247
8248 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8249 "0392 Async Event: word0:x%x, word1:x%x, "
8250 "word2:x%x, word3:x%x\n", mcqe->word0,
8251 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
8252
8253 /* Allocate a new internal CQ_EVENT entry */
8254 cq_event = lpfc_sli4_cq_event_alloc(phba);
8255 if (!cq_event) {
8256 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8257 "0394 Failed to allocate CQ_EVENT entry\n");
8258 return false;
8259 }
8260
8261 /* Move the CQE into an asynchronous event entry */
8262 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
8263 spin_lock_irqsave(&phba->hbalock, iflags);
8264 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
8265 /* Set the async event flag */
8266 phba->hba_flag |= ASYNC_EVENT;
8267 spin_unlock_irqrestore(&phba->hbalock, iflags);
8268
8269 return true;
8270}
8271
8272/**
8273 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
8274 * @phba: Pointer to HBA context object.
8275 * @mcqe: Pointer to mailbox completion queue entry.
8276 *
8277 * This routine processes a mailbox completion queue entry with a mailbox
8278 * completion event.
8279 *
8280 * Return: true if work posted to worker thread, otherwise false.
8281 **/
8282static bool
8283lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8284{
8285 uint32_t mcqe_status;
8286 MAILBOX_t *mbox, *pmbox;
8287 struct lpfc_mqe *mqe;
8288 struct lpfc_vport *vport;
8289 struct lpfc_nodelist *ndlp;
8290 struct lpfc_dmabuf *mp;
8291 unsigned long iflags;
8292 LPFC_MBOXQ_t *pmb;
8293 bool workposted = false;
8294 int rc;
8295
8296 /* If not a mailbox-complete MCQE, bail out after checking mailbox consume */
8297 if (!bf_get(lpfc_trailer_completed, mcqe))
8298 goto out_no_mqe_complete;
8299
8300 /* Get the reference to the active mbox command */
8301 spin_lock_irqsave(&phba->hbalock, iflags);
8302 pmb = phba->sli.mbox_active;
8303 if (unlikely(!pmb)) {
8304 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8305 "1832 No pending MBOX command to handle\n");
8306 spin_unlock_irqrestore(&phba->hbalock, iflags);
8307 goto out_no_mqe_complete;
8308 }
8309 spin_unlock_irqrestore(&phba->hbalock, iflags);
8310 mqe = &pmb->u.mqe;
8311 pmbox = (MAILBOX_t *)&pmb->u.mqe;
8312 mbox = phba->mbox;
8313 vport = pmb->vport;
8314
8315 /* Reset heartbeat timer */
8316 phba->last_completion_time = jiffies;
8317 del_timer(&phba->sli.mbox_tmo);
8318
8319 /* Move mbox data to caller's mailbox region, do endian swapping */
8320 if (pmb->mbox_cmpl && mbox)
8321 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8322 /* Set the mailbox status with SLI4 range 0x4000 */
8323 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8324 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8325 bf_set(lpfc_mqe_status, mqe,
8326 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8327
8328 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8329 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8330 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8331 "MBOX dflt rpi: status:x%x rpi:x%x",
8332 mcqe_status,
8333 pmbox->un.varWords[0], 0);
8334 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8335 mp = (struct lpfc_dmabuf *)(pmb->context1);
8336 ndlp = (struct lpfc_nodelist *)pmb->context2;
8337 /* Reg_LOGIN of dflt RPI was successful. Now let's get
8338 * rid of the RPI using the same mbox buffer.
8339 */
8340 lpfc_unreg_login(phba, vport->vpi,
8341 pmbox->un.varWords[0], pmb);
8342 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
8343 pmb->context1 = mp;
8344 pmb->context2 = ndlp;
8345 pmb->vport = vport;
8346 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8347 if (rc != MBX_BUSY)
8348 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8349 LOG_SLI, "0385 rc should "
8350 "have been MBX_BUSY\n");
8351 if (rc != MBX_NOT_FINISHED)
8352 goto send_current_mbox;
8353 }
8354 }
8355 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8356 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8357 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8358
8359 /* There is mailbox completion work to do */
8360 spin_lock_irqsave(&phba->hbalock, iflags);
8361 __lpfc_mbox_cmpl_put(phba, pmb);
8362 phba->work_ha |= HA_MBATT;
8363 spin_unlock_irqrestore(&phba->hbalock, iflags);
8364 workposted = true;
8365
8366send_current_mbox:
8367 spin_lock_irqsave(&phba->hbalock, iflags);
8368 /* Release the mailbox command posting token */
8369 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8370 /* Setting active mailbox pointer need to be in sync to flag clear */
8371 phba->sli.mbox_active = NULL;
8372 spin_unlock_irqrestore(&phba->hbalock, iflags);
8373 /* Wake up worker thread to post the next pending mailbox command */
8374 lpfc_worker_wake_up(phba);
8375out_no_mqe_complete:
8376 if (bf_get(lpfc_trailer_consumed, mcqe))
8377 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
8378 return workposted;
8379}
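
The completion path above copies the mailbox data back to the caller with lpfc_sli_pcimem_bcopy(), a word-oriented copy that performs any needed endian conversion. A simplified model of a word-by-word swapping copy follows; the real routine's exact swap policy differs, so treat this as an assumption-laden sketch of the idea only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t swap32(uint32_t v)
{
        return (v >> 24) | ((v >> 8) & 0x0000ff00) |
               ((v << 8) & 0x00ff0000) | (v << 24);
}

/* Copy "words" 32-bit words, byte-swapping each one on the way through */
static void wordswap_copy(uint32_t *dst, const uint32_t *src, size_t words)
{
        for (size_t i = 0; i < words; i++)
                dst[i] = swap32(src[i]);
}

int main(void)
{
        uint32_t src[2] = { 0x11223344, 0xaabbccdd };
        uint32_t dst[2];

        wordswap_copy(dst, src, 2);
        printf("0x%08x 0x%08x\n", dst[0], dst[1]);
        return 0;
}
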
8380
8381/**
8382 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
8383 * @phba: Pointer to HBA context object.
8384 * @cqe: Pointer to mailbox completion queue entry.
8385 *
8386 * This routine processes a mailbox completion queue entry; it invokes the
8387 * proper mailbox completion handling or asynchronous event handling routine
8388 * according to the MCQE's async bit.
8389 *
8390 * Return: true if work posted to worker thread, otherwise false.
8391 **/
8392static bool
8393lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8394{
8395 struct lpfc_mcqe mcqe;
8396 bool workposted;
8397
8398 /* Copy the mailbox MCQE and convert endian order as needed */
8399 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8400
8401 /* Invoke the proper event handling routine */
8402 if (!bf_get(lpfc_trailer_async, &mcqe))
8403 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8404 else
8405 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8406 return workposted;
8407}
8408
8409/**
8410 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
8411 * @phba: Pointer to HBA context object.
8412 * @wcqe: Pointer to work-queue completion queue entry.
8413 *
8414 * This routine handles an ELS work-queue completion event.
8415 *
8416 * Return: true if work posted to worker thread, otherwise false.
8417 **/
8418static bool
8419lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8420 struct lpfc_wcqe_complete *wcqe)
8421{
8422 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8423 struct lpfc_iocbq *cmdiocbq;
8424 struct lpfc_iocbq *irspiocbq;
8425 unsigned long iflags;
8426 bool workposted = false;
8427
8428 spin_lock_irqsave(&phba->hbalock, iflags);
8429 pring->stats.iocb_event++;
8430 /* Look up the ELS command IOCB and create pseudo response IOCB */
8431 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8432 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8433 spin_unlock_irqrestore(&phba->hbalock, iflags);
8434
8435 if (unlikely(!cmdiocbq)) {
8436 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8437 "0386 ELS complete with no corresponding "
8438 "cmdiocb: iotag (%d)\n",
8439 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8440 return workposted;
8441 }
8442
8443 /* Fake the irspiocbq and copy necessary response information */
8444 irspiocbq = lpfc_sli_get_iocbq(phba);
8445 if (!irspiocbq) {
8446 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8447 "0387 Failed to allocate an iocbq\n");
8448 return workposted;
8449 }
8450 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8451
8452 /* Add the irspiocb to the response IOCB work list */
8453 spin_lock_irqsave(&phba->hbalock, iflags);
8454 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
8455 /* Indicate ELS ring attention */
8456 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
8457 spin_unlock_irqrestore(&phba->hbalock, iflags);
8458 workposted = true;
8459
8460 return workposted;
8461}
8462
8463/**
8464 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8465 * @phba: Pointer to HBA context object.
8466 * @wcqe: Pointer to work-queue completion queue entry.
8467 *
8468 * This routine handles a slow-path WQ entry consumed event by invoking the
8469 * proper WQ release routine to the slow-path WQ.
8470 **/
8471static void
8472lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8473 struct lpfc_wcqe_release *wcqe)
8474{
8475 /* Check for the slow-path ELS work queue */
8476 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8477 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8478 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8479 else
8480 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8481				"2579 Slow-path wqe consume event carries "
8482				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8483				bf_get(lpfc_wcqe_r_wq_id, wcqe),
8484				phba->sli4_hba.els_wq->queue_id);
8485}
8486
8487/**
8488 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
8489 * @phba: Pointer to HBA context object.
8490 * @cq: Pointer to a WQ completion queue.
8491 * @wcqe: Pointer to work-queue completion queue entry.
8492 *
8493 * This routine handles an XRI abort event.
8494 *
8495 * Return: true if work posted to worker thread, otherwise false.
8496 **/
8497static bool
8498lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8499 struct lpfc_queue *cq,
8500 struct sli4_wcqe_xri_aborted *wcqe)
8501{
8502 bool workposted = false;
8503 struct lpfc_cq_event *cq_event;
8504 unsigned long iflags;
8505
8506 /* Allocate a new internal CQ_EVENT entry */
8507 cq_event = lpfc_sli4_cq_event_alloc(phba);
8508 if (!cq_event) {
8509 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8510 "0602 Failed to allocate CQ_EVENT entry\n");
8511 return false;
8512 }
8513
8514 /* Move the CQE into the proper xri abort event list */
8515 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
8516 switch (cq->subtype) {
8517 case LPFC_FCP:
8518 spin_lock_irqsave(&phba->hbalock, iflags);
8519 list_add_tail(&cq_event->list,
8520 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
8521 /* Set the fcp xri abort event flag */
8522 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
8523 spin_unlock_irqrestore(&phba->hbalock, iflags);
8524 workposted = true;
8525 break;
8526 case LPFC_ELS:
8527 spin_lock_irqsave(&phba->hbalock, iflags);
8528 list_add_tail(&cq_event->list,
8529 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
8530 /* Set the els xri abort event flag */
8531 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
8532 spin_unlock_irqrestore(&phba->hbalock, iflags);
8533 workposted = true;
8534 break;
8535 default:
8536 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8537 "0603 Invalid work queue CQE subtype (x%x)\n",
8538 cq->subtype);
8539 workposted = false;
8540 break;
8541 }
8542 return workposted;
8543}
8544
8545/**
8546 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8547 * @phba: Pointer to HBA context object.
8548 * @cq: Pointer to the completion queue.
8549 * @wcqe: Pointer to a completion queue entry.
8550 *
8551 * This routine processes a slow-path work-queue completion queue entry.
8552 *
8553 * Return: true if work posted to worker thread, otherwise false.
8554 **/
8555static bool
8556lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8557 struct lpfc_cqe *cqe)
8558{
8559 struct lpfc_wcqe_complete wcqe;
8560 bool workposted = false;
8561
8562 /* Copy the work queue CQE and convert endian order if needed */
8563 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8564
8565 /* Check and process for different type of WCQE and dispatch */
8566 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8567 case CQE_CODE_COMPL_WQE:
8568 /* Process the WQ complete event */
8569 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8570 (struct lpfc_wcqe_complete *)&wcqe);
8571 break;
8572 case CQE_CODE_RELEASE_WQE:
8573 /* Process the WQ release event */
8574 lpfc_sli4_sp_handle_rel_wcqe(phba,
8575 (struct lpfc_wcqe_release *)&wcqe);
8576 break;
8577 case CQE_CODE_XRI_ABORTED:
8578 /* Process the WQ XRI abort event */
8579 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8580 (struct sli4_wcqe_xri_aborted *)&wcqe);
8581 break;
8582 default:
8583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8584 "0388 Not a valid WCQE code: x%x\n",
8585 bf_get(lpfc_wcqe_c_code, &wcqe));
8586 break;
8587 }
8588 return workposted;
8589}
8590
8591/**
8592 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8593 * @phba: Pointer to HBA context object.
8594 * @rcqe: Pointer to receive-queue completion queue entry.
8595 *
8596 * This routine processes a receive-queue completion queue entry.
8597 *
8598 * Return: true if work posted to worker thread, otherwise false.
8599 **/
8600static bool
8601lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8602{
8603 struct lpfc_rcqe rcqe;
8604 bool workposted = false;
8605 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8606 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8607 struct hbq_dmabuf *dma_buf;
8608 uint32_t status;
8609 unsigned long iflags;
8610
8611 /* Copy the receive queue CQE and convert endian order if needed */
8612 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8613 lpfc_sli4_rq_release(hrq, drq);
8614 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8615 goto out;
8616 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8617 goto out;
8618
8619 status = bf_get(lpfc_rcqe_status, &rcqe);
8620 switch (status) {
8621 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8623 "2537 Receive Frame Truncated!!\n");
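		/* Fall through: the truncated frame is still queued for processing */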
8624 case FC_STATUS_RQ_SUCCESS:
8625 spin_lock_irqsave(&phba->hbalock, iflags);
8626 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8627 if (!dma_buf) {
8628 spin_unlock_irqrestore(&phba->hbalock, iflags);
8629 goto out;
8630 }
8631 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
8632		/* save off the frame for the worker thread to process */
8633 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
8634 /* Frame received */
8635 phba->hba_flag |= HBA_RECEIVE_BUFFER;
8636 spin_unlock_irqrestore(&phba->hbalock, iflags);
8637 workposted = true;
8638 break;
8639 case FC_STATUS_INSUFF_BUF_NEED_BUF:
8640 case FC_STATUS_INSUFF_BUF_FRM_DISC:
8641 /* Post more buffers if possible */
8642 spin_lock_irqsave(&phba->hbalock, iflags);
8643 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
8644 spin_unlock_irqrestore(&phba->hbalock, iflags);
8645 workposted = true;
8646 break;
8647 }
8648out:
8649 return workposted;
8650
8651}
8652
8653/**
8654 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
8655 * @phba: Pointer to HBA context object.
8656 * @eqe: Pointer to slow-path event queue entry.
8657 *
8658 * This routine processes an event queue entry from the slow-path event
8659 * queue. It checks the MajorCode and MinorCode to determine whether this
8660 * is a completion event on a completion queue; if not, an error is logged
8661 * and the routine returns. Otherwise, it finds the corresponding
8662 * completion queue, processes all the entries on that completion queue,
8663 * re-arms the completion queue, and then returns.
8664 *
8665 **/
8666static void
8667lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8668{
8669 struct lpfc_queue *cq = NULL, *childq, *speq;
8670 struct lpfc_cqe *cqe;
8671 bool workposted = false;
8672 int ecount = 0;
8673 uint16_t cqid;
8674
8675 if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
8676 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8678 "0359 Not a valid slow-path completion "
8679 "event: majorcode=x%x, minorcode=x%x\n",
8680 bf_get(lpfc_eqe_major_code, eqe),
8681 bf_get(lpfc_eqe_minor_code, eqe));
8682 return;
8683 }
8684
8685 /* Get the reference to the corresponding CQ */
8686 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8687
8688 /* Search for completion queue pointer matching this cqid */
8689 speq = phba->sli4_hba.sp_eq;
8690 list_for_each_entry(childq, &speq->child_list, list) {
8691 if (childq->queue_id == cqid) {
8692 cq = childq;
8693 break;
8694 }
8695 }
8696 if (unlikely(!cq)) {
8697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8698 "0365 Slow-path CQ identifier (%d) does "
8699 "not exist\n", cqid);
8700 return;
8701 }
8702
8703 /* Process all the entries to the CQ */
8704 switch (cq->type) {
8705 case LPFC_MCQ:
8706 while ((cqe = lpfc_sli4_cq_get(cq))) {
8707 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
8708 if (!(++ecount % LPFC_GET_QE_REL_INT))
8709 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8710 }
8711 break;
8712 case LPFC_WCQ:
8713 while ((cqe = lpfc_sli4_cq_get(cq))) {
8714 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
8715 if (!(++ecount % LPFC_GET_QE_REL_INT))
8716 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8717 }
8718 break;
8719 case LPFC_RCQ:
8720 while ((cqe = lpfc_sli4_cq_get(cq))) {
8721 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8722 if (!(++ecount % LPFC_GET_QE_REL_INT))
8723 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8724 }
8725 break;
8726 default:
8727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8728 "0370 Invalid completion queue type (%d)\n",
8729 cq->type);
8730 return;
8731 }
8732
8733 /* Catch the no cq entry condition, log an error */
8734 if (unlikely(ecount == 0))
8735 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8736 "0371 No entry from the CQ: identifier "
8737 "(x%x), type (%d)\n", cq->queue_id, cq->type);
8738
8739	/* In any case, flush and re-arm the CQ */
8740 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8741
8742 /* wake up worker thread if there are works to be done */
8743 if (workposted)
8744 lpfc_worker_wake_up(phba);
8745}
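
/*
 * Note on the polling pattern above (an illustrative summary, not driver
 * code; handle_cqe stands in for the per-type handler):
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= handle_cqe(phba, cq, cqe);
 *		if (!(++ecount % LPFC_GET_QE_REL_INT))
 *			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *	}
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 *
 * Periodically releasing consumed entries without re-arming keeps the HBA
 * from seeing a full CQ during a long burst; the final release re-arms the
 * queue so the next completion raises a fresh interrupt.
 */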
8746
8747/**
8748 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
8749 * @phba: Pointer to HBA context object.
8750 * @wcqe: Pointer to work-queue completion queue entry.
8751 *
8752 * This routine processes a fast-path FCP work-queue completion entry.
8753 **/
8754static void
8755lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8756 struct lpfc_wcqe_complete *wcqe)
8757{
8758 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8759 struct lpfc_iocbq *cmdiocbq;
8760 struct lpfc_iocbq irspiocbq;
8761 unsigned long iflags;
8762
8763 spin_lock_irqsave(&phba->hbalock, iflags);
8764 pring->stats.iocb_event++;
8765 spin_unlock_irqrestore(&phba->hbalock, iflags);
8766
8767 /* Check for response status */
8768 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
8769 /* If resource errors reported from HBA, reduce queue
8770 * depth of the SCSI device.
8771 */
8772 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
8773 IOSTAT_LOCAL_REJECT) &&
8774 (wcqe->parameter == IOERR_NO_RESOURCES)) {
8775 phba->lpfc_rampdown_queue_depth(phba);
8776 }
8777 /* Log the error status */
8778 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8779 "0373 FCP complete error: status=x%x, "
8780 "hw_status=x%x, total_data_specified=%d, "
8781 "parameter=x%x, word3=x%x\n",
8782 bf_get(lpfc_wcqe_c_status, wcqe),
8783 bf_get(lpfc_wcqe_c_hw_status, wcqe),
8784 wcqe->total_data_placed, wcqe->parameter,
8785 wcqe->word3);
8786 }
8787
8788 /* Look up the FCP command IOCB and create pseudo response IOCB */
8789 spin_lock_irqsave(&phba->hbalock, iflags);
8790 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8791 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8792 spin_unlock_irqrestore(&phba->hbalock, iflags);
8793 if (unlikely(!cmdiocbq)) {
8794 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8795 "0374 FCP complete with no corresponding "
8796 "cmdiocb: iotag (%d)\n",
8797 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8798 return;
8799 }
8800 if (unlikely(!cmdiocbq->iocb_cmpl)) {
8801 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8802				"0375 FCP cmdiocb has no callback function, "
8803				"iotag: (%d)\n",
8804 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8805 return;
8806 }
8807
8808 /* Fake the irspiocb and copy necessary response information */
8809 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
8810
8811 /* Pass the cmd_iocb and the rsp state to the upper layer */
8812 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
8813}
8814
8815/**
8816 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
8817 * @phba: Pointer to HBA context object.
8818 * @cq: Pointer to completion queue.
8819 * @wcqe: Pointer to work-queue completion queue entry.
8820 *
8821 * This routine handles a fast-path WQ entry consumed event by invoking the
8822 * proper WQ release routine on the matching fast-path WQ.
8823 **/
8824static void
8825lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8826 struct lpfc_wcqe_release *wcqe)
8827{
8828 struct lpfc_queue *childwq;
8829 bool wqid_matched = false;
8830 uint16_t fcp_wqid;
8831
8832 /* Check for fast-path FCP work queue release */
8833 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
8834 list_for_each_entry(childwq, &cq->child_list, list) {
8835 if (childwq->queue_id == fcp_wqid) {
8836 lpfc_sli4_wq_release(childwq,
8837 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8838 wqid_matched = true;
8839 break;
8840 }
8841 }
8842 /* Report warning log message if no match found */
8843	if (!wqid_matched)
8844		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8845				"2580 Fast-path wqe consume event carries "
8846				"mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
8847}
8848
8849/**
8850 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
8851 * @phba: Pointer to HBA context object.
8852 * @cq: Pointer to the completion queue.
8853 * @cqe: Pointer to fast-path completion queue entry.
8854 *
8855 * This routine dispatches a fast-path work-queue completion entry.
8856 **/
8857static bool
8858lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8859 struct lpfc_cqe *cqe)
8860{
8861 struct lpfc_wcqe_release wcqe;
8862 bool workposted = false;
8863
8864 /* Copy the work queue CQE and convert endian order if needed */
8865 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8866
8867 /* Check and process for different type of WCQE and dispatch */
8868 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8869 case CQE_CODE_COMPL_WQE:
8870 /* Process the WQ complete event */
8871 lpfc_sli4_fp_handle_fcp_wcqe(phba,
8872 (struct lpfc_wcqe_complete *)&wcqe);
8873 break;
8874 case CQE_CODE_RELEASE_WQE:
8875 /* Process the WQ release event */
8876 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
8877 (struct lpfc_wcqe_release *)&wcqe);
8878 break;
8879 case CQE_CODE_XRI_ABORTED:
8880 /* Process the WQ XRI abort event */
8881 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8882 (struct sli4_wcqe_xri_aborted *)&wcqe);
8883 break;
8884 default:
8885 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8886 "0144 Not a valid WCQE code: x%x\n",
8887 bf_get(lpfc_wcqe_c_code, &wcqe));
8888 break;
8889 }
8890 return workposted;
8891}
8892
8893/**
8894 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
8895 * @phba: Pointer to HBA context object.
8896 * @eqe: Pointer to fast-path event queue entry.
8897 *
8898 * This routine processes an event queue entry from the fast-path event
8899 * queue. It checks the MajorCode and MinorCode to determine whether this
8900 * is a completion event on a completion queue; if not, an error is logged
8901 * and the routine returns. Otherwise, it finds the corresponding
8902 * completion queue, processes all the entries on that completion queue,
8903 * re-arms the completion queue, and then returns.
8904 **/
8905static void
8906lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
8907 uint32_t fcp_cqidx)
8908{
8909 struct lpfc_queue *cq;
8910 struct lpfc_cqe *cqe;
8911 bool workposted = false;
8912 uint16_t cqid;
8913 int ecount = 0;
8914
8915 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
8916 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
8917 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8918 "0366 Not a valid fast-path completion "
8919 "event: majorcode=x%x, minorcode=x%x\n",
8920 bf_get(lpfc_eqe_major_code, eqe),
8921 bf_get(lpfc_eqe_minor_code, eqe));
8922 return;
8923 }
8924
8925 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
8926 if (unlikely(!cq)) {
8927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8928 "0367 Fast-path completion queue does not "
8929 "exist\n");
8930 return;
8931 }
8932
8933 /* Get the reference to the corresponding CQ */
8934 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8935 if (unlikely(cqid != cq->queue_id)) {
8936 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8937				"0368 Mismatched fast-path completion "
8938 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
8939 cqid, cq->queue_id);
8940 return;
8941 }
8942
8943 /* Process all the entries to the CQ */
8944 while ((cqe = lpfc_sli4_cq_get(cq))) {
8945 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
8946 if (!(++ecount % LPFC_GET_QE_REL_INT))
8947 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8948 }
8949
8950 /* Catch the no cq entry condition */
8951 if (unlikely(ecount == 0))
8952 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8953 "0369 No entry from fast-path completion "
8954 "queue fcpcqid=%d\n", cq->queue_id);
8955
8956	/* In any case, flush and re-arm the CQ */
8957 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8958
8959 /* wake up worker thread if there are works to be done */
8960 if (workposted)
8961 lpfc_worker_wake_up(phba);
8962}
8963
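/**
 * lpfc_sli4_eq_flush - Drain and re-arm an event queue
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue to flush.
 *
 * This routine consumes and discards all pending entries on @eq, then
 * clears and re-arms the queue.
 **/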
8964static void
8965lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
8966{
8967 struct lpfc_eqe *eqe;
8968
8969 /* walk all the EQ entries and drop on the floor */
8970 while ((eqe = lpfc_sli4_eq_get(eq)))
8971 ;
8972
8973 /* Clear and re-arm the EQ */
8974 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
8975}
8976
8977/**
8978 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
8979 * @irq: Interrupt number.
8980 * @dev_id: The device context pointer.
8981 *
8982 * This function is directly called from the PCI layer as an interrupt
8983 * service routine when device with SLI-4 interface spec is enabled with
8984 * MSI-X multi-message interrupt mode and there are slow-path events in
8985 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
8986 * interrupt mode, this function is called as part of the device-level
8987 * interrupt handler. When the PCI slot is in error recovery or the HBA is
8988 * undergoing initialization, the interrupt handler will not process the
8989 * interrupt. The link attention and ELS ring attention events are handled
8990 * by the worker thread. The interrupt handler signals the worker thread
8991 * and returns for these events. This function is called without any lock
8992 * held. It gets the hbalock to access and update SLI data structures.
8993 *
8994 * This function returns IRQ_HANDLED when interrupt is handled else it
8995 * returns IRQ_NONE.
8996 **/
8997irqreturn_t
8998lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
8999{
9000 struct lpfc_hba *phba;
9001 struct lpfc_queue *speq;
9002 struct lpfc_eqe *eqe;
9003 unsigned long iflag;
9004 int ecount = 0;
9005
9006 /*
9007 * Get the driver's phba structure from the dev_id
9008 */
9009 phba = (struct lpfc_hba *)dev_id;
9010
9011 if (unlikely(!phba))
9012 return IRQ_NONE;
9013
9014 /* Get to the EQ struct associated with this vector */
9015 speq = phba->sli4_hba.sp_eq;
9016
9017 /* Check device state for handling interrupt */
9018 if (unlikely(lpfc_intr_state_check(phba))) {
9019 /* Check again for link_state with lock held */
9020 spin_lock_irqsave(&phba->hbalock, iflag);
9021 if (phba->link_state < LPFC_LINK_DOWN)
9022 /* Flush, clear interrupt, and rearm the EQ */
9023 lpfc_sli4_eq_flush(phba, speq);
9024 spin_unlock_irqrestore(&phba->hbalock, iflag);
9025 return IRQ_NONE;
9026 }
9027
9028 /*
9029	 * Process all the events on the slow-path EQ
9030 */
9031 while ((eqe = lpfc_sli4_eq_get(speq))) {
9032 lpfc_sli4_sp_handle_eqe(phba, eqe);
9033 if (!(++ecount % LPFC_GET_QE_REL_INT))
9034 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
9035 }
9036
9037 /* Always clear and re-arm the slow-path EQ */
9038 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
9039
9040	/* Catch the no EQ entry condition */
9041 if (unlikely(ecount == 0)) {
9042 if (phba->intr_type == MSIX)
9043 /* MSI-X treated interrupt served as no EQ share INT */
9044 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9045 "0357 MSI-X interrupt with no EQE\n");
9046 else
9047 /* Non MSI-X treated on interrupt as EQ share INT */
9048 return IRQ_NONE;
9049 }
9050
9051 return IRQ_HANDLED;
9052} /* lpfc_sli4_sp_intr_handler */
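
/*
 * Registration sketch (illustrative; the real request_irq() calls live in
 * the driver's MSI-X setup code in lpfc_init.c, and the vector variable and
 * name string below are placeholders):
 *
 *	rc = request_irq(sp_vector, lpfc_sli4_sp_intr_handler, IRQF_SHARED,
 *			 "lpfc-sp", phba);
 *
 * In MSI-X mode the slow-path handler owns one vector, and each fast-path
 * handler is bound to its own vector with a struct lpfc_fcp_eq_hdl as its
 * dev_id cookie.
 */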
9053
9054/**
9055 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
9056 * @irq: Interrupt number.
9057 * @dev_id: The device context pointer.
9058 *
9059 * This function is directly called from the PCI layer as an interrupt
9060 * service routine when device with SLI-4 interface spec is enabled with
9061 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
9062 * ring event in the HBA. However, when the device is enabled with either
9063 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
9064 * device-level interrupt handler. When the PCI slot is in error recovery
9065 * or the HBA is undergoing initialization, the interrupt handler will not
9066 * process the interrupt. The SCSI FCP fast-path ring events are handled in
9067 * the interrupt context. This function is called without any lock held.
9068 * It gets the hbalock to access and update SLI data structures. Note that
9069 * the FCP EQs map one-to-one to the FCP CQs, so the FCP EQ index is equal
9070 * to the FCP CQ index.
9071 *
9072 * This function returns IRQ_HANDLED when interrupt is handled else it
9073 * returns IRQ_NONE.
9074 **/
9075irqreturn_t
9076lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
9077{
9078 struct lpfc_hba *phba;
9079 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9080 struct lpfc_queue *fpeq;
9081 struct lpfc_eqe *eqe;
9082 unsigned long iflag;
9083 int ecount = 0;
9084 uint32_t fcp_eqidx;
9085
9086 /* Get the driver's phba structure from the dev_id */
9087 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
9088 phba = fcp_eq_hdl->phba;
9089 fcp_eqidx = fcp_eq_hdl->idx;
9090
9091 if (unlikely(!phba))
9092 return IRQ_NONE;
9093
9094 /* Get to the EQ struct associated with this vector */
9095 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
9096
9097 /* Check device state for handling interrupt */
9098 if (unlikely(lpfc_intr_state_check(phba))) {
9099 /* Check again for link_state with lock held */
9100 spin_lock_irqsave(&phba->hbalock, iflag);
9101 if (phba->link_state < LPFC_LINK_DOWN)
9102 /* Flush, clear interrupt, and rearm the EQ */
9103 lpfc_sli4_eq_flush(phba, fpeq);
9104 spin_unlock_irqrestore(&phba->hbalock, iflag);
9105 return IRQ_NONE;
9106 }
9107
9108 /*
9109	 * Process all the events on the FCP fast-path EQ
9110 */
9111 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9112 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
9113 if (!(++ecount % LPFC_GET_QE_REL_INT))
9114 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
9115 }
9116
9117 /* Always clear and re-arm the fast-path EQ */
9118 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
9119
9120 if (unlikely(ecount == 0)) {
9121 if (phba->intr_type == MSIX)
9122 /* MSI-X treated interrupt served as no EQ share INT */
9123 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9124 "0358 MSI-X interrupt with no EQE\n");
9125 else
9126 /* Non MSI-X treated on interrupt as EQ share INT */
9127 return IRQ_NONE;
9128 }
9129
9130 return IRQ_HANDLED;
9131} /* lpfc_sli4_fp_intr_handler */
9132
9133/**
9134 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
9135 * @irq: Interrupt number.
9136 * @dev_id: The device context pointer.
9137 *
9138 * This function is the device-level interrupt handler to device with SLI-4
9139 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
9140 * interrupt mode is enabled and there is an event in the HBA which requires
9141 * driver attention. This function invokes the slow-path interrupt attention
9142 * handling function and fast-path interrupt attention handling function in
9143 * turn to process the relevant HBA attention events. This function is called
9144 * without any lock held. It gets the hbalock to access and update SLI data
9145 * structures.
9146 *
9147 * This function returns IRQ_HANDLED when interrupt is handled, else it
9148 * returns IRQ_NONE.
9149 **/
9150irqreturn_t
9151lpfc_sli4_intr_handler(int irq, void *dev_id)
9152{
9153 struct lpfc_hba *phba;
9154 irqreturn_t sp_irq_rc, fp_irq_rc;
9155 bool fp_handled = false;
9156 uint32_t fcp_eqidx;
9157
9158 /* Get the driver's phba structure from the dev_id */
9159 phba = (struct lpfc_hba *)dev_id;
9160
9161 if (unlikely(!phba))
9162 return IRQ_NONE;
9163
9164 /*
9165 * Invokes slow-path host attention interrupt handling as appropriate.
9166 */
9167 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9168
9169 /*
9170 * Invoke fast-path host attention interrupt handling as appropriate.
9171 */
9172 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9173 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9174 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9175 if (fp_irq_rc == IRQ_HANDLED)
9176			fp_handled = true;
9177 }
9178
9179	return fp_handled ? IRQ_HANDLED : sp_irq_rc;
9180} /* lpfc_sli4_intr_handler */
9181
9182/**
9183 * lpfc_sli4_queue_free - free a queue structure and associated memory
9184 * @queue: The queue structure to free.
9185 *
9186 * This function frees a queue structure and the DMAable memory used for
9187 * the host resident queue. This function must be called after destroying the
9188 * queue on the HBA.
9189 **/
9190void
9191lpfc_sli4_queue_free(struct lpfc_queue *queue)
9192{
9193 struct lpfc_dmabuf *dmabuf;
9194
9195 if (!queue)
9196 return;
9197
9198 while (!list_empty(&queue->page_list)) {
9199 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9200 list);
9201 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9202 dmabuf->virt, dmabuf->phys);
9203 kfree(dmabuf);
9204 }
9205 kfree(queue);
9206 return;
9207}
9208
9209/**
9210 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
9211 * @phba: The HBA that this queue is being created on.
9212 * @entry_size: The size of each queue entry for this queue.
9213 * @entry_count: The number of entries that this queue will handle.
9214 *
9215 * This function allocates a queue structure and the DMAable memory used for
9216 * the host resident queue. This function must be called before creating the
9217 * queue on the HBA.
9218 **/
9219struct lpfc_queue *
9220lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9221 uint32_t entry_count)
9222{
9223 struct lpfc_queue *queue;
9224 struct lpfc_dmabuf *dmabuf;
9225 int x, total_qe_count;
9226 void *dma_pointer;
9227
9228
9229 queue = kzalloc(sizeof(struct lpfc_queue) +
9230 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9231 if (!queue)
9232 return NULL;
9233 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9234 INIT_LIST_HEAD(&queue->list);
9235 INIT_LIST_HEAD(&queue->page_list);
9236 INIT_LIST_HEAD(&queue->child_list);
9237 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9238 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9239 if (!dmabuf)
9240 goto out_fail;
9241 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9242 PAGE_SIZE, &dmabuf->phys,
9243 GFP_KERNEL);
9244 if (!dmabuf->virt) {
9245 kfree(dmabuf);
9246 goto out_fail;
9247 }
9248 dmabuf->buffer_tag = x;
9249 list_add_tail(&dmabuf->list, &queue->page_list);
9250 /* initialize queue's entry array */
9251 dma_pointer = dmabuf->virt;
9252 for (; total_qe_count < entry_count &&
9253 dma_pointer < (PAGE_SIZE + dmabuf->virt);
9254 total_qe_count++, dma_pointer += entry_size) {
9255 queue->qe[total_qe_count].address = dma_pointer;
9256 }
9257 }
9258 queue->entry_size = entry_size;
9259 queue->entry_count = entry_count;
9260 queue->phba = phba;
9261
9262 return queue;
9263out_fail:
9264 lpfc_sli4_queue_free(queue);
9265 return NULL;
9266}
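
/*
 * Usage sketch (illustrative; the entry size and count here are examples,
 * not the driver's configured values): host memory for a queue is
 * allocated first, then the queue is created on the HBA, and tear-down
 * runs in the reverse order.
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, sizeof(struct lpfc_eqe), 1024);
 *	if (!eq)
 *		return -ENOMEM;
 *	if (lpfc_eq_create(phba, eq, imax)) {
 *		lpfc_sli4_queue_free(eq);
 *		return -ENXIO;
 *	}
 */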
9267
9268/**
9269 * lpfc_eq_create - Create an Event Queue on the HBA
9270 * @phba: HBA structure that indicates port to create a queue on.
9271 * @eq: The queue structure to use to create the event queue.
9272 * @imax: The maximum interrupt per second limit.
9273 *
9274 * This function creates an event queue, as detailed in @eq, on a port,
9275 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
9276 *
9277 * The @phba struct is used to send mailbox command to HBA. The @eq struct
9278 * is used to get the entry count and entry size that are necessary to
9279 * determine the number of pages to allocate and use for this queue. This
9280 * function will send the EQ_CREATE mailbox command to the HBA to setup the
9281 * event queue. This function is synchronous: it polls for the mailbox
9282 * command to finish before continuing.
9283 *
9284 * On success this function will return zero. If unable to allocate enough
9285 * memory this function will return -ENOMEM. If the queue create mailbox
9286 * command fails this function will return -ENXIO.
9287 **/
9288uint32_t
9289lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9290{
9291 struct lpfc_mbx_eq_create *eq_create;
9292 LPFC_MBOXQ_t *mbox;
9293 int rc, length, status = 0;
9294 struct lpfc_dmabuf *dmabuf;
9295 uint32_t shdr_status, shdr_add_status;
9296 union lpfc_sli4_cfg_shdr *shdr;
9297 uint16_t dmult;
9298
9299 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9300 if (!mbox)
9301 return -ENOMEM;
9302 length = (sizeof(struct lpfc_mbx_eq_create) -
9303 sizeof(struct lpfc_sli4_cfg_mhdr));
9304 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9305 LPFC_MBOX_OPCODE_EQ_CREATE,
9306 length, LPFC_SLI4_MBX_EMBED);
9307 eq_create = &mbox->u.mqe.un.eq_create;
9308 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9309 eq->page_count);
9310 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9311 LPFC_EQE_SIZE);
9312 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
9313	/* Calculate delay multiplier from maximum interrupts per second */
9314 dmult = LPFC_DMULT_CONST/imax - 1;
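	/* e.g. assuming LPFC_DMULT_CONST is 651042 (per lpfc_hw4.h), an imax
	 * of 10000 gives dmult = 651042/10000 - 1 = 64 (integer division)
	 */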
9315 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9316 dmult);
9317 switch (eq->entry_count) {
9318 default:
9319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9320 "0360 Unsupported EQ count. (%d)\n",
9321 eq->entry_count);
9322		if (eq->entry_count < 256) {
9323			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
9324 /* otherwise default to smallest count (drop through) */
9325 case 256:
9326 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9327 LPFC_EQ_CNT_256);
9328 break;
9329 case 512:
9330 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9331 LPFC_EQ_CNT_512);
9332 break;
9333 case 1024:
9334 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9335 LPFC_EQ_CNT_1024);
9336 break;
9337 case 2048:
9338 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9339 LPFC_EQ_CNT_2048);
9340 break;
9341 case 4096:
9342 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9343 LPFC_EQ_CNT_4096);
9344 break;
9345 }
9346 list_for_each_entry(dmabuf, &eq->page_list, list) {
9347 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9348 putPaddrLow(dmabuf->phys);
9349 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9350 putPaddrHigh(dmabuf->phys);
9351 }
9352 mbox->vport = phba->pport;
9353 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9354 mbox->context1 = NULL;
9355 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9356 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9357 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9358 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9359 if (shdr_status || shdr_add_status || rc) {
9360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9361 "2500 EQ_CREATE mailbox failed with "
9362 "status x%x add_status x%x, mbx status x%x\n",
9363 shdr_status, shdr_add_status, rc);
9364 status = -ENXIO;
9365 }
9366 eq->type = LPFC_EQ;
9367 eq->subtype = LPFC_NONE;
9368 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9369 if (eq->queue_id == 0xFFFF)
9370 status = -ENXIO;
9371 eq->host_index = 0;
9372 eq->hba_index = 0;
9373
9374 if (rc != MBX_TIMEOUT)
9375 mempool_free(mbox, phba->mbox_mem_pool);
9376 return status;
9377}
9378
9379/**
9380 * lpfc_cq_create - Create a Completion Queue on the HBA
9381 * @phba: HBA structure that indicates port to create a queue on.
9382 * @cq: The queue structure to use to create the completion queue.
9383 * @eq: The event queue to bind this completion queue to.
9384 *
9385 * This function creates a completion queue, as detailed in @cq, on a port,
9386 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
9387 *
9388 * The @phba struct is used to send mailbox command to HBA. The @cq struct
9389 * is used to get the entry count and entry size that are necessary to
9390 * determine the number of pages to allocate and use for this queue. The @eq
9391 * is used to indicate which event queue to bind this completion queue to. This
9392 * function will send the CQ_CREATE mailbox command to the HBA to setup the
9393 * completion queue. This function is synchronous: it polls for the mailbox
9394 * command to finish before continuing.
9395 *
9396 * On success this function will return zero. If unable to allocate enough
9397 * memory this function will return -ENOMEM. If the queue create mailbox
9398 * command fails this function will return -ENXIO.
9399 **/
9400uint32_t
9401lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9402 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9403{
9404 struct lpfc_mbx_cq_create *cq_create;
9405 struct lpfc_dmabuf *dmabuf;
9406 LPFC_MBOXQ_t *mbox;
9407 int rc, length, status = 0;
9408 uint32_t shdr_status, shdr_add_status;
9409 union lpfc_sli4_cfg_shdr *shdr;
9410
9411 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9412 if (!mbox)
9413 return -ENOMEM;
9414 length = (sizeof(struct lpfc_mbx_cq_create) -
9415 sizeof(struct lpfc_sli4_cfg_mhdr));
9416 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9417 LPFC_MBOX_OPCODE_CQ_CREATE,
9418 length, LPFC_SLI4_MBX_EMBED);
9419 cq_create = &mbox->u.mqe.un.cq_create;
9420 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9421 cq->page_count);
9422 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9423 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9424 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9425 switch (cq->entry_count) {
9426 default:
9427 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9428 "0361 Unsupported CQ count. (%d)\n",
9429 cq->entry_count);
9430		if (cq->entry_count < 256) {
9431			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
9432 /* otherwise default to smallest count (drop through) */
9433 case 256:
9434 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9435 LPFC_CQ_CNT_256);
9436 break;
9437 case 512:
9438 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9439 LPFC_CQ_CNT_512);
9440 break;
9441 case 1024:
9442 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9443 LPFC_CQ_CNT_1024);
9444 break;
9445 }
9446 list_for_each_entry(dmabuf, &cq->page_list, list) {
9447 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9448 putPaddrLow(dmabuf->phys);
9449 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9450 putPaddrHigh(dmabuf->phys);
9451 }
9452 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9453
9454 /* The IOCTL status is embedded in the mailbox subheader. */
9455 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9456 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9457 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9458 if (shdr_status || shdr_add_status || rc) {
9459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9460 "2501 CQ_CREATE mailbox failed with "
9461 "status x%x add_status x%x, mbx status x%x\n",
9462 shdr_status, shdr_add_status, rc);
9463 status = -ENXIO;
9464 goto out;
9465 }
9466 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9467 if (cq->queue_id == 0xFFFF) {
9468 status = -ENXIO;
9469 goto out;
9470 }
9471 /* link the cq onto the parent eq child list */
9472 list_add_tail(&cq->list, &eq->child_list);
9473 /* Set up completion queue's type and subtype */
9474 cq->type = type;
9475 cq->subtype = subtype;
9476 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9477 cq->host_index = 0;
9478 cq->hba_index = 0;
9479out:
9480
9481 if (rc != MBX_TIMEOUT)
9482 mempool_free(mbox, phba->mbox_mem_pool);
9483 return status;
9484}
9485
9486/**
9487 * lpfc_mq_create - Create a mailbox Queue on the HBA
9488 * @phba: HBA structure that indicates port to create a queue on.
9489 * @mq: The queue structure to use to create the mailbox queue.
9490 *
9491 * This function creates a mailbox queue, as detailed in @mq, on a port,
9492 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
9493 *
9494 * The @phba struct is used to send mailbox command to HBA. The @mq struct
9495 * is used to get the entry count and entry size that are necessary to
9496 * determine the number of pages to allocate and use for this queue. This
9497 * function will send the MQ_CREATE mailbox command to the HBA to setup the
9498 * mailbox queue. This function is synchronous: it polls for the mailbox
9499 * command to finish before continuing.
9500 *
9501 * On success this function will return zero. If unable to allocate enough
9502 * memory this function will return -ENOMEM. If the queue create mailbox
9503 * command fails this function will return -ENXIO.
9504 **/
9505uint32_t
9506lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9507 struct lpfc_queue *cq, uint32_t subtype)
9508{
9509 struct lpfc_mbx_mq_create *mq_create;
9510 struct lpfc_dmabuf *dmabuf;
9511 LPFC_MBOXQ_t *mbox;
9512 int rc, length, status = 0;
9513 uint32_t shdr_status, shdr_add_status;
9514 union lpfc_sli4_cfg_shdr *shdr;
9515
9516 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9517 if (!mbox)
9518 return -ENOMEM;
9519 length = (sizeof(struct lpfc_mbx_mq_create) -
9520 sizeof(struct lpfc_sli4_cfg_mhdr));
9521 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9522 LPFC_MBOX_OPCODE_MQ_CREATE,
9523 length, LPFC_SLI4_MBX_EMBED);
9524 mq_create = &mbox->u.mqe.un.mq_create;
9525 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9526 mq->page_count);
9527 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9528 cq->queue_id);
9529 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9530 switch (mq->entry_count) {
9531 default:
9532 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9533 "0362 Unsupported MQ count. (%d)\n",
9534 mq->entry_count);
9535		if (mq->entry_count < 16) {
9536			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
9537 /* otherwise default to smallest count (drop through) */
9538 case 16:
9539 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9540 LPFC_MQ_CNT_16);
9541 break;
9542 case 32:
9543 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9544 LPFC_MQ_CNT_32);
9545 break;
9546 case 64:
9547 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9548 LPFC_MQ_CNT_64);
9549 break;
9550 case 128:
9551 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9552 LPFC_MQ_CNT_128);
9553 break;
9554 }
9555 list_for_each_entry(dmabuf, &mq->page_list, list) {
9556 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9557 putPaddrLow(dmabuf->phys);
9558 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9559 putPaddrHigh(dmabuf->phys);
9560 }
9561 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9562 /* The IOCTL status is embedded in the mailbox subheader. */
9563 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9564 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9565 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9566 if (shdr_status || shdr_add_status || rc) {
9567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9568 "2502 MQ_CREATE mailbox failed with "
9569 "status x%x add_status x%x, mbx status x%x\n",
9570 shdr_status, shdr_add_status, rc);
9571 status = -ENXIO;
9572 goto out;
9573 }
9574 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9575 if (mq->queue_id == 0xFFFF) {
9576 status = -ENXIO;
9577 goto out;
9578 }
9579 mq->type = LPFC_MQ;
9580 mq->subtype = subtype;
9581 mq->host_index = 0;
9582 mq->hba_index = 0;
9583
9584 /* link the mq onto the parent cq child list */
9585 list_add_tail(&mq->list, &cq->child_list);
9586out:
9587 if (rc != MBX_TIMEOUT)
9588 mempool_free(mbox, phba->mbox_mem_pool);
9589 return status;
9590}
9591
9592/**
9593 * lpfc_wq_create - Create a Work Queue on the HBA
9594 * @phba: HBA structure that indicates port to create a queue on.
9595 * @wq: The queue structure to use to create the work queue.
9596 * @cq: The completion queue to bind this work queue to.
9597 * @subtype: The subtype of the work queue indicating its functionality.
9598 *
9599 * This function creates a work queue, as detailed in @wq, on a port, described
9600 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
9601 *
9602 * The @phba struct is used to send mailbox command to HBA. The @wq struct
9603 * is used to get the entry count and entry size that are necessary to
9604 * determine the number of pages to allocate and use for this queue. The @cq
9605 * is used to indicate which completion queue to bind this work queue to. This
9606 * function will send the WQ_CREATE mailbox command to the HBA to setup the
9607 * work queue. This function is synchronous: it polls for the mailbox
9608 * command to finish before continuing.
9609 *
9610 * On success this function will return zero. If unable to allocate enough
9611 * memory this function will return -ENOMEM. If the queue create mailbox
9612 * command fails this function will return -ENXIO.
9613 **/
9614uint32_t
9615lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9616 struct lpfc_queue *cq, uint32_t subtype)
9617{
9618 struct lpfc_mbx_wq_create *wq_create;
9619 struct lpfc_dmabuf *dmabuf;
9620 LPFC_MBOXQ_t *mbox;
9621 int rc, length, status = 0;
9622 uint32_t shdr_status, shdr_add_status;
9623 union lpfc_sli4_cfg_shdr *shdr;
9624
9625 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9626 if (!mbox)
9627 return -ENOMEM;
9628 length = (sizeof(struct lpfc_mbx_wq_create) -
9629 sizeof(struct lpfc_sli4_cfg_mhdr));
9630 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9631 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
9632 length, LPFC_SLI4_MBX_EMBED);
9633 wq_create = &mbox->u.mqe.un.wq_create;
9634 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
9635 wq->page_count);
9636 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9637 cq->queue_id);
9638 list_for_each_entry(dmabuf, &wq->page_list, list) {
9639 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9640 putPaddrLow(dmabuf->phys);
9641 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9642 putPaddrHigh(dmabuf->phys);
9643 }
9644 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9645 /* The IOCTL status is embedded in the mailbox subheader. */
9646 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
9647 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9648 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9649 if (shdr_status || shdr_add_status || rc) {
9650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9651 "2503 WQ_CREATE mailbox failed with "
9652 "status x%x add_status x%x, mbx status x%x\n",
9653 shdr_status, shdr_add_status, rc);
9654 status = -ENXIO;
9655 goto out;
9656 }
9657 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
9658 if (wq->queue_id == 0xFFFF) {
9659 status = -ENXIO;
9660 goto out;
9661 }
9662 wq->type = LPFC_WQ;
9663 wq->subtype = subtype;
9664 wq->host_index = 0;
9665 wq->hba_index = 0;
9666
9667 /* link the wq onto the parent cq child list */
9668 list_add_tail(&wq->list, &cq->child_list);
9669out:
9670	if (rc != MBX_TIMEOUT)
9671 mempool_free(mbox, phba->mbox_mem_pool);
9672 return status;
9673}
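
/*
 * Sketch of how the create routines compose (illustrative ordering, not a
 * verbatim excerpt of the driver's setup path):
 *
 *	lpfc_eq_create(phba, eq, imax);
 *	lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_ELS);
 *	lpfc_wq_create(phba, wq, cq, LPFC_ELS);
 *
 * Each create call links the child queue onto its parent's child_list;
 * that list is what lpfc_sli4_sp_handle_eqe() walks to resolve a CQID from
 * an EQE back to the right completion queue.
 */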
9674
9675/**
9676 * lpfc_rq_create - Create a Receive Queue on the HBA
9677 * @phba: HBA structure that indicates port to create a queue on.
9678 * @hrq: The queue structure to use to create the header receive queue.
9679 * @drq: The queue structure to use to create the data receive queue.
9680 * @cq: The completion queue to bind this receive queue pair to.
9681 *
9682 * This function creates a receive buffer queue pair, as detailed in @hrq and
9683 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
9684 * to the HBA.
9685 *
9686 * The @phba struct is used to send the mailbox command to the HBA. The
9687 * @hrq and @drq structs are used to get the entry count needed to
9688 * determine the number of pages to use for this queue pair. The @cq
9689 * indicates which completion queue the buffers posted to these queues are
9690 * bound to. This function will send the RQ_CREATE mailbox command to the
9691 * HBA to set up the receive queue pair. This function is synchronous: it
9692 * polls for the mailbox command to finish before continuing.
9693 *
9694 * On success this function will return zero. If unable to allocate enough
9695 * memory this function will return -ENOMEM. If the queue create mailbox
9696 * command fails this function will return -ENXIO.
9697 **/
9698uint32_t
9699lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9700 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9701{
9702 struct lpfc_mbx_rq_create *rq_create;
9703 struct lpfc_dmabuf *dmabuf;
9704 LPFC_MBOXQ_t *mbox;
9705 int rc, length, status = 0;
9706 uint32_t shdr_status, shdr_add_status;
9707 union lpfc_sli4_cfg_shdr *shdr;
9708
9709 if (hrq->entry_count != drq->entry_count)
9710 return -EINVAL;
9711 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9712 if (!mbox)
9713 return -ENOMEM;
9714 length = (sizeof(struct lpfc_mbx_rq_create) -
9715 sizeof(struct lpfc_sli4_cfg_mhdr));
9716 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9717 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9718 length, LPFC_SLI4_MBX_EMBED);
9719 rq_create = &mbox->u.mqe.un.rq_create;
9720 switch (hrq->entry_count) {
9721 default:
9722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9723 "2535 Unsupported RQ count. (%d)\n",
9724 hrq->entry_count);
9725		if (hrq->entry_count < 512) {
9726			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
9727 /* otherwise default to smallest count (drop through) */
9728 case 512:
9729 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9730 LPFC_RQ_RING_SIZE_512);
9731 break;
9732 case 1024:
9733 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9734 LPFC_RQ_RING_SIZE_1024);
9735 break;
9736 case 2048:
9737 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9738 LPFC_RQ_RING_SIZE_2048);
9739 break;
9740 case 4096:
9741 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9742 LPFC_RQ_RING_SIZE_4096);
9743 break;
9744 }
9745 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9746 cq->queue_id);
9747 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9748 hrq->page_count);
9749 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9750 LPFC_HDR_BUF_SIZE);
9751 list_for_each_entry(dmabuf, &hrq->page_list, list) {
9752 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9753 putPaddrLow(dmabuf->phys);
9754 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9755 putPaddrHigh(dmabuf->phys);
9756 }
9757 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9758 /* The IOCTL status is embedded in the mailbox subheader. */
9759 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9760 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9761 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9762 if (shdr_status || shdr_add_status || rc) {
9763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9764 "2504 RQ_CREATE mailbox failed with "
9765 "status x%x add_status x%x, mbx status x%x\n",
9766 shdr_status, shdr_add_status, rc);
9767 status = -ENXIO;
9768 goto out;
9769 }
9770 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9771 if (hrq->queue_id == 0xFFFF) {
9772 status = -ENXIO;
9773 goto out;
9774 }
9775 hrq->type = LPFC_HRQ;
9776 hrq->subtype = subtype;
9777 hrq->host_index = 0;
9778 hrq->hba_index = 0;
9779
9780 /* now create the data queue */
9781 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9782 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9783 length, LPFC_SLI4_MBX_EMBED);
9784 switch (drq->entry_count) {
9785 default:
9786 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9787 "2536 Unsupported RQ count. (%d)\n",
9788 drq->entry_count);
9789		if (drq->entry_count < 512) {
9790			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
9791 /* otherwise default to smallest count (drop through) */
9792 case 512:
9793 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9794 LPFC_RQ_RING_SIZE_512);
9795 break;
9796 case 1024:
9797 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9798 LPFC_RQ_RING_SIZE_1024);
9799 break;
9800 case 2048:
9801 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9802 LPFC_RQ_RING_SIZE_2048);
9803 break;
9804 case 4096:
9805 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9806 LPFC_RQ_RING_SIZE_4096);
9807 break;
9808 }
9809 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9810 cq->queue_id);
9811 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9812 drq->page_count);
9813 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9814 LPFC_DATA_BUF_SIZE);
9815 list_for_each_entry(dmabuf, &drq->page_list, list) {
9816 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9817 putPaddrLow(dmabuf->phys);
9818 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9819 putPaddrHigh(dmabuf->phys);
9820 }
9821 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9822 /* The IOCTL status is embedded in the mailbox subheader. */
9823 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9824 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9825 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9826 if (shdr_status || shdr_add_status || rc) {
9827 status = -ENXIO;
9828 goto out;
9829 }
9830 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9831 if (drq->queue_id == 0xFFFF) {
9832 status = -ENXIO;
9833 goto out;
9834 }
9835 drq->type = LPFC_DRQ;
9836 drq->subtype = subtype;
9837 drq->host_index = 0;
9838 drq->hba_index = 0;
9839
9840 /* link the header and data RQs onto the parent cq child list */
9841 list_add_tail(&hrq->list, &cq->child_list);
9842 list_add_tail(&drq->list, &cq->child_list);
9843
9844out:
9845 if (rc != MBX_TIMEOUT)
9846 mempool_free(mbox, phba->mbox_mem_pool);
9847 return status;
9848}
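
/*
 * Usage sketch (illustrative): the header and data RQs are always created
 * as a pair against a single CQ, with matching entry counts; LPFC_USOL is
 * the unsolicited-receive subtype used for this pairing:
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *
 * where hdr_rq, dat_rq and els_cq are queues previously allocated with
 * lpfc_sli4_queue_alloc().
 */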
9849
9850/**
9851 * lpfc_eq_destroy - Destroy an event Queue on the HBA
9852 * @eq: The queue structure associated with the queue to destroy.
9853 *
9854 * This function destroys a queue, as detailed in @eq, by sending a mailbox
9855 * command, specific to the type of queue, to the HBA.
9856 *
9857 * The @eq struct is used to get the queue ID of the queue to destroy.
9858 *
9859 * On success this function will return zero. If the queue destroy mailbox
9860 * command fails this function will return -ENXIO.
9861 **/
9862uint32_t
9863lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
9864{
9865 LPFC_MBOXQ_t *mbox;
9866 int rc, length, status = 0;
9867 uint32_t shdr_status, shdr_add_status;
9868 union lpfc_sli4_cfg_shdr *shdr;
9869
9870 if (!eq)
9871 return -ENODEV;
9872 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
9873 if (!mbox)
9874 return -ENOMEM;
9875 length = (sizeof(struct lpfc_mbx_eq_destroy) -
9876 sizeof(struct lpfc_sli4_cfg_mhdr));
9877 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9878 LPFC_MBOX_OPCODE_EQ_DESTROY,
9879 length, LPFC_SLI4_MBX_EMBED);
9880 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
9881 eq->queue_id);
9882 mbox->vport = eq->phba->pport;
9883 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9884
9885 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
9886 /* The IOCTL status is embedded in the mailbox subheader. */
9887 shdr = (union lpfc_sli4_cfg_shdr *)
9888 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
9889 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9890 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9891 if (shdr_status || shdr_add_status || rc) {
9892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9893 "2505 EQ_DESTROY mailbox failed with "
9894 "status x%x add_status x%x, mbx status x%x\n",
9895 shdr_status, shdr_add_status, rc);
9896 status = -ENXIO;
9897 }
9898
9899 /* Remove eq from any list */
9900 list_del_init(&eq->list);
9901 if (rc != MBX_TIMEOUT)
9902 mempool_free(mbox, eq->phba->mbox_mem_pool);
9903 return status;
9904}
9905
9906/**
9907 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
9908 * @cq: The queue structure associated with the queue to destroy.
9909 *
9910 * This function destroys a queue, as detailed in @cq, by sending a mailbox
9911 * command, specific to the type of queue, to the HBA.
9912 *
9913 * The @cq struct is used to get the queue ID of the queue to destroy.
9914 *
9915 * On success this function will return zero. If the queue destroy mailbox
9916 * command fails this function will return -ENXIO.
9917 **/
9918uint32_t
9919lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
9920{
9921 LPFC_MBOXQ_t *mbox;
9922 int rc, length, status = 0;
9923 uint32_t shdr_status, shdr_add_status;
9924 union lpfc_sli4_cfg_shdr *shdr;
9925
9926 if (!cq)
9927 return -ENODEV;
9928 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
9929 if (!mbox)
9930 return -ENOMEM;
9931 length = (sizeof(struct lpfc_mbx_cq_destroy) -
9932 sizeof(struct lpfc_sli4_cfg_mhdr));
9933 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9934 LPFC_MBOX_OPCODE_CQ_DESTROY,
9935 length, LPFC_SLI4_MBX_EMBED);
9936 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
9937 cq->queue_id);
9938 mbox->vport = cq->phba->pport;
9939 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9940 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
9941 /* The IOCTL status is embedded in the mailbox subheader. */
9942	shdr = (union lpfc_sli4_cfg_shdr *)
9943		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
9944 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9945 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9946 if (shdr_status || shdr_add_status || rc) {
9947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9948 "2506 CQ_DESTROY mailbox failed with "
9949 "status x%x add_status x%x, mbx status x%x\n",
9950 shdr_status, shdr_add_status, rc);
9951 status = -ENXIO;
9952 }
9953 /* Remove cq from any list */
9954 list_del_init(&cq->list);
9955 if (rc != MBX_TIMEOUT)
9956 mempool_free(mbox, cq->phba->mbox_mem_pool);
9957 return status;
9958}
9959
9960/**
9961 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
9962 * @mq: The queue structure associated with the queue to destroy.
9963 *
9964 * This function destroys a queue, as detailed in @mq, by sending a mailbox
9965 * command, specific to the type of queue, to the HBA.
9966 *
9967 * The @mq struct is used to get the queue ID of the queue to destroy.
9968 *
9969 * On success this function will return zero. If the queue destroy mailbox
9970 * command fails this function will return -ENXIO.
9971 **/
9972uint32_t
9973lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
9974{
9975 LPFC_MBOXQ_t *mbox;
9976 int rc, length, status = 0;
9977 uint32_t shdr_status, shdr_add_status;
9978 union lpfc_sli4_cfg_shdr *shdr;
9979
9980 if (!mq)
9981 return -ENODEV;
9982 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
9983 if (!mbox)
9984 return -ENOMEM;
9985 length = (sizeof(struct lpfc_mbx_mq_destroy) -
9986 sizeof(struct lpfc_sli4_cfg_mhdr));
9987 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9988 LPFC_MBOX_OPCODE_MQ_DESTROY,
9989 length, LPFC_SLI4_MBX_EMBED);
9990 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
9991 mq->queue_id);
9992 mbox->vport = mq->phba->pport;
9993 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9994 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
9995 /* The IOCTL status is embedded in the mailbox subheader. */
9996 shdr = (union lpfc_sli4_cfg_shdr *)
9997 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
9998 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9999 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10000 if (shdr_status || shdr_add_status || rc) {
10001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10002 "2507 MQ_DESTROY mailbox failed with "
10003 "status x%x add_status x%x, mbx status x%x\n",
10004 shdr_status, shdr_add_status, rc);
10005 status = -ENXIO;
10006 }
10007 /* Remove mq from any list */
10008 list_del_init(&mq->list);
10009 if (rc != MBX_TIMEOUT)
10010 mempool_free(mbox, mq->phba->mbox_mem_pool);
10011 return status;
10012}
10013
10014/**
10015 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
10016 * @wq: The queue structure associated with the queue to destroy.
10017 *
10018 * This function destroys a queue, as detailed in @wq, by sending a mailbox
10019 * command, specific to the type of queue, to the HBA.
10020 *
10021 * The @wq struct is used to get the queue ID of the queue to destroy.
10022 *
10023 * On success this function will return zero. If the queue destroy mailbox
10024 * command fails this function will return -ENXIO.
10025 **/
10026uint32_t
10027lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10028{
10029 LPFC_MBOXQ_t *mbox;
10030 int rc, length, status = 0;
10031 uint32_t shdr_status, shdr_add_status;
10032 union lpfc_sli4_cfg_shdr *shdr;
10033
10034 if (!wq)
10035 return -ENODEV;
10036 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10037 if (!mbox)
10038 return -ENOMEM;
10039 length = (sizeof(struct lpfc_mbx_wq_destroy) -
10040 sizeof(struct lpfc_sli4_cfg_mhdr));
10041 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10042 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10043 length, LPFC_SLI4_MBX_EMBED);
10044 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10045 wq->queue_id);
10046 mbox->vport = wq->phba->pport;
10047 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10048 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10049 shdr = (union lpfc_sli4_cfg_shdr *)
10050 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10051 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10052 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10053 if (shdr_status || shdr_add_status || rc) {
10054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10055 "2508 WQ_DESTROY mailbox failed with "
10056 "status x%x add_status x%x, mbx status x%x\n",
10057 shdr_status, shdr_add_status, rc);
10058 status = -ENXIO;
10059 }
10060 /* Remove wq from any list */
10061 list_del_init(&wq->list);
10062 if (rc != MBX_TIMEOUT)
10063 mempool_free(mbox, wq->phba->mbox_mem_pool);
10064 return status;
10065}
10066
10067/**
10068 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
10069 * @hrq: The header receive queue to destroy.
10070 * @drq: The data receive queue to destroy.
10071 *
10072 * This function destroys the receive queue pair, as detailed in @hrq and
10073 * @drq, by sending mailbox commands, specific to the type of queue, to the HBA.
10074 *
10075 * The @hrq and @drq structs are used to get the queue IDs to destroy.
10076 *
10077 * On success this function will return zero; if a queue destroy mailbox command fails it will return -ENXIO.
10078 **/
10079uint32_t
10080lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10081 struct lpfc_queue *drq)
10082{
10083 LPFC_MBOXQ_t *mbox;
10084 int rc, length, status = 0;
10085 uint32_t shdr_status, shdr_add_status;
10086 union lpfc_sli4_cfg_shdr *shdr;
10087
10088 if (!hrq || !drq)
10089 return -ENODEV;
10090 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
10091 if (!mbox)
10092 return -ENOMEM;
10093 length = (sizeof(struct lpfc_mbx_rq_destroy) -
10094 sizeof(struct mbox_header));
10095 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10096 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
10097 length, LPFC_SLI4_MBX_EMBED);
10098 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10099 hrq->queue_id);
10100 mbox->vport = hrq->phba->pport;
10101 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10102 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
10103 /* The IOCTL status is embedded in the mailbox subheader. */
10104 shdr = (union lpfc_sli4_cfg_shdr *)
10105 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10106 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10107 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10108 if (shdr_status || shdr_add_status || rc) {
10109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10110 "2509 RQ_DESTROY mailbox failed with "
10111 "status x%x add_status x%x, mbx status x%x\n",
10112 shdr_status, shdr_add_status, rc);
10113 if (rc != MBX_TIMEOUT)
10114 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10115 return -ENXIO;
10116 }
10117 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10118 drq->queue_id);
10119 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
10120 shdr = (union lpfc_sli4_cfg_shdr *)
10121 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10122 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10123 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10124 if (shdr_status || shdr_add_status || rc) {
10125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10126 "2510 RQ_DESTROY mailbox failed with "
10127 "status x%x add_status x%x, mbx status x%x\n",
10128 shdr_status, shdr_add_status, rc);
10129 status = -ENXIO;
10130 }
10131 list_del_init(&hrq->list);
10132 list_del_init(&drq->list);
10133 if (rc != MBX_TIMEOUT)
10134 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10135 return status;
10136}
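
/*
 * A minimal teardown sketch, not part of the driver source: one plausible
 * ordering for unsetting a full SLI4 queue set with the destroy routines
 * above (work/receive/mailbox queues before their completion queues,
 * completion queues before the event queue). The sli4_hba member names
 * used here are illustrative assumptions, not the driver's actual fields.
 */
static void lpfc_example_queue_unset(struct lpfc_hba *phba)
{
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);	/* assumed member */
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);	/* assumed member */
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,	/* assumed members */
			phba->sli4_hba.dat_rq);
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);	/* assumed member */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);	/* assumed member */
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);	/* assumed member */
}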
10137
10138/**
10139 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
10140 * @phba: pointer to the HBA for which this call is being executed.
10141 * @pdma_phys_addr0: Physical address of the 1st SGL page.
10142 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
10143 * @xritag: the xritag that ties this io to the SGL pages.
10144 *
10145 * This routine will post the sgl pages for the IO that has the xritag
10146 * that is in the iocbq structure. The xritag is assigned during iocbq
10147 * creation and persists for as long as the driver is loaded.
10148 * If the caller has fewer than 256 scatter gather segments to map then
10149 * pdma_phys_addr1 should be 0.
10150 * If the caller needs to map more than 256 scatter gather segments then
10151 * pdma_phys_addr1 should be a valid physical address.
10152 * Physical addresses for SGLs must be 64 byte aligned.
10153 * If two SGLs are mapped then the first one must have 256 entries and
10154 * the second can have between 1 and 256 entries.
10155 *
10156 * Return codes:
10157 * 0 - Success
10158 * -ENXIO, -ENOMEM - Failure
10159 **/
10160int
10161lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10162 dma_addr_t pdma_phys_addr0,
10163 dma_addr_t pdma_phys_addr1,
10164 uint16_t xritag)
10165{
10166 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10167 LPFC_MBOXQ_t *mbox;
10168 int rc;
10169 uint32_t shdr_status, shdr_add_status;
10170 union lpfc_sli4_cfg_shdr *shdr;
10171
10172 if (xritag == NO_XRI) {
10173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10174 "0364 Invalid param:\n");
10175 return -EINVAL;
10176 }
10177
10178 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10179 if (!mbox)
10180 return -ENOMEM;
10181
10182 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10183 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10184 sizeof(struct lpfc_mbx_post_sgl_pages) -
10185 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10186
10187 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10188 &mbox->u.mqe.un.post_sgl_pages;
10189 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10190 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10191
10192 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10193 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10194 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10195 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10196
10197 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10198 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10199 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10200 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10201 if (!phba->sli4_hba.intr_enable)
10202 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10203 else
10204 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10205 /* The IOCTL status is embedded in the mailbox subheader. */
10206 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10207 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10208 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10209 if (rc != MBX_TIMEOUT)
10210 mempool_free(mbox, phba->mbox_mem_pool);
10211 if (shdr_status || shdr_add_status || rc) {
10212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10213 "2511 POST_SGL mailbox failed with "
10214 "status x%x add_status x%x, mbx status x%x\n",
10215 shdr_status, shdr_add_status, rc);
10216 rc = -ENXIO;
10217 }
10218 return rc;
10219}
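
/*
 * A minimal usage sketch, not part of the driver source: posting the SGL
 * page(s) behind one xritag. Per the kernel-doc above, the second page
 * address must be 0 when 256 or fewer scatter gather entries are needed,
 * and a valid 64 byte aligned address otherwise.
 */
static int lpfc_example_post_one_sgl(struct lpfc_hba *phba, dma_addr_t pg0,
				     dma_addr_t pg1, int needs_two_pages,
				     uint16_t xritag)
{
	return lpfc_sli4_post_sgl(phba, pg0, needs_two_pages ? pg1 : 0,
				  xritag);
}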
10220/**
10221 * lpfc_sli4_remove_all_sgl_pages - Remove all scatter gather lists from the HBA
10222 * @phba: pointer to the HBA for which this call is being executed.
10223 *
10224 * This routine will remove all of the sgl pages registered with the hba.
10225 *
10226 * Return codes:
10227 * 0 - Success
10228 * -ENXIO, -ENOMEM - Failure
10229 **/
10230int
10231lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10232{
10233 LPFC_MBOXQ_t *mbox;
10234 int rc;
10235 uint32_t shdr_status, shdr_add_status;
10236 union lpfc_sli4_cfg_shdr *shdr;
10237
10238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10239 if (!mbox)
10240 return -ENOMEM;
10241
10242 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10243 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10244 LPFC_SLI4_MBX_EMBED);
10245 if (!phba->sli4_hba.intr_enable)
10246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10247 else
10248 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10249 /* The IOCTL status is embedded in the mailbox subheader. */
10250 shdr = (union lpfc_sli4_cfg_shdr *)
10251 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10252 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10254 if (rc != MBX_TIMEOUT)
10255 mempool_free(mbox, phba->mbox_mem_pool);
10256 if (shdr_status || shdr_add_status || rc) {
10257 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10258 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10259 "status x%x add_status x%x, mbx status x%x\n",
10260 shdr_status, shdr_add_status, rc);
10261 rc = -ENXIO;
10262 }
10263 return rc;
10264}
10265
10266/**
10267 * lpfc_sli4_next_xritag - Get an xritag for the io
10268 * @phba: Pointer to HBA context object.
10269 *
10270 * This function gets an xritag for the iocb. If there is no unused xritag
10271 * it will return NO_XRI (0xffff).
10272 * The function returns the allocated xritag if successful; NO_XRI is not
10273 * a valid xritag.
10274 * The caller is not required to hold any lock.
10275 **/
10276uint16_t
10277lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10278{
10279 uint16_t xritag;
10280
10281 spin_lock_irq(&phba->hbalock);
10282 xritag = phba->sli4_hba.next_xri;
10283 if ((xritag != NO_XRI) && xritag <
10284 (phba->sli4_hba.max_cfg_param.max_xri
10285 + phba->sli4_hba.max_cfg_param.xri_base)) {
10286 phba->sli4_hba.next_xri++;
10287 phba->sli4_hba.max_cfg_param.xri_used++;
10288 spin_unlock_irq(&phba->hbalock);
10289 return xritag;
10290 }
10291 spin_unlock_irq(&phba->hbalock);
10292
10293 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10294 "2004 Failed to allocate XRI.last XRITAG is %d"
10295 " Max XRI is %d, Used XRI is %d\n",
10296 phba->sli4_hba.next_xri,
10297 phba->sli4_hba.max_cfg_param.max_xri,
10298 phba->sli4_hba.max_cfg_param.xri_used);
10299 return NO_XRI;
10300}
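
/*
 * A minimal usage sketch, not part of the driver source: callers check the
 * result against NO_XRI (0xffff), the failure value returned above, before
 * stashing the tag in the iocbq's sli4_xritag field added by this patch.
 */
static int lpfc_example_assign_xritag(struct lpfc_hba *phba,
				      struct lpfc_iocbq *iocbq)
{
	uint16_t xritag = lpfc_sli4_next_xritag(phba);

	if (xritag == NO_XRI)
		return -ENOMEM;		/* xri pool exhausted */
	iocbq->sli4_xritag = xritag;
	return 0;
}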
10301
10302/**
10303 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
10304 * @phba: pointer to lpfc hba data structure.
10305 *
10306 * This routine is invoked to post a block of driver's sgl pages to the
10307 * HBA using non-embedded mailbox command. No Lock is held. This routine
10308 * is only called when the driver is loading and after all IO has been
10309 * stopped.
10310 **/
10311int
10312lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10313{
10314 struct lpfc_sglq *sglq_entry;
10315 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10316 struct sgl_page_pairs *sgl_pg_pairs;
10317 void *viraddr;
10318 LPFC_MBOXQ_t *mbox;
10319 uint32_t reqlen, alloclen, pg_pairs;
10320 uint32_t mbox_tmo;
10321 uint16_t xritag_start = 0;
10322 int els_xri_cnt, rc = 0;
10323 uint32_t shdr_status, shdr_add_status;
10324 union lpfc_sli4_cfg_shdr *shdr;
10325
10326 /* The number of sgls to be posted */
10327 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
10328
10329 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
10330 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10331 if (reqlen > PAGE_SIZE) {
10332 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10333 "2559 Block sgl registration required DMA "
10334 "size (%d) great than a page\n", reqlen);
10335 return -ENOMEM;
10336 }
10337 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10338 if (!mbox) {
10339 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10340 "2560 Failed to allocate mbox cmd memory\n");
10341 return -ENOMEM;
10342 }
10343
10344 /* Allocate DMA memory and set up the non-embedded mailbox command */
10345 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10346 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10347 LPFC_SLI4_MBX_NEMBED);
10348
10349 if (alloclen < reqlen) {
10350 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10351 "0285 Allocated DMA memory size (%d) is "
10352 "less than the requested DMA memory "
10353 "size (%d)\n", alloclen, reqlen);
10354 lpfc_sli4_mbox_cmd_free(phba, mbox);
10355 return -ENOMEM;
10356 }
10357
10358 /* Get the first SGE entry from the non-embedded DMA memory */
10359 if (unlikely(!mbox->sge_array)) {
10360 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10361 "2525 Failed to get the non-embedded SGE "
10362 "virtual address\n");
10363 lpfc_sli4_mbox_cmd_free(phba, mbox);
10364 return -ENOMEM;
10365 }
10366 viraddr = mbox->sge_array->addr[0];
10367
10368 /* Set up the SGL pages in the non-embedded DMA pages */
10369 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10370 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10371
10372 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
10373 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
10374 /* Set up the sge entry */
10375 sgl_pg_pairs->sgl_pg0_addr_lo =
10376 cpu_to_le32(putPaddrLow(sglq_entry->phys));
10377 sgl_pg_pairs->sgl_pg0_addr_hi =
10378 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
10379 sgl_pg_pairs->sgl_pg1_addr_lo =
10380 cpu_to_le32(putPaddrLow(0));
10381 sgl_pg_pairs->sgl_pg1_addr_hi =
10382 cpu_to_le32(putPaddrHigh(0));
10383 /* Keep the first xritag on the list */
10384 if (pg_pairs == 0)
10385 xritag_start = sglq_entry->sli4_xritag;
10386 sgl_pg_pairs++;
10387 }
10388 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10389 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
10390 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10391 /* Perform endian conversion if necessary */
10392 sgl->word0 = cpu_to_le32(sgl->word0);
10393
10394 if (!phba->sli4_hba.intr_enable)
10395 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10396 else {
10397 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10398 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10399 }
10400 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10401 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10402 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10403 if (rc != MBX_TIMEOUT)
10404 lpfc_sli4_mbox_cmd_free(phba, mbox);
10405 if (shdr_status || shdr_add_status || rc) {
10406 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10407 "2513 POST_SGL_BLOCK mailbox command failed "
10408 "status x%x add_status x%x mbx status x%x\n",
10409 shdr_status, shdr_add_status, rc);
10410 rc = -ENXIO;
10411 }
10412 return rc;
10413}
10414
10415/**
10416 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
10417 * @phba: pointer to lpfc hba data structure.
10418 * @sblist: pointer to scsi buffer list.
10419 * @cnt: number of scsi buffers on the list.
10420 *
10421 * This routine is invoked to post a block of @cnt scsi sgl pages from a
10422 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
10423 * No Lock is held.
10424 *
10425 **/
10426int
10427lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10428 int cnt)
10429{
10430 struct lpfc_scsi_buf *psb;
10431 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10432 struct sgl_page_pairs *sgl_pg_pairs;
10433 void *viraddr;
10434 LPFC_MBOXQ_t *mbox;
10435 uint32_t reqlen, alloclen, pg_pairs;
10436 uint32_t mbox_tmo;
10437 uint16_t xritag_start = 0;
10438 int rc = 0;
10439 uint32_t shdr_status, shdr_add_status;
10440 dma_addr_t pdma_phys_bpl1;
10441 union lpfc_sli4_cfg_shdr *shdr;
10442
10443 /* Calculate the requested length of the dma memory */
10444 reqlen = cnt * sizeof(struct sgl_page_pairs) +
10445 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10446 if (reqlen > PAGE_SIZE) {
10447 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10448 "0217 Block sgl registration required DMA "
10449 "size (%d) great than a page\n", reqlen);
10450 return -ENOMEM;
10451 }
10452 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10453 if (!mbox) {
10454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10455 "0283 Failed to allocate mbox cmd memory\n");
10456 return -ENOMEM;
10457 }
10458
10459 /* Allocate DMA memory and set up the non-embedded mailbox command */
10460 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10461 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10462 LPFC_SLI4_MBX_NEMBED);
10463
10464 if (alloclen < reqlen) {
10465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10466 "2561 Allocated DMA memory size (%d) is "
10467 "less than the requested DMA memory "
10468 "size (%d)\n", alloclen, reqlen);
10469 lpfc_sli4_mbox_cmd_free(phba, mbox);
10470 return -ENOMEM;
10471 }
10472
10473 /* Get the first SGE entry from the non-embedded DMA memory */
10474 if (unlikely(!mbox->sge_array)) {
10475 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10476 "2565 Failed to get the non-embedded SGE "
10477 "virtual address\n");
10478 lpfc_sli4_mbox_cmd_free(phba, mbox);
10479 return -ENOMEM;
10480 }
10481 viraddr = mbox->sge_array->addr[0];
10482
10483 /* Set up the SGL pages in the non-embedded DMA pages */
10484 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10485 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10486
10487 pg_pairs = 0;
10488 list_for_each_entry(psb, sblist, list) {
10489 /* Set up the sge entry */
10490 sgl_pg_pairs->sgl_pg0_addr_lo =
10491 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
10492 sgl_pg_pairs->sgl_pg0_addr_hi =
10493 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
10494 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
10495 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
10496 else
10497 pdma_phys_bpl1 = 0;
10498 sgl_pg_pairs->sgl_pg1_addr_lo =
10499 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
10500 sgl_pg_pairs->sgl_pg1_addr_hi =
10501 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
10502 /* Keep the first xritag on the list */
10503 if (pg_pairs == 0)
10504 xritag_start = psb->cur_iocbq.sli4_xritag;
10505 sgl_pg_pairs++;
10506 pg_pairs++;
10507 }
10508 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10509 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10510 /* Perform endian conversion if necessary */
10511 sgl->word0 = cpu_to_le32(sgl->word0);
10512
10513 if (!phba->sli4_hba.intr_enable)
10514 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10515 else {
10516 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10517 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10518 }
10519 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10520 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10521 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10522 if (rc != MBX_TIMEOUT)
10523 lpfc_sli4_mbox_cmd_free(phba, mbox);
10524 if (shdr_status || shdr_add_status || rc) {
10525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10526 "2564 POST_SGL_BLOCK mailbox command failed "
10527 "status x%x add_status x%x mbx status x%x\n",
10528 shdr_status, shdr_add_status, rc);
10529 rc = -ENXIO;
10530 }
10531 return rc;
10532}
10533
10534/**
10535 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
10536 * @phba: pointer to lpfc_hba struct that the frame was received on
10537 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10538 *
10539 * This function checks the fields in the @fc_hdr to see if the FC frame is a
10540 * valid type of frame that the LPFC driver will handle. This function will
10541 * return zero if the frame is a valid frame or a non-zero value when the
10542 * frame does not pass the check.
10543 **/
10544static int
10545lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
10546{
10547 char *rctl_names[] = FC_RCTL_NAMES_INIT;
10548 char *type_names[] = FC_TYPE_NAMES_INIT;
10549 struct fc_vft_header *fc_vft_hdr;
10550
10551 switch (fc_hdr->fh_r_ctl) {
10552 case FC_RCTL_DD_UNCAT: /* uncategorized information */
10553 case FC_RCTL_DD_SOL_DATA: /* solicited data */
10554 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
10555 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
10556 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
10557 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
10558 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
10559 case FC_RCTL_DD_CMD_STATUS: /* command status */
10560 case FC_RCTL_ELS_REQ: /* extended link services request */
10561 case FC_RCTL_ELS_REP: /* extended link services reply */
10562 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
10563 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
10564 case FC_RCTL_BA_NOP: /* basic link service NOP */
10565 case FC_RCTL_BA_ABTS: /* basic link service abort */
10566 case FC_RCTL_BA_RMC: /* remove connection */
10567 case FC_RCTL_BA_ACC: /* basic accept */
10568 case FC_RCTL_BA_RJT: /* basic reject */
10569 case FC_RCTL_BA_PRMT:
10570 case FC_RCTL_ACK_1: /* acknowledge_1 */
10571 case FC_RCTL_ACK_0: /* acknowledge_0 */
10572 case FC_RCTL_P_RJT: /* port reject */
10573 case FC_RCTL_F_RJT: /* fabric reject */
10574 case FC_RCTL_P_BSY: /* port busy */
10575 case FC_RCTL_F_BSY: /* fabric busy to data frame */
10576 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
10577 case FC_RCTL_LCR: /* link credit reset */
10578 case FC_RCTL_END: /* end */
10579 break;
10580 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
10581 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10582 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
10583 return lpfc_fc_frame_check(phba, fc_hdr);
10584 default:
10585 goto drop;
10586 }
10587 switch (fc_hdr->fh_type) {
10588 case FC_TYPE_BLS:
10589 case FC_TYPE_ELS:
10590 case FC_TYPE_FCP:
10591 case FC_TYPE_CT:
10592 break;
10593 case FC_TYPE_IP:
10594 case FC_TYPE_ILS:
10595 default:
10596 goto drop;
10597 }
10598 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10599 "2538 Received frame rctl:%s type:%s\n",
10600 rctl_names[fc_hdr->fh_r_ctl],
10601 type_names[fc_hdr->fh_type]);
10602 return 0;
10603drop:
10604 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
10605 "2539 Dropped frame rctl:%s type:%s\n",
10606 rctl_names[fc_hdr->fh_r_ctl],
10607 type_names[fc_hdr->fh_type]);
10608 return 1;
10609}
10610
10611/**
10612 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
10613 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10614 *
10615 * This function processes the FC header to retrieve the VFI from the VF
10616 * header, if one exists. This function will return the VFI if one exists
10617 * or 0 if no VF Tagging Header exists.
10618 **/
10619static uint32_t
10620lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10621{
10622 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10623
10624 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10625 return 0;
10626 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10627}
10628
10629/**
10630 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
10631 * @phba: Pointer to the HBA structure to search for the vport on
10632 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10633 * @fcfi: The FCF indicator (FCFI) that the frame came from
10634 *
10635 * This function searches the @phba for a vport that matches the content of the
10636 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
10637 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
10638 * returns the matching vport pointer or NULL if unable to match frame to a
10639 * vport.
10640 **/
10641static struct lpfc_vport *
10642lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10643 uint16_t fcfi)
10644{
10645 struct lpfc_vport **vports;
10646 struct lpfc_vport *vport = NULL;
10647 int i;
10648 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10649 fc_hdr->fh_d_id[1] << 8 |
10650 fc_hdr->fh_d_id[2]);
10651
10652 vports = lpfc_create_vport_work_array(phba);
10653 if (vports != NULL)
10654 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10655 if (phba->fcf.fcfi == fcfi &&
10656 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10657 vports[i]->fc_myDID == did) {
10658 vport = vports[i];
10659 break;
10660 }
10661 }
10662 lpfc_destroy_vport_work_array(phba, vports);
10663 return vport;
10664}
10665
10666/**
10667 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10668 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10669 *
10670 * This function searches through the existing incomplete sequences that have
10671 * been sent to this @vport. If the frame matches one of the incomplete
10672 * sequences then the dbuf in the @dmabuf is added to the list of frames that
10673 * make up that sequence. If no sequence is found that matches this frame then
10674 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
10675 * This function returns a pointer to the first dmabuf in the sequence list that
10676 * the frame was linked to.
10677 **/
10678static struct hbq_dmabuf *
10679lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10680{
10681 struct fc_frame_header *new_hdr;
10682 struct fc_frame_header *temp_hdr;
10683 struct lpfc_dmabuf *d_buf;
10684 struct lpfc_dmabuf *h_buf;
10685 struct hbq_dmabuf *seq_dmabuf = NULL;
10686 struct hbq_dmabuf *temp_dmabuf = NULL;
10687
10688 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10689 /* Use the hdr_buf to find the sequence that this frame belongs to */
10690 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
10691 temp_hdr = (struct fc_frame_header *)h_buf->virt;
10692 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
10693 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
10694 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
10695 continue;
10696 /* found a pending sequence that matches this frame */
10697 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10698 break;
10699 }
10700 if (!seq_dmabuf) {
10701 /*
10702 * This indicates first frame received for this sequence.
10703 * Queue the buffer on the vport's rcv_buffer_list.
10704 */
10705 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10706 return dmabuf;
10707 }
10708 temp_hdr = seq_dmabuf->hbuf.virt;
10709 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
10710 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
10711 return dmabuf;
10712 }
10713 /* find the correct place in the sequence to insert this frame */
10714 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10715 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10716 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
10717 /*
10718 * If the frame's sequence count is greater than the frame on
10719 * the list then insert the frame right after this frame
10720 */
10721 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
10722 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10723 return seq_dmabuf;
10724 }
10725 }
10726 return NULL;
10727}
10728
10729/**
10730 * lpfc_seq_complete - Indicates if a sequence is complete
10731 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10732 *
10733 * This function checks the sequence, starting with the frame described by
10734 * @dmabuf, to see if all the frames associated with this sequence are present.
10735 * the frames associated with this sequence are linked to the @dmabuf using the
10736 * dbuf list. This function looks for three major things: 1) that the first
10737 * frame has a sequence count of zero; 2) that there is a frame with the last
10738 * frame of sequence bit set; 3) that there are no holes in the sequence count. The function will
10739 * return 1 when the sequence is complete, otherwise it will return 0.
10740 **/
10741static int
10742lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10743{
10744 struct fc_frame_header *hdr;
10745 struct lpfc_dmabuf *d_buf;
10746 struct hbq_dmabuf *seq_dmabuf;
10747 uint32_t fctl;
10748 int seq_count = 0;
10749
10750 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10751 /* make sure first frame of sequence has a sequence count of zero */
10752 if (hdr->fh_seq_cnt != seq_count)
10753 return 0;
10754 fctl = (hdr->fh_f_ctl[0] << 16 |
10755 hdr->fh_f_ctl[1] << 8 |
10756 hdr->fh_f_ctl[2]);
10757 /* If last frame of sequence we can return success. */
10758 if (fctl & FC_FC_END_SEQ)
10759 return 1;
10760 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
10761 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10762 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10763 /* If there is a hole in the sequence count then fail. */
10764 if (++seq_count != hdr->fh_seq_cnt)
10765 return 0;
10766 fctl = (hdr->fh_f_ctl[0] << 16 |
10767 hdr->fh_f_ctl[1] << 8 |
10768 hdr->fh_f_ctl[2]);
10769 /* If last frame of sequence we can return success. */
10770 if (fctl & FC_FC_END_SEQ)
10771 return 1;
10772 }
10773 return 0;
10774}
10775
10776/**
10777 * lpfc_prep_seq - Prep sequence for ULP processing
10778 * @vport: Pointer to the vport on which this sequence was received
10779 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
10780 *
10781 * This function takes a sequence, described by a list of frames, and creates
10782 * a list of iocbq structures to describe the sequence. This iocbq list will be
10783 * used to issue to the generic unsolicited sequence handler. This routine
10784 * returns a pointer to the first iocbq in the list. If the function is unable
10785 * to allocate an iocbq then it will throw out the received frames that could
10786 * not be described and return a pointer to the first iocbq. If unable to
10787 * allocate any iocbqs (including the first) this function will return NULL.
10788 **/
10789static struct lpfc_iocbq *
10790lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10791{
10792 struct lpfc_dmabuf *d_buf, *n_buf;
10793 struct lpfc_iocbq *first_iocbq, *iocbq;
10794 struct fc_frame_header *fc_hdr;
10795 uint32_t sid;
10796
10797 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10798 /* remove from receive buffer list */
10799 list_del_init(&seq_dmabuf->hbuf.list);
10800 /* get the Remote Port's SID */
10801 sid = (fc_hdr->fh_s_id[0] << 16 |
10802 fc_hdr->fh_s_id[1] << 8 |
10803 fc_hdr->fh_s_id[2]);
10804 /* Get an iocbq struct to fill in. */
10805 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10806 if (first_iocbq) {
10807 /* Initialize the first IOCB. */
10808 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
10809 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
10810 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
10811 first_iocbq->iocb.unsli3.rcvsli3.vpi =
10812 vport->vpi + vport->phba->vpi_base;
10813 /* put the first buffer into the first IOCBq */
10814 first_iocbq->context2 = &seq_dmabuf->dbuf;
10815 first_iocbq->context3 = NULL;
10816 first_iocbq->iocb.ulpBdeCount = 1;
10817 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10818 LPFC_DATA_BUF_SIZE;
10819 first_iocbq->iocb.un.rcvels.remoteID = sid;
10820 }
10821 iocbq = first_iocbq;
10822 /*
10823 * Each IOCBq can have two Buffers assigned, so go through the list
10824 * of buffers for this sequence and save two buffers in each IOCBq
10825 */
10826 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
10827 if (!iocbq) {
10828 lpfc_in_buf_free(vport->phba, d_buf);
10829 continue;
10830 }
10831 if (!iocbq->context3) {
10832 iocbq->context3 = d_buf;
10833 iocbq->iocb.ulpBdeCount++;
10834 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
10835 LPFC_DATA_BUF_SIZE;
10836 } else {
10837 iocbq = lpfc_sli_get_iocbq(vport->phba);
10838 if (!iocbq) {
10839 if (first_iocbq) {
10840 first_iocbq->iocb.ulpStatus =
10841 IOSTAT_FCP_RSP_ERROR;
10842 first_iocbq->iocb.un.ulpWord[4] =
10843 IOERR_NO_RESOURCES;
10844 }
10845 lpfc_in_buf_free(vport->phba, d_buf);
10846 continue;
10847 }
10848 iocbq->context2 = d_buf;
10849 iocbq->context3 = NULL;
10850 iocbq->iocb.ulpBdeCount = 1;
10851 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10852 LPFC_DATA_BUF_SIZE;
10853 iocbq->iocb.un.rcvels.remoteID = sid;
10854 list_add_tail(&iocbq->list, &first_iocbq->list);
10855 }
10856 }
10857 return first_iocbq;
10858}
10859
10860/**
10861 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
10862 * @phba: Pointer to HBA context object.
10863 *
10864 * This function is called with no lock held. This function processes all
10865 * the received buffers and gives them to upper layers when a received buffer
10866 * indicates that it is the final frame in the sequence. The interrupt
10867 * service routine processes received buffers at interrupt contexts and adds
10868 * received dma buffers to the rb_pend_list queue and signals the worker thread.
10869 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
10870 * appropriate receive function when the final frame in a sequence is received.
10871 **/
10872int
10873lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
10874{
10875 LIST_HEAD(cmplq);
10876 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
10877 struct fc_frame_header *fc_hdr;
10878 struct lpfc_vport *vport;
10879 uint32_t fcfi;
10880 struct lpfc_iocbq *iocbq;
10881
10882 /* Clear hba flag and get all received buffers into the cmplq */
10883 spin_lock_irq(&phba->hbalock);
10884 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
10885 list_splice_init(&phba->rb_pend_list, &cmplq);
10886 spin_unlock_irq(&phba->hbalock);
10887
10888 /* Process each received buffer */
10889 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
10890 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10891 /* check to see if this a valid type of frame */
10892 if (lpfc_fc_frame_check(phba, fc_hdr)) {
10893 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10894 continue;
10895 }
10896 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
10897 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
10898 if (!vport) {
10899 /* throw out the frame */
10900 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10901 continue;
10902 }
10903 /* Link this frame */
10904 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
10905 if (!seq_dmabuf) {
10906 /* unable to add frame to vport - throw it out */
10907 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10908 continue;
10909 }
10910 /* If not last frame in sequence continue processing frames. */
10911 if (!lpfc_seq_complete(seq_dmabuf)) {
10912 /*
10913 * When saving off frames post a new one and mark this
10914 * frame to be freed when it is finished.
10915 **/
10916 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
10917 dmabuf->tag = -1;
10918 continue;
10919 }
10920 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10921 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
10922 if (!lpfc_complete_unsol_iocb(phba,
10923 &phba->sli.ring[LPFC_ELS_RING],
10924 iocbq, fc_hdr->fh_r_ctl,
10925 fc_hdr->fh_type))
10926 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10927 "2540 Ring %d handler: unexpected Rctl "
10928 "x%x Type x%x received\n",
10929 LPFC_ELS_RING,
10930 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
10931 }
10932 return 0;
10933}
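
/*
 * A minimal sketch, not part of the driver source, of the worker-thread
 * hook described in the kernel-doc above: the interrupt handler queues
 * buffers on rb_pend_list and sets HBA_RECEIVE_BUFFER; the worker thread
 * then drains them through lpfc_sli4_handle_received_buffer().
 */
static void lpfc_example_worker_handle_rcv(struct lpfc_hba *phba)
{
	if (phba->hba_flag & HBA_RECEIVE_BUFFER)
		lpfc_sli4_handle_received_buffer(phba);
}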
10934
10935/**
10936 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
10937 * @phba: pointer to lpfc hba data structure.
10938 *
10939 * This routine is invoked to post rpi header templates to the
10940 * HBA consistent with the SLI-4 interface spec. This routine
10941 * posts a PAGE_SIZE memory region to the port to hold up to
10942 * PAGE_SIZE / 64 rpi context headers.
10943 *
10944 * This routine does not require any locks. Its usage is expected
10945 * to be driver load or reset recovery, when driver execution is
10946 * sequential.
10947 *
10948 * Return codes
10949 * 0 - successful
10950 * EIO - The mailbox failed to complete successfully.
10951 * When this error occurs, the driver is not guaranteed
10952 * to have any rpi regions posted to the device and
10953 * must either attempt to repost the regions or take a
10954 * fatal error.
10955 **/
10956int
10957lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
10958{
10959 struct lpfc_rpi_hdr *rpi_page;
10960 uint32_t rc = 0;
10961
10962 /* Post all rpi memory regions to the port. */
10963 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
10964 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
10965 if (rc != MBX_SUCCESS) {
10966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10967 "2008 Error %d posting all rpi "
10968 "headers\n", rc);
10969 rc = -EIO;
10970 break;
10971 }
10972 }
10973
10974 return rc;
10975}
10976
10977/**
10978 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
10979 * @phba: pointer to lpfc hba data structure.
10980 * @rpi_page: pointer to the rpi memory region.
10981 *
10982 * This routine is invoked to post a single rpi header to the
10983 * HBA consistent with the SLI-4 interface spec. This memory region
10984 * maps up to 64 rpi context regions.
10985 *
10986 * Return codes
10987 * 0 - successful
10988 * ENOMEM - No available memory
10989 * EIO - The mailbox failed to complete successfully.
10990 **/
10991int
10992lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
10993{
10994 LPFC_MBOXQ_t *mboxq;
10995 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
10996 uint32_t rc = 0;
10997 uint32_t mbox_tmo;
10998 uint32_t shdr_status, shdr_add_status;
10999 union lpfc_sli4_cfg_shdr *shdr;
11000
11001 /* The port is notified of the header region via a mailbox command. */
11002 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11003 if (!mboxq) {
11004 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11005 "2001 Unable to allocate memory for issuing "
11006 "SLI_CONFIG_SPECIAL mailbox command\n");
11007 return -ENOMEM;
11008 }
11009
11010 /* Post all rpi memory regions to the port. */
11011 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
11012 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
11013 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11014 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
11015 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
11016 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
11017 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
11018 hdr_tmpl, rpi_page->page_count);
11019 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
11020 rpi_page->start_rpi);
11021 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11022 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11023 if (!phba->sli4_hba.intr_enable)
11024 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11025 else
11026 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11027 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11028 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11029 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11030 if (rc != MBX_TIMEOUT)
11031 mempool_free(mboxq, phba->mbox_mem_pool);
11032 if (shdr_status || shdr_add_status || rc) {
11033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11034 "2514 POST_RPI_HDR mailbox failed with "
11035 "status x%x add_status x%x, mbx status x%x\n",
11036 shdr_status, shdr_add_status, rc);
11037 rc = -ENXIO;
11038 }
11039 return rc;
11040}
11041
11042/**
11043 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
11044 * @phba: pointer to lpfc hba data structure.
11045 *
11046 * This routine is invoked to allocate an rpi from the driver's pool of
11047 * available rpis, consistent with the SLI-4 interface spec. If the pool
11048 * of remaining rpis runs low, it also posts another page of rpi context
11049 * headers to the port.
11050 *
11051 * Returns
11052 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11053 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11054 **/
11055int
11056lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11057{
11058 int rpi;
11059 uint16_t max_rpi, rpi_base, rpi_limit;
11060 uint16_t rpi_remaining;
11061 struct lpfc_rpi_hdr *rpi_hdr;
11062
11063 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
11064 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
11065 rpi_limit = phba->sli4_hba.next_rpi;
11066
11067 /*
11068 * The valid rpi range is not guaranteed to be zero-based. Start
11069 * the search at the rpi_base as reported by the port.
11070 */
11071 spin_lock_irq(&phba->hbalock);
11072 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
11073 if (rpi >= rpi_limit || rpi < rpi_base)
11074 rpi = LPFC_RPI_ALLOC_ERROR;
11075 else {
11076 set_bit(rpi, phba->sli4_hba.rpi_bmask);
11077 phba->sli4_hba.max_cfg_param.rpi_used++;
11078 phba->sli4_hba.rpi_count++;
11079 }
11080
11081 /*
11082 * Don't try to allocate more rpi header regions if the device limit
11083 * on available rpis max has been exhausted.
11084 */
11085 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
11086 (phba->sli4_hba.rpi_count >= max_rpi)) {
11087 spin_unlock_irq(&phba->hbalock);
11088 return rpi;
11089 }
11090
11091 /*
11092 * If the driver is running low on rpi resources, allocate another
11093 * page now. Note that the next_rpi value is used because
11094 * it represents how many are actually in use whereas max_rpi notes
11095 * the maximum supported by the device.
11096 */
11097 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
11098 phba->sli4_hba.rpi_count;
11099 spin_unlock_irq(&phba->hbalock);
11100 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
11101 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
11102 if (!rpi_hdr) {
11103 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11104 "2002 Error Could not grow rpi "
11105 "count\n");
11106 } else {
11107 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
11108 }
11109 }
11110
11111 return rpi;
11112}
11113
11114/**
11115 * lpfc_sli4_free_rpi - Release an rpi for reuse.
11116 * @phba: pointer to lpfc hba data structure.
11117 *
11118 * This routine is invoked to release an rpi to the pool of
11119 * available rpis maintained by the driver.
11120 **/
11121void
11122lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11123{
11124 spin_lock_irq(&phba->hbalock);
11125 clear_bit(rpi, phba->sli4_hba.rpi_bmask);
11126 phba->sli4_hba.rpi_count--;
11127 phba->sli4_hba.max_cfg_param.rpi_used--;
11128 spin_unlock_irq(&phba->hbalock);
11129}
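
/*
 * A minimal usage sketch, not part of the driver source: allocate an rpi,
 * fail cleanly on LPFC_RPI_ALLOC_ERROR, and return the rpi to the bitmask
 * with lpfc_sli4_free_rpi() when done with it.
 */
static int lpfc_example_use_rpi(struct lpfc_hba *phba)
{
	int rpi = lpfc_sli4_alloc_rpi(phba);

	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOSPC;
	/* ... register the rpi with the port and run IO here ... */
	lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}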
11130
11131/**
11132 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
11133 * @phba: pointer to lpfc hba data structure.
11134 *
11135 * This routine is invoked to remove the memory region that
11136 * provided rpi via a bitmask.
11137 **/
11138void
11139lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
11140{
11141 kfree(phba->sli4_hba.rpi_bmask);
11142}
11143
11144/**
11145 * lpfc_sli4_resume_rpi - Resume an rpi on the port
11146 * @ndlp: pointer to the nodelist whose rpi is to be resumed.
11147 *
11148 * This routine is invoked to issue a RESUME_RPI mailbox command to
11149 * the port for the rpi associated with @ndlp.
11150 **/
11151int
11152lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
11153{
11154 LPFC_MBOXQ_t *mboxq;
11155 struct lpfc_hba *phba = ndlp->phba;
11156 int rc;
11157
11158 /* The port is notified of the header region via a mailbox command. */
11159 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11160 if (!mboxq)
11161 return -ENOMEM;
11162
11163 /* Post all rpi memory regions to the port. */
11164 lpfc_resume_rpi(mboxq, ndlp);
11165 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11166 if (rc == MBX_NOT_FINISHED) {
11167 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11168 "2010 Resume RPI Mailbox failed "
11169 "status %d, mbxStatus x%x\n", rc,
11170 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11171 mempool_free(mboxq, phba->mbox_mem_pool);
11172 return -EIO;
11173 }
11174 return 0;
11175}
11176
11177/**
11178 * lpfc_sli4_init_vpi - Initialize a vpi with the port
11179 * @phba: pointer to lpfc hba data structure.
11180 * @vpi: vpi value to activate with the port.
11181 *
11182 * This routine is invoked to activate a vpi with the
11183 * port when the host intends to use vports with a
11184 * nonzero vpi.
11185 *
11186 * Returns:
11187 * 0 success
11188 * -EINVAL, -ENOMEM or -EIO otherwise
11189 **/
11190int
11191lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11192{
11193 LPFC_MBOXQ_t *mboxq;
11194 int rc = 0;
11195 uint32_t mbox_tmo;
11196
11197 if (vpi == 0)
11198 return -EINVAL;
11199 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11200 if (!mboxq)
11201 return -ENOMEM;
11202 lpfc_init_vpi(mboxq, vpi);
11203 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11204 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11205 if (rc != MBX_TIMEOUT)
11206 mempool_free(mboxq, phba->mbox_mem_pool);
11207 if (rc != MBX_SUCCESS) {
11208 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11209 "2022 INIT VPI Mailbox failed "
11210 "status %d, mbxStatus x%x\n", rc,
11211 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11212 rc = -EIO;
11213 }
11214 return rc;
11215}
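
/*
 * A minimal usage sketch, not part of the driver source: only vports with
 * a nonzero vpi need activation, since lpfc_sli4_init_vpi() above rejects
 * vpi 0 with -EINVAL.
 */
static int lpfc_example_activate_vpi(struct lpfc_vport *vport)
{
	if (!vport->vpi)
		return 0;	/* physical port vpi needs no INIT_VPI */
	return lpfc_sli4_init_vpi(vport->phba, vport->vpi);
}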
11216
11217/**
11218 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
11219 * @phba: pointer to lpfc hba data structure.
11220 * @mboxq: Pointer to mailbox object.
11221 *
11222 * This routine is invoked to manually add a single FCF record. The caller
11223 * must pass a completely initialized FCF_Record. This routine takes
11224 * care of the nonembedded mailbox operations.
11225 **/
11226static void
11227lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11228{
11229 void *virt_addr;
11230 union lpfc_sli4_cfg_shdr *shdr;
11231 uint32_t shdr_status, shdr_add_status;
11232
11233 virt_addr = mboxq->sge_array->addr[0];
11234 /* The IOCTL status is embedded in the mailbox subheader. */
11235 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
11236 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11237 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11238
11239 if ((shdr_status || shdr_add_status) &&
11240 (shdr_status != STATUS_FCF_IN_USE))
11241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11242 "2558 ADD_FCF_RECORD mailbox failed with "
11243 "status x%x add_status x%x\n",
11244 shdr_status, shdr_add_status);
11245
11246 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11247}
11248
11249/**
11250 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
11251 * @phba: pointer to lpfc hba data structure.
11252 * @fcf_record: pointer to the initialized fcf record to add.
11253 *
11254 * This routine is invoked to manually add a single FCF record. The caller
11255 * must pass a completely initialized FCF_Record. This routine takes
11256 * care of the nonembedded mailbox operations.
11257 **/
11258int
11259lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11260{
11261 int rc = 0;
11262 LPFC_MBOXQ_t *mboxq;
11263 uint8_t *bytep;
11264 void *virt_addr;
11265 dma_addr_t phys_addr;
11266 struct lpfc_mbx_sge sge;
11267 uint32_t alloc_len, req_len;
11268 uint32_t fcfindex;
11269
11270 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11271 if (!mboxq) {
11272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11273 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
11274 return -ENOMEM;
11275 }
11276
11277 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
11278 sizeof(uint32_t);
11279
11280 /* Allocate DMA memory and set up the non-embedded mailbox command */
11281 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11282 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
11283 req_len, LPFC_SLI4_MBX_NEMBED);
11284 if (alloc_len < req_len) {
11285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11286 "2523 Allocated DMA memory size (x%x) is "
11287 "less than the requested DMA memory "
11288 "size (x%x)\n", alloc_len, req_len);
11289 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11290 return -ENOMEM;
11291 }
11292
11293 /*
11294 * Get the first SGE entry from the non-embedded DMA memory. This
11295 * routine only uses a single SGE.
11296 */
11297 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11298 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11299 if (unlikely(!mboxq->sge_array)) {
11300 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11301 "2526 Failed to get the non-embedded SGE "
11302 "virtual address\n");
11303 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11304 return -ENOMEM;
11305 }
11306 virt_addr = mboxq->sge_array->addr[0];
11307 /*
11308 * Configure the FCF record for FCFI 0. This is the driver's
11309 * hardcoded default and gets used in nonFIP mode.
11310 */
11311 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
11312 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11313 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
11314
11315 /*
11316 * Copy the fcf_index and the FCF Record Data. The data starts after
11317 * the FCoE header plus word10. The data copy needs to be endian
11318 * correct.
11319 */
11320 bytep += sizeof(uint32_t);
11321 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
11322 mboxq->vport = phba->pport;
11323 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
11324 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11325 if (rc == MBX_NOT_FINISHED) {
11326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11327 "2515 ADD_FCF_RECORD mailbox failed with "
11328 "status 0x%x\n", rc);
11329 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11330 rc = -EIO;
11331 } else
11332 rc = 0;
11333
11334 return rc;
11335}
11336
11337/**
11338 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
11339 * @phba: pointer to lpfc hba data structure.
11340 * @fcf_record: pointer to the fcf record to write the default data.
11341 * @fcf_index: FCF table entry index.
11342 *
11343 * This routine is invoked to build the driver's default FCF record. The
11344 * values used are hardcoded. This routine handles memory initialization.
11345 *
11346 **/
11347void
11348lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11349 struct fcf_record *fcf_record,
11350 uint16_t fcf_index)
11351{
11352 memset(fcf_record, 0, sizeof(struct fcf_record));
11353 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
11354 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
11355 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
11356 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
11357 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
11358 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
11359 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
11360 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
11361 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
11362 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
11363 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
11364 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
11365 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
11366 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
11367 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
11368 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
11369 /* Set the VLAN bit map */
11370 if (phba->valid_vlan) {
11371 fcf_record->vlan_bitmap[phba->vlan_id / 8]
11372 = 1 << (phba->vlan_id % 8);
11373 }
11374}
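
/*
 * A minimal usage sketch, not part of the driver source: build the
 * hardcoded default record for FCF index 0 (the non-FIP case mentioned in
 * lpfc_sli4_add_fcf_record() above) and post it to the port. The record is
 * heap allocated here only because struct fcf_record is large.
 */
static int lpfc_example_add_dflt_fcf(struct lpfc_hba *phba)
{
	struct fcf_record *fcf_record;
	int rc;

	fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL);
	if (!fcf_record)
		return -ENOMEM;
	lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 0);
	rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
	kfree(fcf_record);
	return rc;
}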
11375
11376/**
11377 * lpfc_sli4_read_fcf_record - Read an FCF record from the port.
11378 * @phba: pointer to lpfc hba data structure.
11379 * @fcf_index: FCF table entry offset.
11380 *
11381 * This routine is invoked to read one FCF record from the
11382 * device starting at the given @fcf_index.
11383 **/
11384int
11385lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11386{
11387 int rc = 0, error;
11388 LPFC_MBOXQ_t *mboxq;
11389 void *virt_addr;
11390 dma_addr_t phys_addr;
11391 uint8_t *bytep;
11392 struct lpfc_mbx_sge sge;
11393 uint32_t alloc_len, req_len;
11394 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11395
11396 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11397 if (!mboxq) {
11398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11399 "2000 Failed to allocate mbox for "
11400 "READ_FCF cmd\n");
11401 return -ENOMEM;
11402 }
11403
11404 req_len = sizeof(struct fcf_record) +
11405 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
11406
11407 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
11408 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11409 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11410 LPFC_SLI4_MBX_NEMBED);
11411
11412 if (alloc_len < req_len) {
11413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11414 "0291 Allocated DMA memory size (x%x) is "
11415 "less than the requested DMA memory "
11416 "size (x%x)\n", alloc_len, req_len);
11417 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11418 return -ENOMEM;
11419 }
11420
11421 /* Get the first SGE entry from the non-embedded DMA memory. This
11422 * routine only uses a single SGE.
11423 */
11424 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11425 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11426 if (unlikely(!mboxq->sge_array)) {
11427 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11428 "2527 Failed to get the non-embedded SGE "
11429 "virtual address\n");
11430 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11431 return -ENOMEM;
11432 }
11433 virt_addr = mboxq->sge_array->addr[0];
11434 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11435
11436 /* Set up command fields */
11437 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11438 /* Perform necessary endian conversion */
11439 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11440 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11441 mboxq->vport = phba->pport;
11442 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11443 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11444 if (rc == MBX_NOT_FINISHED) {
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 error = -EIO;
11447 } else
11448 error = 0;
11449 return error;
11450}
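
/*
 * A minimal usage sketch, not part of the driver source: kick off an FCF
 * table scan at entry 0. The read completes asynchronously through
 * lpfc_mbx_cmpl_read_fcf_record(), as wired up above.
 */
static int lpfc_example_start_fcf_scan(struct lpfc_hba *phba)
{
	return lpfc_sli4_read_fcf_record(phba, 0);
}
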
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6a..7d37eb7459bf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */
33struct lpfc_sli4_rspiocb_info {
34 uint8_t hw_status;
35 uint8_t bfield;
36#define LPFC_XB 0x1
37#define LPFC_PV 0x2
38 uint8_t priority;
39 uint8_t reserved;
40};
41
32/* This structure is used to handle IOCB requests / responses */ 42/* This structure is used to handle IOCB requests / responses */
33struct lpfc_iocbq { 43struct lpfc_iocbq {
34 /* lpfc_iocbqs are used in double linked lists */ 44 /* lpfc_iocbqs are used in double linked lists */
35 struct list_head list; 45 struct list_head list;
36 struct list_head clist; 46 struct list_head clist;
37 uint16_t iotag; /* pre-assigned IO tag */ 47 uint16_t iotag; /* pre-assigned IO tag */
38 uint16_t rsvd1; 48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
39 49
40 IOCB_t iocb; /* IOCB cmd */ 50 IOCB_t iocb; /* IOCB cmd */
41 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 51 uint8_t retry; /* retry counter for IOCB cmd - if needed */
@@ -65,7 +75,7 @@ struct lpfc_iocbq {
65 struct lpfc_iocbq *); 75 struct lpfc_iocbq *);
66 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 76 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
67 struct lpfc_iocbq *); 77 struct lpfc_iocbq *);
68 78 struct lpfc_sli4_rspiocb_info sli4_info;
69}; 79};
70 80
71#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 81#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -81,14 +91,18 @@ struct lpfc_iocbq {
81typedef struct lpfcMboxq { 91typedef struct lpfcMboxq {
82 /* MBOXQs are used in single linked lists */ 92 /* MBOXQs are used in single linked lists */
83 struct list_head list; /* ptr to next mailbox command */ 93 struct list_head list; /* ptr to next mailbox command */
84 MAILBOX_t mb; /* Mailbox cmd */ 94 union {
85 struct lpfc_vport *vport;/* virutal port pointer */ 95 MAILBOX_t mb; /* Mailbox cmd */
96 struct lpfc_mqe mqe;
97 } u;
98 struct lpfc_vport *vport;/* virtual port pointer */
86 void *context1; /* caller context information */ 99 void *context1; /* caller context information */
87 void *context2; /* caller context information */ 100 void *context2; /* caller context information */
88 101
89 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 102 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
90 uint8_t mbox_flag; 103 uint8_t mbox_flag;
91 104 struct lpfc_mcqe mcqe;
105 struct lpfc_mbx_nembed_sge_virt *sge_array;
92} LPFC_MBOXQ_t; 106} LPFC_MBOXQ_t;
93 107
94#define MBX_POLL 1 /* poll mailbox till command done, then 108#define MBX_POLL 1 /* poll mailbox till command done, then
@@ -230,10 +244,11 @@ struct lpfc_sli {
230 244
231 /* Additional sli_flags */ 245 /* Additional sli_flags */
232#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ 246#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
233#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 247#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
234#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 248#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
235#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ 249#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
236#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ 250#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
251#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
237 252
238 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 253 struct lpfc_sli_ring ring[LPFC_MAX_RING];
239 int fcp_ring; /* ring used for FCP initiator commands */ 254 int fcp_ring; /* ring used for FCP initiator commands */
@@ -261,6 +276,8 @@ struct lpfc_sli {
261 276
262#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 277#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
263 command */ 278 command */
279#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
280 command */
264#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 281#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
265 * or erase cmds. This is especially 282 * or erase cmds. This is especially
266 * long because of the potential of 283 * long because of the potential of
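
Note the LPFC_MBOXQ_t change in this hunk: the bare MAILBOX_t member moves into a union, so callers now reach the legacy mailbox through u.mb while SLI4 code uses the u.mqe view (the lpfc_vport.c hunk further below shows one such conversion). A sketch of the new access pattern, assuming only what the union declares; the helper name is hypothetical:

static int lpfc_mbox_view_sketch(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	mb = &pmb->u.mb;	/* legacy SLI-3 view; formerly &pmb->mb */
	memset(mb, 0, sizeof(MAILBOX_t));
	/* SLI4 paths would build pmb->u.mqe instead and, for
	 * non-embedded commands, find their payload via
	 * pmb->sge_array.
	 */
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
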
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 000000000000..5196b46608d7
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,467 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10
25/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254
27
28/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1
32#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34
35#define LPFC_FN_WQN_MAX 32
36#define LPFC_SP_WQN_DEF 1
37#define LPFC_FP_WQN_DEF 4
38#define LPFC_FP_WQN_MIN 1
39#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
40
41/*
42 * Provide the default FCF Record attributes used by the driver
43 * when non-FIP mode is configured and no other default FCF
44 * Record attributes are available.
45 */
46#define LPFC_FCOE_FCF_DEF_INDEX 0
47#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
48#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
49
50/* First 3 bytes of default FCF MAC are specified by FC_MAP */
51#define LPFC_FCOE_FCF_MAC3 0xFF
52#define LPFC_FCOE_FCF_MAC4 0xFF
53#define LPFC_FCOE_FCF_MAC5 0xFE
54#define LPFC_FCOE_FCF_MAP0 0x0E
55#define LPFC_FCOE_FCF_MAP1 0xFC
56#define LPFC_FCOE_FCF_MAP2 0x00
57#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
58#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80
60
61enum lpfc_sli4_queue_type {
62 LPFC_EQ,
63 LPFC_GCQ,
64 LPFC_MCQ,
65 LPFC_WCQ,
66 LPFC_RCQ,
67 LPFC_MQ,
68 LPFC_WQ,
69 LPFC_HRQ,
70 LPFC_DRQ
71};
72
73/* The queue sub-type defines the functional purpose of the queue */
74enum lpfc_sli4_queue_subtype {
75 LPFC_NONE,
76 LPFC_MBOX,
77 LPFC_FCP,
78 LPFC_ELS,
79 LPFC_USOL
80};
81
82union sli4_qe {
83 void *address;
84 struct lpfc_eqe *eqe;
85 struct lpfc_cqe *cqe;
86 struct lpfc_mcqe *mcqe;
87 struct lpfc_wcqe_complete *wcqe_complete;
88 struct lpfc_wcqe_release *wcqe_release;
89 struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
90 struct lpfc_rcqe_complete *rcqe_complete;
91 struct lpfc_mqe *mqe;
92 union lpfc_wqe *wqe;
93 struct lpfc_rqe *rqe;
94};
95
96struct lpfc_queue {
97 struct list_head list;
98 enum lpfc_sli4_queue_type type;
99 enum lpfc_sli4_queue_subtype subtype;
100 struct lpfc_hba *phba;
101 struct list_head child_list;
102 uint32_t entry_count; /* Number of entries to support on the queue */
103 uint32_t entry_size; /* Size of each queue entry. */
104 uint32_t queue_id; /* Queue ID assigned by the hardware */
105 struct list_head page_list;
106 uint32_t page_count; /* Number of pages allocated for this queue */
107
108 uint32_t host_index; /* The host's index for putting or getting */
109 uint32_t hba_index; /* The last known hba index for get or put */
110 union sli4_qe qe[1]; /* array to index entries (must be last) */
111};
112
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link {
126 uint8_t speed;
127 uint8_t duplex;
128 uint8_t status;
129 uint8_t physical;
130 uint8_t fault;
131};
132
133struct lpfc_fcf {
134 uint8_t fabric_name[8];
135 uint8_t mac_addr[6];
136 uint16_t fcf_indx;
137 uint16_t fcfi;
138 uint32_t fcf_flag;
139#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
140#define FCF_REGISTERED 0x02 /* FCF registered with FW */
141#define FCF_DISCOVERED 0x04 /* FCF discovery started */
142#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
143#define FCF_IN_USE 0x10 /* At least one discovery completed */
144#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
145 uint32_t priority;
146 uint32_t addr_mode;
147 uint16_t vlan_id;
148};
149
150#define LPFC_REGION23_SIGNATURE "RG23"
151#define LPFC_REGION23_VERSION 1
152#define LPFC_REGION23_LAST_REC 0xff
153struct lpfc_fip_param_hdr {
154 uint8_t type;
155#define FCOE_PARAM_TYPE 0xA0
156 uint8_t length;
157#define FCOE_PARAM_LENGTH 2
158 uint8_t parm_version;
159#define FIPP_VERSION 0x01
160 uint8_t parm_flags;
161#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
162#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
163#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
164#define FIPP_MODE_ON 0x2
165#define FIPP_MODE_OFF 0x0
166#define FIPP_VLAN_VALID 0x1
167};
168
169struct lpfc_fcoe_params {
170 uint8_t fc_map[3];
171 uint8_t reserved1;
172 uint16_t vlan_tag;
173 uint8_t reserved[2];
174};
175
176struct lpfc_fcf_conn_hdr {
177 uint8_t type;
178#define FCOE_CONN_TBL_TYPE 0xA1
179 uint8_t length; /* words */
180 uint8_t reserved[2];
181};
182
183struct lpfc_fcf_conn_rec {
184 uint16_t flags;
185#define FCFCNCT_VALID 0x0001
186#define FCFCNCT_BOOT 0x0002
187#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
188#define FCFCNCT_FBNM_VALID 0x0008
189#define FCFCNCT_SWNM_VALID 0x0010
190#define FCFCNCT_VLAN_VALID 0x0020
191#define FCFCNCT_AM_VALID 0x0040
192#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
193#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
194
195 uint16_t vlan_tag;
196 uint8_t fabric_name[8];
197 uint8_t switch_name[8];
198};
199
200struct lpfc_fcf_conn_entry {
201 struct list_head list;
202 struct lpfc_fcf_conn_rec conn_rec;
203};
204
205/*
206 * Define the host's bootstrap mailbox. This structure contains
207 * the member attributes needed to create, use, and destroy the
208 * bootstrap mailbox region.
209 *
210 * The macro definitions for the bmbx data structure are defined
211 * in lpfc_hw4.h with the register definition.
212 */
213struct lpfc_bmbx {
214 struct lpfc_dmabuf *dmabuf;
215 struct dma_address dma_address;
216 void *avirt;
217 dma_addr_t aphys;
218 uint32_t bmbx_size;
219};
220
221#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4B
222
223#define LPFC_EQE_SIZE_4B 4
224#define LPFC_EQE_SIZE_16B 16
225#define LPFC_CQE_SIZE 16
226#define LPFC_WQE_SIZE 64
227#define LPFC_MQE_SIZE 256
228#define LPFC_RQE_SIZE 8
229
230#define LPFC_EQE_DEF_COUNT 1024
231#define LPFC_CQE_DEF_COUNT 256
232#define LPFC_WQE_DEF_COUNT 64
233#define LPFC_MQE_DEF_COUNT 16
234#define LPFC_RQE_DEF_COUNT 512
235
236#define LPFC_QUEUE_NOARM false
237#define LPFC_QUEUE_REARM true
238
239
240/*
241 * SLI4 CT field defines
242 */
243#define SLI4_CT_RPI 0
244#define SLI4_CT_VPI 1
245#define SLI4_CT_VFI 2
246#define SLI4_CT_FCFI 3
247
248#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
249
250/*
251 * SLI4 specific data structures
252 */
253struct lpfc_max_cfg_param {
254 uint16_t max_xri;
255 uint16_t xri_base;
256 uint16_t xri_used;
257 uint16_t max_rpi;
258 uint16_t rpi_base;
259 uint16_t rpi_used;
260 uint16_t max_vpi;
261 uint16_t vpi_base;
262 uint16_t vpi_used;
263 uint16_t max_vfi;
264 uint16_t vfi_base;
265 uint16_t vfi_used;
266 uint16_t max_fcfi;
267 uint16_t fcfi_base;
268 uint16_t fcfi_used;
269 uint16_t max_eq;
270 uint16_t max_rq;
271 uint16_t max_cq;
272 uint16_t max_wq;
273};
274
275struct lpfc_hba;
276/* SLI4 HBA multi-fcp queue handler struct */
277struct lpfc_fcp_eq_hdl {
278 uint32_t idx;
279 struct lpfc_hba *phba;
280};
281
282/* SLI4 HBA data structure entries */
283struct lpfc_sli4_hba {
284 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
285 PCI BAR0, config space registers */
286 void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
287 PCI BAR1, control registers */
288 void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
289 PCI BAR2, doorbell registers */
290 /* BAR0 PCI config space register memory map */
291 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
292 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
293 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
294 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
295#define LPFC_ONLINE_NERR 0xFFFFFFFF
296 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
297 /* BAR1 FCoE function CSR register memory map */
298 void __iomem *STAregaddr; /* Address to HST_STATE register */
299 void __iomem *ISRregaddr; /* Address to HST_ISR register */
300 void __iomem *IMRregaddr; /* Address to HST_IMR register */
301 void __iomem *ISCRregaddr; /* Address to HST_ISCR register */
302 /* BAR2 VF-0 doorbell register memory map */
303 void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */
304 void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */
305 void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
306 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
307 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
308
309 struct msix_entry *msix_entries;
310 uint32_t cfg_eqn;
311 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
312 /* Pointers to the constructed SLI4 queues */
313 struct lpfc_queue **fp_eq; /* Fast-path event queue */
314 struct lpfc_queue *sp_eq; /* Slow-path event queue */
315 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
316 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
317 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
318 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
319 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
320 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
321 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
322 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
323 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
324
325 /* Setup information for various queue parameters */
326 int eq_esize;
327 int eq_ecount;
328 int cq_esize;
329 int cq_ecount;
330 int wq_esize;
331 int wq_ecount;
332 int mq_esize;
333 int mq_ecount;
334 int rq_esize;
335 int rq_ecount;
336#define LPFC_SP_EQ_MAX_INTR_SEC 10000
337#define LPFC_FP_EQ_MAX_INTR_SEC 10000
338
339 uint32_t intr_enable;
340 struct lpfc_bmbx bmbx;
341 struct lpfc_max_cfg_param max_cfg_param;
342 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
343 uint16_t next_rpi;
344 uint16_t scsi_xri_max;
345 uint16_t scsi_xri_cnt;
346 struct list_head lpfc_free_sgl_list;
347 struct list_head lpfc_sgl_list;
348 struct lpfc_sglq **lpfc_els_sgl_array;
349 struct list_head lpfc_abts_els_sgl_list;
350 struct lpfc_scsi_buf **lpfc_scsi_psb_array;
351 struct list_head lpfc_abts_scsi_buf_list;
352 uint32_t total_sglq_bufs;
353 struct lpfc_sglq **lpfc_sglq_active_list;
354 struct list_head lpfc_rpi_hdr_list;
355 unsigned long *rpi_bmask;
356 uint16_t rpi_count;
357 struct lpfc_sli4_flags sli4_flags;
358 struct list_head sp_rspiocb_work_queue;
359 struct list_head sp_cqe_event_pool;
360 struct list_head sp_asynce_work_queue;
361 struct list_head sp_fcp_xri_aborted_work_queue;
362 struct list_head sp_els_xri_aborted_work_queue;
363 struct list_head sp_unsol_work_queue;
364 struct lpfc_sli4_link link_state;
365 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
366 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
367};
368
369enum lpfc_sge_type {
370 GEN_BUFF_TYPE,
371 SCSI_BUFF_TYPE
372};
373
374struct lpfc_sglq {
375 /* lpfc_sglqs are used in double linked lists */
376 struct list_head list;
377 struct list_head clist;
378 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
379 uint16_t iotag; /* pre-assigned IO tag */
380 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
381 struct sli4_sge *sgl; /* pre-assigned SGL */
382 void *virt; /* virtual address. */
383 dma_addr_t phys; /* physical address */
384};
385
386struct lpfc_rpi_hdr {
387 struct list_head list;
388 uint32_t len;
389 struct lpfc_dmabuf *dmabuf;
390 uint32_t page_count;
391 uint32_t start_rpi;
392};
393
394/*
395 * SLI4 specific function prototypes
396 */
397int lpfc_pci_function_reset(struct lpfc_hba *);
398int lpfc_sli4_hba_setup(struct lpfc_hba *);
399int lpfc_sli4_hba_down(struct lpfc_hba *);
400int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
401 uint8_t, uint32_t, bool);
402void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
403void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
404void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
405 struct lpfc_mbx_sge *);
406
407void lpfc_sli4_hba_reset(struct lpfc_hba *);
408struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
409 uint32_t);
410void lpfc_sli4_queue_free(struct lpfc_queue *);
411uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
412uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
413 struct lpfc_queue *, uint32_t, uint32_t);
414uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
415 struct lpfc_queue *, uint32_t);
416uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
417 struct lpfc_queue *, uint32_t);
418uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
419 struct lpfc_queue *, struct lpfc_queue *, uint32_t);
420uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
421uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
422uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
423uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
424uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
425 struct lpfc_queue *);
426int lpfc_sli4_queue_setup(struct lpfc_hba *);
427void lpfc_sli4_queue_unset(struct lpfc_hba *);
428int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
429int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
430int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
431uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
432int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
433int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
434int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
435struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
436struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
437void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
438void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
439int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
440int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
441int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
442struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
443void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
444int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
445void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
446void lpfc_sli4_remove_rpis(struct lpfc_hba *);
447void lpfc_sli4_async_event_proc(struct lpfc_hba *);
448int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
449void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
450void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
451void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
452 struct sli4_wcqe_xri_aborted *);
453void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
454 struct sli4_wcqe_xri_aborted *);
455int lpfc_sli4_brdreset(struct lpfc_hba *);
456int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
457void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
458int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
459int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
460uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
461uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
462void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
463int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
464void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
465int lpfc_sli4_post_status_check(struct lpfc_hba *);
466uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
467
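
The prototypes at the end of the new header outline the SLI4 queue life cycle: lpfc_sli4_queue_alloc() builds the host-side structure, the *_create() calls bind it to the port, and the *_destroy()/lpfc_sli4_queue_free() calls undo both. Below is a hedged sketch of creating one slow-path EQ/CQ pair with the default sizes and counts defined above; the function name and error codes are illustrative, not a verified call site.

/* Sketch only: build a slow-path EQ and hang a mailbox CQ off it,
 * using the default entry sizes/counts from this header.
 */
static int lpfc_sp_eqcq_sketch(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *cq;

	eq = lpfc_sli4_queue_alloc(phba, LPFC_EQE_SIZE_4B,
				   LPFC_EQE_DEF_COUNT);
	cq = lpfc_sli4_queue_alloc(phba, LPFC_CQE_SIZE,
				   LPFC_CQE_DEF_COUNT);
	if (!eq || !cq)
		goto out_free;
	if (lpfc_eq_create(phba, eq, LPFC_SP_EQ_MAX_INTR_SEC))
		goto out_free;
	/* Each CQ is parented to the EQ that delivers its interrupts */
	if (lpfc_cq_create(phba, cq, eq, LPFC_MCQ, LPFC_MBOX))
		goto out_free;
	return 0;
out_free:
	/* Sketch: a real path would also lpfc_eq_destroy() a created EQ */
	if (eq)
		lpfc_sli4_queue_free(eq);
	if (cq)
		lpfc_sli4_queue_free(cq);
	return -ENOMEM;
}
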
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e599519e3078..6b8a148f0a55 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.1" 21#define LPFC_DRIVER_VERSION "8.3.2"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0aff..a6313ee84ac5 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
32#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
89 vpi = 0; 91 vpi = 0;
90 else 92 else
91 set_bit(vpi, phba->vpi_bmask); 93 set_bit(vpi, phba->vpi_bmask);
94 if (phba->sli_rev == LPFC_SLI_REV4)
95 phba->sli4_hba.max_cfg_param.vpi_used++;
92 spin_unlock_irq(&phba->hbalock); 96 spin_unlock_irq(&phba->hbalock);
93 return vpi; 97 return vpi;
94} 98}
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
96static void 100static void
97lpfc_free_vpi(struct lpfc_hba *phba, int vpi) 101lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
98{ 102{
103 if (vpi == 0)
104 return;
99 spin_lock_irq(&phba->hbalock); 105 spin_lock_irq(&phba->hbalock);
100 clear_bit(vpi, phba->vpi_bmask); 106 clear_bit(vpi, phba->vpi_bmask);
107 if (phba->sli_rev == LPFC_SLI_REV4)
108 phba->sli4_hba.max_cfg_param.vpi_used--;
101 spin_unlock_irq(&phba->hbalock); 109 spin_unlock_irq(&phba->hbalock);
102} 110}
103 111
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
113 if (!pmb) { 121 if (!pmb) {
114 return -ENOMEM; 122 return -ENOMEM;
115 } 123 }
116 mb = &pmb->mb; 124 mb = &pmb->u.mb;
117 125
118 lpfc_read_sparam(phba, pmb, vport->vpi); 126 lpfc_read_sparam(phba, pmb, vport->vpi);
119 /* 127 /*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
243 (vport->fc_flag & wait_flags) || 251 (vport->fc_flag & wait_flags) ||
244 ((vport->port_state > LPFC_VPORT_FAILED) && 252 ((vport->port_state > LPFC_VPORT_FAILED) &&
245 (vport->port_state < LPFC_VPORT_READY))) { 253 (vport->port_state < LPFC_VPORT_READY))) {
246 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 254 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
247 "1833 Vport discovery quiesce Wait:" 255 "1833 Vport discovery quiesce Wait:"
248 " vpi x%x state x%x fc_flags x%x" 256 " state x%x fc_flags x%x"
249 " num_nodes x%x, waiting 1000 msecs" 257 " num_nodes x%x, waiting 1000 msecs"
250 " total wait msecs x%x\n", 258 " total wait msecs x%x\n",
251 vport->vpi, vport->port_state, 259 vport->port_state, vport->fc_flag,
252 vport->fc_flag, vport->num_disc_nodes, 260 vport->num_disc_nodes,
253 jiffies_to_msecs(jiffies - start_time)); 261 jiffies_to_msecs(jiffies - start_time));
254 msleep(1000); 262 msleep(1000);
255 } else { 263 } else {
256 /* Base case. Wait variants satisfied. Break out */ 264 /* Base case. Wait variants satisfied. Break out */
257 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 265 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
258 "1834 Vport discovery quiesced:" 266 "1834 Vport discovery quiesced:"
259 " vpi x%x state x%x fc_flags x%x" 267 " state x%x fc_flags x%x"
260 " wait msecs x%x\n", 268 " wait msecs x%x\n",
261 vport->vpi, vport->port_state, 269 vport->port_state, vport->fc_flag,
262 vport->fc_flag,
263 jiffies_to_msecs(jiffies 270 jiffies_to_msecs(jiffies
264 - start_time)); 271 - start_time));
265 break; 272 break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
267 } 274 }
268 275
269 if (time_after(jiffies, wait_time_max)) 276 if (time_after(jiffies, wait_time_max))
270 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 277 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
271 "1835 Vport discovery quiesce failed:" 278 "1835 Vport discovery quiesce failed:"
272 " vpi x%x state x%x fc_flags x%x" 279 " state x%x fc_flags x%x wait msecs x%x\n",
273 " wait msecs x%x\n", 280 vport->port_state, vport->fc_flag,
274 vport->vpi, vport->port_state,
275 vport->fc_flag,
276 jiffies_to_msecs(jiffies - start_time)); 281 jiffies_to_msecs(jiffies - start_time));
277} 282}
278 283
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
308 goto error_out; 313 goto error_out;
309 } 314 }
310 315
316 /*
317 * In SLI4, the vpi must be activated before it can be used
318 * by the port.
319 */
320 if (phba->sli_rev == LPFC_SLI_REV4) {
321 rc = lpfc_sli4_init_vpi(phba, vpi);
322 if (rc) {
323 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
324 "1838 Failed to INIT_VPI on vpi %d "
325 "status %d\n", vpi, rc);
326 rc = VPORT_NORESOURCES;
327 lpfc_free_vpi(phba, vpi);
328 goto error_out;
329 }
330 }
311 331
312 /* Assign an unused board number */ 332 /* Assign an unused board number */
313 if ((instance = lpfc_get_instance()) < 0) { 333 if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
535 "physical host\n"); 555 "physical host\n");
536 return VPORT_ERROR; 556 return VPORT_ERROR;
537 } 557 }
558
559 /* If the vport is a static vport, fail the deletion. */
560 if ((vport->vport_flag & STATIC_VPORT) &&
561 !(phba->pport->load_flag & FC_UNLOADING)) {
562 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
563 "1837 vport_delete failed: Cannot delete "
564 "static vport.\n");
565 return VPORT_ERROR;
566 }
567
538 /* 568 /*
539 * If we are not unloading the driver then prevent the vport_delete 569 * If we are not unloading the driver then prevent the vport_delete
540 * from happening until after this vport's discovery is finished. 570 * from happening until after this vport's discovery is finished.
@@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
710 struct lpfc_vport *port_iterator; 740 struct lpfc_vport *port_iterator;
711 struct lpfc_vport **vports; 741 struct lpfc_vport **vports;
712 int index = 0; 742 int index = 0;
713 vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), 743 vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
714 GFP_KERNEL); 744 GFP_KERNEL);
715 if (vports == NULL) 745 if (vports == NULL)
716 return NULL; 746 return NULL;
@@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
734 int i; 764 int i;
735 if (vports == NULL) 765 if (vports == NULL)
736 return; 766 return;
737 for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) 767 for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
738 scsi_host_put(lpfc_shost_from_vport(vports[i])); 768 scsi_host_put(lpfc_shost_from_vport(vports[i]));
739 kfree(vports); 769 kfree(vports);
740} 770}
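
The max_vpi to max_vports change above also fixes the sizing contract of the vport work array: the snapshot holds at most max_vports entries plus a NULL terminator, and every entry carries a scsi_host reference that lpfc_destroy_vport_work_array() drops. The usual caller pattern, sketched with an illustrative loop body and a hypothetical function name:

/* Illustrative use of the work-array helpers changed above: snapshot
 * the active vports, operate on each, then release the references
 * taken by lpfc_create_vport_work_array().
 */
static void lpfc_for_each_vport_sketch(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* per-vport work goes here */
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
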